Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2014/10/18 01:31:11 UTC
[24/34] git commit: HDFS-7159. Use block storage policy to set lazy persist preference. (Arpit Agarwal)
HDFS-7159. Use block storage policy to set lazy persist preference. (Arpit Agarwal)
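With this change the client no longer records a dedicated lazyPersist flag on the inode; passing CreateFlag.LAZY_PERSIST at create time instead causes the NameNode to stamp the file with the LAZY_PERSIST block storage policy (see setNewINodeStoragePolicy in the FSNamesystem hunk below). A minimal client-side sketch, with an illustrative path and otherwise default parameters:

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.*;
    import org.apache.hadoop.fs.permission.FsPermission;

    FileSystem fs = FileSystem.get(new Configuration());
    // LAZY_PERSIST here is translated into the storage policy on the new inode.
    FSDataOutputStream out = fs.create(new Path("/scratch/tmp.dat"),
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
        4096, (short) 1, fs.getDefaultBlockSize(), null);
    out.writeBytes("hello");
    out.close();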
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d61daff9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d61daff9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d61daff9
Branch: refs/heads/branch-2.6
Commit: d61daff9bf9264ed204829352bb570a7dc7e8fe0
Parents: ba31a95
Author: arp <ar...@apache.org>
Authored: Mon Sep 29 22:27:59 2014 -0700
Committer: Jitendra Pandey <Ji...@Jitendra-Pandeys-MacBook-Pro-4.local>
Committed: Fri Oct 17 16:00:52 2014 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/fs/shell/Stat.java | 9 +--
.../src/test/resources/testConf.xml | 2 +-
.../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 ++++-
.../org/apache/hadoop/hdfs/StorageType.java | 8 ++-
.../hdfs/protocol/BlockStoragePolicy.java | 37 ++++++++++--
.../hadoop/hdfs/protocol/HdfsConstants.java | 2 +
.../server/blockmanagement/BlockCollection.java | 6 --
.../server/blockmanagement/BlockManager.java | 4 ++
.../BlockStoragePolicySuite.java | 7 +++
.../datanode/fsdataset/impl/FsDatasetImpl.java | 2 +-
.../fsdataset/impl/FsTransientVolumeImpl.java | 60 --------------------
.../datanode/fsdataset/impl/FsVolumeImpl.java | 6 +-
.../fsdataset/impl/FsVolumeImplAllocator.java | 44 --------------
.../hdfs/server/namenode/FSDirectory.java | 28 ++++++---
.../hadoop/hdfs/server/namenode/FSEditLog.java | 2 -
.../hdfs/server/namenode/FSEditLogLoader.java | 2 +-
.../hdfs/server/namenode/FSEditLogOp.java | 23 +-------
.../hdfs/server/namenode/FSImageFormat.java | 6 +-
.../server/namenode/FSImageFormatPBINode.java | 5 +-
.../hdfs/server/namenode/FSNamesystem.java | 45 ++++++++++++++-
.../hadoop/hdfs/server/namenode/INodeFile.java | 10 ++--
.../server/namenode/INodeFileAttributes.java | 6 +-
.../server/namenode/NameNodeLayoutVersion.java | 2 -
.../snapshot/FSImageFormatPBSnapshot.java | 2 -
.../tools/offlineImageViewer/FSImageLoader.java | 1 -
.../offlineImageViewer/PBImageXmlWriter.java | 4 --
.../hadoop-hdfs/src/main/proto/fsimage.proto | 1 -
.../fsdataset/impl/TestLazyPersistFiles.java | 22 ++++---
.../fsdataset/impl/TestScrLazyPersistFiles.java | 17 +-----
.../src/test/resources/testHDFSConf.xml | 36 ------------
30 files changed, 160 insertions(+), 254 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 8b32eb8..ee56fe6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.fs.FileStatus;
* %u: User name of owner
* %y: UTC date as "yyyy-MM-dd HH:mm:ss"
* %Y: Milliseconds since January 1, 1970 UTC
- * %l: Whether lazyPersist flag is set on the file.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
@@ -54,8 +53,7 @@ class Stat extends FsCommand {
public static final String DESCRIPTION =
"Print statistics about the file/directory at <path> " +
"in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g), " +
- "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y), " +
- "lazyPersist flag (%l)\n";
+ "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y)\n";
protected static final SimpleDateFormat timeFmt;
static {
@@ -93,7 +91,7 @@ class Stat extends FsCommand {
break;
case 'F':
buf.append(stat.isDirectory()
- ? "directory"
+ ? "directory"
: (stat.isFile() ? "regular file" : "symlink"));
break;
case 'g':
@@ -117,9 +115,6 @@ class Stat extends FsCommand {
case 'Y':
buf.append(stat.getModificationTime());
break;
- case 'l':
- buf.append(stat.isLazyPersist());
- break;
default:
// this leaves %<unknown> alone, which causes the potential for
// future format options to break strings; should use %% to
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index 29d4174..c6e5fc5 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -798,7 +798,7 @@
</comparator>
<comparator>
<type>RegexpComparator</type>
- <expected-output>^( |\t)*\(%y, %Y\), lazyPersist flag \(\%l\)( )*</expected-output>
+ <expected-output>^( |\t)*\(%y, %Y\)( )*</expected-output>
</comparator>
</comparators>
</test>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 2bcda8b..a37e4d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -75,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException;
@@ -172,6 +174,8 @@ public class DFSOutputStream extends FSOutputSummer
private final AtomicReference<CachingStrategy> cachingStrategy;
private boolean failPacket = false;
private FileEncryptionInfo fileEncryptionInfo;
+ private static final BlockStoragePolicySuite blockStoragePolicySuite =
+ BlockStoragePolicySuite.createDefaultSuite();
private static class Packet {
private static final long HEART_BEAT_SEQNO = -1L;
@@ -386,7 +390,7 @@ public class DFSOutputStream extends FSOutputSummer
*/
private DataStreamer(HdfsFileStatus stat, Span span) {
isAppend = false;
- isLazyPersistFile = stat.isLazyPersist();
+ isLazyPersistFile = initLazyPersist(stat);
stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
traceSpan = span;
}
@@ -406,7 +410,7 @@ public class DFSOutputStream extends FSOutputSummer
block = lastBlock.getBlock();
bytesSent = block.getNumBytes();
accessToken = lastBlock.getBlockToken();
- isLazyPersistFile = stat.isLazyPersist();
+ isLazyPersistFile = initLazyPersist(stat);
long usedInLastBlock = stat.getLen() % blockSize;
int freeInLastBlock = (int)(blockSize - usedInLastBlock);
@@ -450,6 +454,13 @@ public class DFSOutputStream extends FSOutputSummer
}
}
+ private boolean initLazyPersist(HdfsFileStatus stat) {
+ final BlockStoragePolicy lpPolicy =
+ blockStoragePolicySuite.getPolicy("LAZY_PERSIST");
+ return lpPolicy != null &&
+ stat.getStoragePolicy() == lpPolicy.getId();
+ }
+
private void setPipeline(LocatedBlock lb) {
setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
index 99cae9a..88cc7d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
@@ -47,7 +47,13 @@ public enum StorageType {
StorageType(boolean isTransient) { this.isTransient = isTransient; }
- public boolean isMovable() { return isTransient == false; }
+ public boolean isTransient() {
+ return isTransient;
+ }
+
+ public boolean isMovable() {
+ return !isTransient;
+ }
public static List<StorageType> asList() {
return Arrays.asList(VALUES);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
index 8ca83a0..ef13e0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockStoragePolicy.java
@@ -49,15 +49,29 @@ public class BlockStoragePolicy {
private final StorageType[] creationFallbacks;
/** The fallback storage type for replication. */
private final StorageType[] replicationFallbacks;
+ /**
+ * Whether the policy is inherited during file creation.
+ * If set then the policy cannot be changed after file creation.
+ */
+ private boolean copyOnCreateFile;
@VisibleForTesting
public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
StorageType[] creationFallbacks, StorageType[] replicationFallbacks) {
+ this(id, name, storageTypes, creationFallbacks, replicationFallbacks,
+ false);
+ }
+
+ @VisibleForTesting
+ public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
+ StorageType[] creationFallbacks, StorageType[] replicationFallbacks,
+ boolean copyOnCreateFile) {
this.id = id;
this.name = name;
this.storageTypes = storageTypes;
this.creationFallbacks = creationFallbacks;
this.replicationFallbacks = replicationFallbacks;
+ this.copyOnCreateFile = copyOnCreateFile;
}
/**
@@ -65,13 +79,22 @@ public class BlockStoragePolicy {
*/
public List<StorageType> chooseStorageTypes(final short replication) {
final List<StorageType> types = new LinkedList<StorageType>();
- int i = 0;
- for(; i < replication && i < storageTypes.length; i++) {
- types.add(storageTypes[i]);
+ int i = 0, j = 0;
+
+ // Do not return transient storage types. We will not have accurate
+ // usage information for transient types.
+ for (;i < replication && j < storageTypes.length; ++j) {
+ if (!storageTypes[j].isTransient()) {
+ types.add(storageTypes[j]);
+ ++i;
+ }
}
+
final StorageType last = storageTypes[storageTypes.length - 1];
- for(; i < replication; i++) {
- types.add(last);
+ if (!last.isTransient()) {
+ for (; i < replication; i++) {
+ types.add(last);
+ }
}
return types;
}
@@ -241,4 +264,8 @@ public class BlockStoragePolicy {
}
return null;
}
+
+ public boolean isCopyOnCreateFile() {
+ return copyOnCreateFile;
+ }
}
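The reworked chooseStorageTypes() above never hands out a transient type, since usage accounting for transient storage is unreliable. A quick walkthrough (sketch, using only APIs added in this patch) against the LAZY_PERSIST policy, whose type list is {RAM_DISK, DISK}:

    BlockStoragePolicy lp = BlockStoragePolicySuite.createDefaultSuite()
        .getPolicy("LAZY_PERSIST");
    List<StorageType> types = lp.chooseStorageTypes((short) 3);
    // RAM_DISK is transient and skipped; DISK covers the first replica and,
    // being the last (non-transient) type, is repeated for the remainder:
    // types == [DISK, DISK, DISK]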
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 25c64d3..b0d1dca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -171,7 +171,9 @@ public class HdfsConstants {
public static final String HOT_STORAGE_POLICY_NAME = "HOT";
public static final String WARM_STORAGE_POLICY_NAME = "WARM";
public static final String COLD_STORAGE_POLICY_NAME = "COLD";
+ public static final String LAZY_PERSIST_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+ public static final byte LAZY_PERSIST_STORAGE_POLICY_ID = 15;
public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
public static final byte ONESSD_STORAGE_POLICY_ID = 10;
public static final byte HOT_STORAGE_POLICY_ID = 7;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index c97dcb9..d095a1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -55,12 +55,6 @@ public interface BlockCollection {
public long getPreferredBlockSize();
/**
- * Return true if the file was created with {@Link CreateFlag#LAZY_PERSIST}.
- * @return
- */
- public boolean getLazyPersistFlag();
-
- /**
* Get block replication for the collection
* @return block replication value
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e95f705..99710d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -402,6 +402,10 @@ public class BlockManager {
return storagePolicySuite.getPolicy(policyName);
}
+ public BlockStoragePolicy getStoragePolicy(final byte policyId) {
+ return storagePolicySuite.getPolicy(policyId);
+ }
+
public BlockStoragePolicy[] getStoragePolicies() {
return storagePolicySuite.getAllPolicies();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index cbd1823..fa94921 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -45,6 +45,13 @@ public class BlockStoragePolicySuite {
public static BlockStoragePolicySuite createDefaultSuite() {
final BlockStoragePolicy[] policies =
new BlockStoragePolicy[1 << ID_BIT_LENGTH];
+ final byte lazyPersistId = HdfsConstants.LAZY_PERSIST_STORAGE_POLICY_ID;
+ policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
+ HdfsConstants.LAZY_PERSIST_STORAGE_POLICY_NAME,
+ new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
+ new StorageType[]{StorageType.DISK},
+ new StorageType[]{StorageType.DISK},
+ true); // Cannot be changed on regular files, but inherited.
final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
policies[allssdId] = new BlockStoragePolicy(allssdId,
HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
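The new entry occupies slot 15 of the policy table and is the only policy constructed with copyOnCreateFile = true, so it is inherited at create time but can never be applied to an existing file. A sketch of what the default suite now exposes:

    BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    BlockStoragePolicy lp =
        suite.getPolicy(HdfsConstants.LAZY_PERSIST_STORAGE_POLICY_ID); // id 15
    assert lp.isCopyOnCreateFile();
    assert lp.getName().equals(HdfsConstants.LAZY_PERSIST_STORAGE_POLICY_NAME);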
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 11322a9..8ed81df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -295,7 +295,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
// nothing needed to be rolled back to make various data structures, e.g.,
// storageMap and asyncDiskService, consistent.
- FsVolumeImpl fsVolume = FsVolumeImplAllocator.createVolume(
+ FsVolumeImpl fsVolume = new FsVolumeImpl(
this, sd.getStorageUuid(), dir, this.conf, storageType);
ReplicaMap tempVolumeMap = new ReplicaMap(this);
fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java
deleted file mode 100644
index dafa74f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsTransientVolumeImpl.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.ThreadPoolExecutor;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.StorageType;
-
-/**
- * Volume for storing replicas in memory. These can be deleted at any time
- * to make space for new replicas and there is no persistence guarantee.
- *
- * The backing store for these replicas is expected to be RAM_DISK.
- * The backing store may be disk when testing.
- *
- * It uses the {@link FsDatasetImpl} object for synchronization.
- */
-@InterfaceAudience.Private
-@VisibleForTesting
-public class FsTransientVolumeImpl extends FsVolumeImpl {
-
-
- FsTransientVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
- Configuration conf, StorageType storageType)
- throws IOException {
- super(dataset, storageID, currentDir, conf, storageType);
- }
-
- @Override
- protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
- // Can't 'cache' replicas already in RAM.
- return null;
- }
-
- @Override
- public boolean isTransientStorage() {
- return true;
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 098ad09..60ea125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -96,6 +96,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
+ if (storageType.isTransient()) {
+ return null;
+ }
+
final int maxNumThreads = dataset.datanode.getConf().getInt(
DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
@@ -203,7 +207,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
@Override
public boolean isTransientStorage() {
- return false;
+ return storageType.isTransient();
}
@Override
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java
deleted file mode 100644
index 14d3aaf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplAllocator.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import java.io.File;
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.StorageType;
-
-/**
- * Generate volumes based on the storageType.
- */
-@InterfaceAudience.Private
-class FsVolumeImplAllocator {
- static FsVolumeImpl createVolume(FsDatasetImpl fsDataset, String storageUuid,
- File dir, Configuration conf, StorageType storageType)
- throws IOException {
- switch(storageType) {
- case RAM_DISK:
- return new FsTransientVolumeImpl(
- fsDataset, storageUuid, dir, conf, storageType);
- default:
- return new FsVolumeImpl(
- fsDataset, storageUuid, dir, conf, storageType);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 56f9045..3e6cf5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -299,14 +300,13 @@ public class FSDirectory implements Closeable {
*/
INodeFile addFile(String path, PermissionStatus permissions,
short replication, long preferredBlockSize,
- boolean isLazyPersist,
String clientName, String clientMachine)
throws FileAlreadyExistsException, QuotaExceededException,
UnresolvedLinkException, SnapshotAccessControlException, AclException {
long modTime = now();
INodeFile newNode = newINodeFile(namesystem.allocateNewInodeId(),
- permissions, modTime, modTime, replication, preferredBlockSize, isLazyPersist);
+ permissions, modTime, modTime, replication, preferredBlockSize);
newNode.toUnderConstruction(clientName, clientMachine);
boolean added = false;
@@ -336,7 +336,6 @@ public class FSDirectory implements Closeable {
long modificationTime,
long atime,
long preferredBlockSize,
- boolean isLazyPersist,
boolean underConstruction,
String clientName,
String clientMachine,
@@ -345,12 +344,12 @@ public class FSDirectory implements Closeable {
assert hasWriteLock();
if (underConstruction) {
newNode = newINodeFile(id, permissions, modificationTime,
- modificationTime, replication, preferredBlockSize, storagePolicyId, isLazyPersist);
+ modificationTime, replication, preferredBlockSize, storagePolicyId);
newNode.toUnderConstruction(clientName, clientMachine);
} else {
newNode = newINodeFile(id, permissions, modificationTime, atime,
- replication, preferredBlockSize, storagePolicyId, isLazyPersist);
+ replication, preferredBlockSize, storagePolicyId);
}
try {
@@ -1038,6 +1037,20 @@ public class FSDirectory implements Closeable {
}
final int snapshotId = iip.getLatestSnapshotId();
if (inode.isFile()) {
+ BlockStoragePolicy newPolicy = getBlockManager().getStoragePolicy(policyId);
+ if (newPolicy.isCopyOnCreateFile()) {
+ throw new HadoopIllegalArgumentException(
+ "Policy " + newPolicy + " cannot be set after file creation.");
+ }
+
+ BlockStoragePolicy currentPolicy =
+ getBlockManager().getStoragePolicy(inode.getLocalStoragePolicyID());
+
+ if (currentPolicy != null && currentPolicy.isCopyOnCreateFile()) {
+ throw new HadoopIllegalArgumentException(
+ "Existing policy " + currentPolicy.getName() +
+ " cannot be changed after file creation.");
+ }
inode.asFile().setStoragePolicyID(policyId, snapshotId);
} else if (inode.isDirectory()) {
setDirStoragePolicy(inode.asDirectory(), policyId, snapshotId);
@@ -2431,7 +2444,6 @@ public class FSDirectory implements Closeable {
node.isDirectory(),
replication,
blocksize,
- isLazyPersist,
node.getModificationTime(snapshot),
node.getAccessTime(snapshot),
getPermissionForFileStatus(node, snapshot, isEncrypted),
@@ -2455,7 +2467,6 @@ public class FSDirectory implements Closeable {
long size = 0; // length is zero for directories
short replication = 0;
long blocksize = 0;
- boolean isLazyPersist = false;
LocatedBlocks loc = null;
final boolean isEncrypted;
final FileEncryptionInfo feInfo = isRawPath ? null :
@@ -2465,7 +2476,6 @@ public class FSDirectory implements Closeable {
size = fileNode.computeFileSize(snapshot);
replication = fileNode.getFileReplication(snapshot);
blocksize = fileNode.getPreferredBlockSize();
- isLazyPersist = fileNode.getLazyPersistFlag();
final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
@@ -2488,7 +2498,7 @@ public class FSDirectory implements Closeable {
HdfsLocatedFileStatus status =
new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
- blocksize, isLazyPersist, node.getModificationTime(snapshot),
+ blocksize, node.getModificationTime(snapshot),
node.getAccessTime(snapshot),
getPermissionForFileStatus(node, snapshot, isEncrypted),
node.getUserName(snapshot), node.getGroupName(snapshot),
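The copy-on-create checks in setStoragePolicy above mean a file can no longer be moved into or out of LAZY_PERSIST after creation. A client-side sketch of the rejected call, with an illustrative path:

    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    try {
      dfs.setStoragePolicy(new Path("/existing/file"), "LAZY_PERSIST");
    } catch (IOException e) {
      // The NameNode rejects the call with a HadoopIllegalArgumentException:
      // "Policy ... cannot be set after file creation."
    }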
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 1a57179..cb44011 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -714,7 +714,6 @@ public class FSEditLog implements LogsPurgeable {
.setModificationTime(newNode.getModificationTime())
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
- .setLazyPersistFlag(newNode.getLazyPersistFlag())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(permissions)
.setClientName(newNode.getFileUnderConstructionFeature().getClientName())
@@ -747,7 +746,6 @@ public class FSEditLog implements LogsPurgeable {
.setModificationTime(newNode.getModificationTime())
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
- .setLazyPersistFlag(newNode.getLazyPersistFlag())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(newNode.getPermissionStatus());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 472a7f3..7dfe688 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -366,7 +366,7 @@ public class FSEditLogLoader {
path, addCloseOp.permissions, addCloseOp.aclEntries,
addCloseOp.xAttrs,
replication, addCloseOp.mtime, addCloseOp.atime,
- addCloseOp.blockSize, addCloseOp.isLazyPersist, true, addCloseOp.clientName,
+ addCloseOp.blockSize, true, addCloseOp.clientName,
addCloseOp.clientMachine, addCloseOp.storagePolicyId);
fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index f66530d..ec2b633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -406,7 +406,6 @@ public abstract class FSEditLogOp {
long mtime;
long atime;
long blockSize;
- boolean isLazyPersist;
Block[] blocks;
PermissionStatus permissions;
List<AclEntry> aclEntries;
@@ -418,6 +417,7 @@ public abstract class FSEditLogOp {
private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
+ storagePolicyId = BlockStoragePolicySuite.ID_UNSPECIFIED;
assert(opCode == OP_ADD || opCode == OP_CLOSE);
}
@@ -456,11 +456,6 @@ public abstract class FSEditLogOp {
return (T)this;
}
- <T extends AddCloseOp> T setLazyPersistFlag(boolean isLazyPersist) {
- this.isLazyPersist = isLazyPersist;
- return (T)this;
- }
-
<T extends AddCloseOp> T setBlocks(Block[] blocks) {
if (blocks.length > MAX_BLOCKS) {
throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
@@ -518,7 +513,6 @@ public abstract class FSEditLogOp {
FSImageSerialization.writeLong(mtime, out);
FSImageSerialization.writeLong(atime, out);
FSImageSerialization.writeLong(blockSize, out);
- FSImageSerialization.writeInt((isLazyPersist ? 1 : 0), out);
new ArrayWritable(Block.class, blocks).write(out);
permissions.write(out);
@@ -588,13 +582,6 @@ public abstract class FSEditLogOp {
this.blockSize = readLong(in);
}
- if (NameNodeLayoutVersion.supports(
- NameNodeLayoutVersion.Feature.LAZY_PERSIST_FILES, logVersion)) {
- this.isLazyPersist = (FSImageSerialization.readInt(in) != 0);
- } else {
- this.isLazyPersist = false;
- }
-
this.blocks = readBlocks(in, logVersion);
this.permissions = PermissionStatus.read(in);
@@ -660,8 +647,6 @@ public abstract class FSEditLogOp {
builder.append(atime);
builder.append(", blockSize=");
builder.append(blockSize);
- builder.append(", lazyPersist");
- builder.append(isLazyPersist);
builder.append(", blocks=");
builder.append(Arrays.toString(blocks));
builder.append(", permissions=");
@@ -702,8 +687,6 @@ public abstract class FSEditLogOp {
Long.toString(atime));
XMLUtils.addSaxString(contentHandler, "BLOCKSIZE",
Long.toString(blockSize));
- XMLUtils.addSaxString(contentHandler, "LAZY_PERSIST",
- Boolean.toString(isLazyPersist));
XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
XMLUtils.addSaxString(contentHandler, "OVERWRITE",
@@ -730,10 +713,6 @@ public abstract class FSEditLogOp {
this.atime = Long.parseLong(st.getValue("ATIME"));
this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
- String lazyPersistString = st.getValueOrNull("LAZY_PERSIST");
- this.isLazyPersist =
- lazyPersistString != null && Boolean.parseBoolean(lazyPersistString);
-
this.clientName = st.getValue("CLIENT_NAME");
this.clientMachine = st.getValue("CLIENT_MACHINE");
this.overwrite = Boolean.parseBoolean(st.getValueOrNull("OVERWRITE"));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index fd0f467..3c58ed9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -786,8 +786,6 @@ public class FSImageFormat {
counter.increment();
}
- // Images in the old format will not have the lazyPersist flag so it is
- // safe to pass false always.
final INodeFile file = new INodeFile(inodeId, localName, permissions,
modificationTime, atime, blocks, replication, blockSize, (byte)0);
if (underConstruction) {
@@ -892,10 +890,8 @@ public class FSImageFormat {
in.readShort());
final long preferredBlockSize = in.readLong();
- // LazyPersist flag will not be present in old image formats and hence
- // can be safely set to false always.
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
- accessTime, replication, preferredBlockSize, (byte)0, false, null);
+ accessTime, replication, preferredBlockSize, (byte) 0, null);
}
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 8cce6d2..5631b2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -290,7 +290,7 @@ public final class FSImageFormatPBINode {
final INodeFile file = new INodeFile(n.getId(),
n.getName().toByteArray(), permissions, f.getModificationTime(),
f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
- (byte)f.getStoragePolicyID(), f.hasIsLazyPersist() ? f.getIsLazyPersist() : false);
+ (byte)f.getStoragePolicyID());
if (f.hasAcl()) {
file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
@@ -400,8 +400,7 @@ public final class FSImageFormatPBINode {
.setPermission(buildPermissionStatus(file, state.getStringMap()))
.setPreferredBlockSize(file.getPreferredBlockSize())
.setReplication(file.getFileReplication())
- .setStoragePolicyID(file.getLocalStoragePolicyID())
- .setIsLazyPersist(file.getLazyPersistFlag());
+ .setStoragePolicyID(file.getLocalStoragePolicyID());
AclFeature f = file.getAclFeature();
if (f != null) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7dd3874..3639b2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2356,6 +2356,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
src = FSDirectory.resolvePath(src, pathComponents, dir);
+ INode inode = dir.getINode(src);
// get the corresponding policy and make sure the policy name is valid
BlockStoragePolicy policy = blockManager.getStoragePolicy(policyName);
@@ -2741,7 +2742,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (parent != null && mkdirsRecursively(parent.toString(),
permissions, true, now())) {
newNode = dir.addFile(src, permissions, replication, blockSize,
- isLazyPersist, holder, clientMachine);
+ holder, clientMachine);
}
if (newNode == null) {
@@ -2756,6 +2757,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
newNode = dir.getInode(newNode.getId()).asFile();
}
+ setNewINodeStoragePolicy(newNode, iip, isLazyPersist);
+
// record file record in log, record new generation stamp
getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -2770,6 +2773,37 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
+ private void setNewINodeStoragePolicy(INodeFile inode,
+ INodesInPath iip,
+ boolean isLazyPersist)
+ throws IOException {
+
+ if (isLazyPersist) {
+ BlockStoragePolicy lpPolicy =
+ blockManager.getStoragePolicy("LAZY_PERSIST");
+
+ // Set LAZY_PERSIST storage policy if the flag was passed to
+ // CreateFile.
+ if (lpPolicy == null) {
+ throw new HadoopIllegalArgumentException(
+ "The LAZY_PERSIST storage policy has been disabled " +
+ "by the administrator.");
+ }
+ inode.setStoragePolicyID(lpPolicy.getId(),
+ iip.getLatestSnapshotId());
+ } else {
+ BlockStoragePolicy effectivePolicy =
+ blockManager.getStoragePolicy(inode.getStoragePolicyID());
+
+ if (effectivePolicy != null &&
+ effectivePolicy.isCopyOnCreateFile()) {
+ // Copy effective policy from ancestor directory to current file.
+ inode.setStoragePolicyID(effectivePolicy.getId(),
+ iip.getLatestSnapshotId());
+ }
+ }
+ }
+
/**
* Append to an existing file for append.
* <p>
@@ -2809,8 +2843,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
+ src + " for client " + clientMachine);
}
INodeFile myFile = INodeFile.valueOf(inode, src, true);
+ final BlockStoragePolicy lpPolicy =
+ blockManager.getStoragePolicy("LAZY_PERSIST");
- if (myFile.getLazyPersistFlag()) {
+ if (lpPolicy != null &&
+ lpPolicy.getId() == myFile.getStoragePolicyID()) {
throw new UnsupportedOperationException(
"Cannot append to lazy persist file " + src);
}
@@ -5166,6 +5203,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throws SafeModeException, AccessControlException,
UnresolvedLinkException, IOException {
+ BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
+
List<BlockCollection> filesToDelete = new ArrayList<BlockCollection>();
writeLock();
@@ -5176,7 +5215,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
while (it.hasNext()) {
Block b = it.next();
BlockInfo blockInfo = blockManager.getStoredBlock(b);
- if (blockInfo.getBlockCollection().getLazyPersistFlag()) {
+ if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) {
filesToDelete.add(blockInfo.getBlockCollection());
}
}
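appendFileInternal now detects lazy-persist files by comparing storage policy ids instead of reading the removed flag, and the lazy-persist cleanup loop above does the same when collecting files whose replicas were lost. From a client, appending to such a file still fails (sketch; the path is illustrative):

    try {
      fs.append(new Path("/scratch/tmp.dat")); // created with LAZY_PERSIST
    } catch (IOException e) {
      // Propagates the NameNode's "Cannot append to lazy persist file ..."
    }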
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index aeddcad..b04c0bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -80,8 +80,7 @@ public class INodeFile extends INodeWithAdditionalFields
PREFERRED_BLOCK_SIZE(null, 48, 1),
REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 12, 1),
STORAGE_POLICY_ID(REPLICATION.BITS, BlockStoragePolicySuite.ID_BIT_LENGTH,
- 0),
- LAZY_PERSIST(REPLICATION.BITS, 4, 0);
+ 0);
private final LongBitFormat BITS;
@@ -107,7 +106,6 @@ public class INodeFile extends INodeWithAdditionalFields
h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
h = REPLICATION.BITS.combine(replication, h);
h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
- h = LAZY_PERSIST.BITS.combine(isLazyPersist ? 1 : 0, h);
return h;
}
static boolean getLazyPersistFlag(long header) {
@@ -123,15 +121,15 @@ public class INodeFile extends INodeWithAdditionalFields
long atime, BlockInfo[] blklist, short replication,
long preferredBlockSize) {
this(id, name, permissions, mtime, atime, blklist, replication,
- preferredBlockSize, false);
+ preferredBlockSize, (byte) 0);
}
INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
long atime, BlockInfo[] blklist, short replication,
- long preferredBlockSize, byte storagePolicyID, boolean isLazyPersist) {
+ long preferredBlockSize, byte storagePolicyID) {
super(id, name, permissions, mtime, atime);
header = HeaderFormat.toLong(preferredBlockSize, replication,
- storagePolicyID, isLazyPersist);
+ storagePolicyID);
this.blocks = blklist;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
index 05ce78f..9577d1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
@@ -32,8 +32,6 @@ public interface INodeFileAttributes extends INodeAttributes {
/** @return preferred block size in bytes */
public long getPreferredBlockSize();
- public boolean getLazyPersistFlag();
-
/** @return the header as a long. */
public long getHeaderLong();
@@ -49,10 +47,10 @@ public interface INodeFileAttributes extends INodeAttributes {
public SnapshotCopy(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime, long accessTime,
short replication, long preferredBlockSize, byte storagePolicyID,
- boolean isTransient, XAttrFeature xAttrsFeature) {
+ XAttrFeature xAttrsFeature) {
super(name, permissions, aclFeature, modificationTime, accessTime,
xAttrsFeature);
- header = HeaderFormat.toLong(preferredBlockSize, replication, isTransient, storagePolicyID);
+ header = HeaderFormat.toLong(preferredBlockSize, replication, storagePolicyID);
}
public SnapshotCopy(INodeFile file) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index 72fc9df..512913b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -70,8 +70,6 @@ public class NameNodeLayoutVersion {
"creating file with overwrite"),
XATTRS_NAMESPACE_EXT(-59, "Increase number of xattr namespaces"),
BLOCK_STORAGE_POLICY(-60, "Block Storage policy");
- LAZY_PERSIST_FILES(-61, "Support for optional lazy persistence of "
- + " files with reduced durability guarantees");
private final FeatureInfo info;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index b1727c6..e7598d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -222,9 +222,7 @@ public class FSImageFormatPBSnapshot {
fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
fileInPb.getPreferredBlockSize(),
(byte)fileInPb.getStoragePolicyID(),
- fileInPb.hasIsLazyPersist() ? fileInPb.getIsLazyPersist() : false,
xAttrs);
-
}
FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index 9297c64..b68d842 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -434,7 +434,6 @@ class FSImageLoader {
f.getPermission(), stringTable);
map.put("accessTime", f.getAccessTime());
map.put("blockSize", f.getPreferredBlockSize());
- map.put("lazyPersist", f.getIsLazyPersist());
map.put("group", p.getGroupName());
map.put("length", getFileSize(f));
map.put("modificationTime", f.getModificationTime());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 744fc75..99617b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -247,10 +247,6 @@ public final class PBImageXmlWriter {
.o("perferredBlockSize", f.getPreferredBlockSize())
.o("permission", dumpPermission(f.getPermission()));
- if (f.hasIsLazyPersist()) {
- o("lazyPersist", f.getIsLazyPersist());
- }
-
if (f.getBlocksCount() > 0) {
out.print("<blocks>");
for (BlockProto b : f.getBlocksList()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 85edfab..588f6c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -139,7 +139,6 @@ message INodeSection {
optional AclFeatureProto acl = 8;
optional XAttrFeatureProto xAttrs = 9;
optional uint32 storagePolicyID = 10;
- optional bool isLazyPersist = 11 [default = false];
}
message INodeDirectory {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 95404b3..928d0d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -56,6 +56,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.hdfs.StorageType.DEFAULT;
import static org.apache.hadoop.hdfs.StorageType.RAM_DISK;
import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNot.not;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
@@ -68,6 +69,8 @@ public class TestLazyPersistFiles {
((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL);
}
+ private static final byte LAZY_PERSIST_POLICY_ID = (byte) 15;
+
private static final int THREADPOOL_SIZE = 10;
private static final short REPL_FACTOR = 1;
@@ -100,19 +103,20 @@ public class TestLazyPersistFiles {
}
@Test (timeout=300000)
- public void testFlagNotSetByDefault() throws IOException {
+ public void testPolicyNotSetByDefault() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
makeTestFile(path, 0, false);
- // Stat the file and check that the lazyPersist flag is returned back.
+ // Stat the file and check that the LAZY_PERSIST policy is not
+ // returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
- assertThat(status.isLazyPersist(), is(false));
+ assertThat(status.getStoragePolicy(), not(LAZY_PERSIST_POLICY_ID));
}
@Test (timeout=300000)
- public void testFlagPropagation() throws IOException {
+ public void testPolicyPropagation() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
@@ -120,11 +124,11 @@ public class TestLazyPersistFiles {
makeTestFile(path, 0, true);
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
- assertThat(status.isLazyPersist(), is(true));
+ assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
@Test (timeout=300000)
- public void testFlagPersistenceInEditLog() throws IOException {
+ public void testPolicyPersistenceInEditLog() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
@@ -134,11 +138,11 @@ public class TestLazyPersistFiles {
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
- assertThat(status.isLazyPersist(), is(true));
+ assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
@Test (timeout=300000)
- public void testFlagPersistenceInFsImage() throws IOException {
+ public void testPolicyPersistenceInFsImage() throws IOException {
startUpCluster(false, -1);
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path path = new Path("/" + METHOD_NAME + ".dat");
@@ -152,7 +156,7 @@ public class TestLazyPersistFiles {
// Stat the file and check that the lazyPersist flag is returned back.
HdfsFileStatus status = client.getFileInfo(path.toString());
- assertThat(status.isLazyPersist(), is(true));
+ assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
@Test (timeout=300000)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
index 4270f8c..b6ac287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
@@ -255,7 +255,7 @@ public class TestScrLazyPersistFiles {
for (FsVolumeSpi volume : volumes) {
if (volume.getStorageType() == RAM_DISK) {
- ((FsTransientVolumeImpl) volume).setCapacityForTesting(ramDiskStorageLimit);
+ ((FsVolumeImpl) volume).setCapacityForTesting(ramDiskStorageLimit);
}
}
}
@@ -263,13 +263,6 @@ public class TestScrLazyPersistFiles {
LOG.info("Cluster startup complete");
}
- private void startUpCluster(final int numDataNodes,
- final StorageType[] storageTypes,
- final long ramDiskStorageLimit)
- throws IOException {
- startUpCluster(numDataNodes, storageTypes, ramDiskStorageLimit, false);
- }
-
private void makeTestFile(Path path, long length, final boolean isLazyPersist)
throws IOException {
@@ -324,14 +317,6 @@ public class TestScrLazyPersistFiles {
BLOCK_SIZE, REPL_FACTOR, seed, true);
}
- private boolean verifyReadRandomFile(
- Path path, int fileLength, int seed) throws IOException {
- byte contents[] = DFSTestUtil.readFileBuffer(fs, path);
- byte expected[] = DFSTestUtil.
- calculateFileContentsFromSeed(seed, fileLength);
- return Arrays.equals(contents, expected);
- }
-
private void triggerBlockReport()
throws IOException, InterruptedException {
// Trigger block report to NN
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d61daff9/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 37ea234..2c2a7a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -4554,42 +4554,6 @@
</comparators>
</test>
- <test> <!-- TESTED -->
- <description>put: The LazyPersist flag is set when requested</description>
- <test-commands>
- <command>-fs NAMENODE -mkdir /dirLp</command>
- <command>-fs NAMENODE -put -l CLITEST_DATA/data15bytes /dirLp/data15bytes</command>
- <command>-fs NAMENODE -stat %l /dirLp/data15bytes</command>
- </test-commands>
- <cleanup-commands>
- <command>-fs NAMENODE -rm -r /dirLp/</command>
- </cleanup-commands>
- <comparators>
- <comparator>
- <type>RegexpComparator</type>
- <expected-output>^true</expected-output>
- </comparator>
- </comparators>
- </test>
-
- <test> <!-- TESTED -->
- <description>put: The LazyPersist flag is not set by default</description>
- <test-commands>
- <command>-fs NAMENODE -mkdir /dirLp</command>
- <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dirLp/data15bytes</command>
- <command>-fs NAMENODE -stat %l /dirLp/data15bytes</command>
- </test-commands>
- <cleanup-commands>
- <command>-fs NAMENODE -rm -r /dirLp/</command>
- </cleanup-commands>
- <comparators>
- <comparator>
- <type>RegexpComparator</type>
- <expected-output>^false</expected-output>
- </comparator>
- </comparators>
- </test>
-
<!-- Tests for copyFromLocal -->
<test> <!-- TESTED -->
<description>copyFromLocal: copying file into a file (absolute path)</description>