Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/08/16 02:37:25 UTC
svn commit: r1158072 [5/7] - in /hadoop/common/branches/HDFS-1623/hdfs: ./
ivy/ src/c++/libhdfs/ src/contrib/ src/contrib/fuse-dfs/ src/java/
src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/ser...
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Tue Aug 16 00:37:15 2011
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.io.PrintWriter;
import java.net.URI;
import java.net.URISyntaxException;
+import java.net.URL;
import javax.net.SocketFactory;
import javax.servlet.ServletContext;
@@ -36,11 +37,14 @@ import org.apache.hadoop.hdfs.DFSConfigK
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ServletUtil;
import org.znerd.xmlenc.XMLOutputter;
/** Servlets for file checksum */
@@ -52,6 +56,32 @@ public class FileChecksumServlets {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
+ /** Create a redirection URL */
+ private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
+ HttpServletRequest request, NameNode nn)
+ throws IOException {
+ final String hostname = host instanceof DatanodeInfo
+ ? ((DatanodeInfo)host).getHostName() : host.getHost();
+ final String scheme = request.getScheme();
+ final int port = "https".equals(scheme)
+ ? (Integer)getServletContext().getAttribute("datanode.https.port")
+ : host.getInfoPort();
+ final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");
+
+ String dtParam = "";
+ if (UserGroupInformation.isSecurityEnabled()) {
+ String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
+ dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
+ }
+ String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
+ String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
+
+ return new URL(scheme, hostname, port,
+ "/getFileChecksum" + encodedPath + '?' +
+ "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
+ dtParam + addrParam);
+ }
+
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
@@ -62,12 +92,8 @@ public class FileChecksumServlets {
context);
final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
try {
- final URI uri = createRedirectUri("/getFileChecksum", ugi, datanode,
- request, namenode);
- response.sendRedirect(uri.toURL().toString());
- } catch(URISyntaxException e) {
- throw new ServletException(e);
- //response.getWriter().println(e.toString());
+ response.sendRedirect(
+ createRedirectURL(ugi, datanode, request, namenode).toString());
} catch (IOException e) {
response.sendError(400, e.getMessage());
}
@@ -84,7 +110,7 @@ public class FileChecksumServlets {
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
final PrintWriter out = response.getWriter();
- final String filename = getFilename(request, response);
+ final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
xml.declaration();
@@ -103,12 +129,12 @@ public class FileChecksumServlets {
datanode, conf, getUGI(request, conf));
final ClientProtocol nnproxy = dfs.getNamenode();
final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
- filename, nnproxy, socketFactory, socketTimeout);
+ path, nnproxy, socketFactory, socketTimeout);
MD5MD5CRC32FileChecksum.write(xml, checksum);
} catch(IOException ioe) {
- writeXml(ioe, filename, xml);
+ writeXml(ioe, path, xml);
} catch (InterruptedException e) {
- writeXml(e, filename, xml);
+ writeXml(e, path, xml);
}
xml.endDocument();
}
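The switch from createRedirectUri() to createRedirectURL() above trades the multi-argument java.net.URI constructor, which re-encodes its components and can throw URISyntaxException, for java.net.URL, which takes a single pre-encoded file part and throws only MalformedURLException (an IOException). A minimal standalone sketch of the difference, not part of this patch, using a hypothetical datanode host:

import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;

public class RedirectUrlSketch {
  public static void main(String[] args)
      throws URISyntaxException, MalformedURLException {
    // Old style: URI's seven-argument constructor encodes path and query
    // itself and declares URISyntaxException, forcing the extra catch
    // block the patch removes.
    URI uri = new URI("http", null, "dn1.example.com", 50075,
        "/getFileChecksum/user/foo", "ugi=alice", null);
    // New style: URL takes one already-encoded "file" component, so the
    // servlet can pass the raw request path straight through.
    URL url = new URL("http", "dn1.example.com", 50075,
        "/getFileChecksum/user/foo?ugi=alice");
    System.out.println(uri);
    System.out.println(url);
  }
}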
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Tue Aug 16 00:37:15 2011
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
-import java.net.URI;
import java.net.URISyntaxException;
+import java.net.URL;
import java.security.PrivilegedExceptionAction;
import javax.servlet.http.HttpServletRequest;
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ServletUtil;
/** Redirect queries about the hosted filesystem to an appropriate datanode.
* @see org.apache.hadoop.hdfs.HftpFileSystem
@@ -44,22 +45,25 @@ public class FileDataServlet extends Dfs
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
- /** Create a redirection URI */
- protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
- ClientProtocol nnproxy, HttpServletRequest request, String dt)
- throws IOException, URISyntaxException {
+ /** Create a redirection URL */
+ private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status,
+ UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
+ throws IOException {
String scheme = request.getScheme();
final LocatedBlocks blks = nnproxy.getBlockLocations(
- i.getFullPath(new Path(parent)).toUri().getPath(), 0, 1);
- final DatanodeID host = pickSrcDatanode(blks, i);
+ status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
+ final DatanodeID host = pickSrcDatanode(blks, status);
final String hostname;
if (host instanceof DatanodeInfo) {
hostname = ((DatanodeInfo)host).getHostName();
} else {
hostname = host.getHost();
}
-
- String dtParam="";
+ final int port = "https".equals(scheme)
+ ? (Integer)getServletContext().getAttribute("datanode.https.port")
+ : host.getInfoPort();
+
+ String dtParam = "";
if (dt != null) {
dtParam=JspHelper.getDelegationTokenUrlParam(dt);
}
@@ -70,12 +74,10 @@ public class FileDataServlet extends Dfs
String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
- return new URI(scheme, null, hostname,
- "https".equals(scheme)
- ? (Integer)getServletContext().getAttribute("datanode.https.port")
- : host.getInfoPort(),
- "/streamFile" + i.getFullName(parent),
- "ugi=" + ugi.getShortUserName() + dtParam + addrParam, null);
+ return new URL(scheme, hostname, port,
+ "/streamFile" + encodedPath + '?' +
+ "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
+ dtParam + addrParam);
}
/** Select a datanode to service this request.
@@ -112,20 +114,15 @@ public class FileDataServlet extends Dfs
@Override
public Void run() throws IOException {
ClientProtocol nn = createNameNodeProxy();
- final String path = request.getPathInfo() != null ? request
- .getPathInfo() : "/";
-
+ final String path = ServletUtil.getDecodedPath(request, "/data");
+ final String encodedPath = ServletUtil.getRawPath(request, "/data");
String delegationToken = request
.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
HdfsFileStatus info = nn.getFileInfo(path);
if (info != null && !info.isDir()) {
- try {
- response.sendRedirect(createUri(path, info, ugi, nn, request,
- delegationToken).toURL().toString());
- } catch (URISyntaxException e) {
- response.getWriter().println(e.toString());
- }
+ response.sendRedirect(createRedirectURL(path, encodedPath,
+ info, ugi, nn, request, delegationToken).toString());
} else if (info == null) {
response.sendError(400, "File not found " + path);
} else {
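FileDataServlet now pulls both a decoded path (for the namenode lookup) and a raw, still percent-encoded path (for the redirect URL) from the request. ServletUtil.getDecodedPath/getRawPath are not shown in this diff; a hedged sketch of the distinction they are assumed to make:

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;

public class PathSketch {
  public static void main(String[] args) throws UnsupportedEncodingException {
    final String servletPath = "/data";            // assumed prefix
    final String requestUri = "/data/dir/a%2Bb";   // hypothetical request
    // Raw form keeps the percent-encoding so it can be re-embedded in a URL.
    String raw = requestUri.substring(servletPath.length());  // "/dir/a%2Bb"
    // Decoded form is what the namenode actually resolves.
    String decoded = URLDecoder.decode(raw, "UTF-8");          // "/dir/a+b"
    System.out.println(raw + " -> " + decoded);
  }
}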
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java Tue Aug 16 00:37:15 2011
@@ -23,14 +23,21 @@ import org.apache.commons.logging.LogFac
import java.io.File;
import java.io.IOException;
import java.util.List;
+import java.util.Comparator;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.ComparisonChain;
/**
* Journal manager for the common case of edits files being written
@@ -45,6 +52,15 @@ class FileJournalManager implements Jour
private final StorageDirectory sd;
private int outputBufferCapacity = 512*1024;
+ private static final Pattern EDITS_REGEX = Pattern.compile(
+ NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
+ private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
+ NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
+
+ @VisibleForTesting
+ StoragePurger purger
+ = new NNStorageRetentionManager.DeletionStoragePurger();
+
public FileJournalManager(StorageDirectory sd) {
this.sd = sd;
}
@@ -91,13 +107,13 @@ class FileJournalManager implements Jour
}
@Override
- public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
+ public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
File[] files = FileUtil.listFiles(sd.getCurrentDir());
- List<FoundEditLog> editLogs =
- FSImageTransactionalStorageInspector.matchEditLogs(files);
- for (FoundEditLog log : editLogs) {
- if (log.getStartTxId() < minTxIdToKeep &&
+ List<EditLogFile> editLogs =
+ FileJournalManager.matchEditLogs(files);
+ for (EditLogFile log : editLogs) {
+ if (log.getFirstTxId() < minTxIdToKeep &&
log.getLastTxId() < minTxIdToKeep) {
purger.purgeLog(log);
}
@@ -110,5 +126,167 @@ class FileJournalManager implements Jour
File f = NNStorage.getInProgressEditsFile(sd, segmentStartsAtTxId);
return new EditLogFileInputStream(f);
}
+
+ /**
+ * Find all editlog segments starting at or above the given txid.
+ * @param fromTxId the txnid which to start looking
+ * @return a list of remote edit logs
+ * @throws IOException if edit logs cannot be listed.
+ */
+ List<RemoteEditLog> getRemoteEditLogs(long firstTxId) throws IOException {
+ File currentDir = sd.getCurrentDir();
+ List<EditLogFile> allLogFiles = matchEditLogs(
+ FileUtil.listFiles(currentDir));
+ List<RemoteEditLog> ret = Lists.newArrayListWithCapacity(
+ allLogFiles.size());
+
+ for (EditLogFile elf : allLogFiles) {
+ if (elf.isCorrupt() || elf.isInProgress()) continue;
+ if (elf.getFirstTxId() >= firstTxId) {
+ ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId));
+ } else if ((firstTxId > elf.getFirstTxId()) &&
+ (firstTxId <= elf.getLastTxId())) {
+ throw new IOException("Asked for firstTxId " + firstTxId
+ + " which is in the middle of file " + elf.file);
+ }
+ }
+
+ return ret;
+ }
+
+ static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
+ List<EditLogFile> ret = Lists.newArrayList();
+ for (File f : filesInStorage) {
+ String name = f.getName();
+ // Check for edits
+ Matcher editsMatch = EDITS_REGEX.matcher(name);
+ if (editsMatch.matches()) {
+ try {
+ long startTxId = Long.valueOf(editsMatch.group(1));
+ long endTxId = Long.valueOf(editsMatch.group(2));
+ ret.add(new EditLogFile(f, startTxId, endTxId));
+ } catch (NumberFormatException nfe) {
+ LOG.error("Edits file " + f + " has improperly formatted " +
+ "transaction ID");
+ // skip
+ }
+ }
+
+ // Check for in-progress edits
+ Matcher inProgressEditsMatch = EDITS_INPROGRESS_REGEX.matcher(name);
+ if (inProgressEditsMatch.matches()) {
+ try {
+ long startTxId = Long.valueOf(inProgressEditsMatch.group(1));
+ ret.add(
+ new EditLogFile(f, startTxId, EditLogFile.UNKNOWN_END));
+ } catch (NumberFormatException nfe) {
+ LOG.error("In-progress edits file " + f + " has improperly " +
+ "formatted transaction ID");
+ // skip
+ }
+ }
+ }
+ return ret;
+ }
+
+ /**
+ * Record of an edit log that has been located and had its filename parsed.
+ */
+ static class EditLogFile {
+ private File file;
+ private final long firstTxId;
+ private long lastTxId;
+
+ private EditLogValidation cachedValidation = null;
+ private boolean isCorrupt = false;
+
+ static final long UNKNOWN_END = -1;
+
+ final static Comparator<EditLogFile> COMPARE_BY_START_TXID
+ = new Comparator<EditLogFile>() {
+ public int compare(EditLogFile a, EditLogFile b) {
+ return ComparisonChain.start()
+ .compare(a.getFirstTxId(), b.getFirstTxId())
+ .compare(a.getLastTxId(), b.getLastTxId())
+ .result();
+ }
+ };
+
+ EditLogFile(File file,
+ long firstTxId, long lastTxId) {
+ assert lastTxId == UNKNOWN_END || lastTxId >= firstTxId;
+ assert firstTxId > 0;
+ assert file != null;
+
+ this.firstTxId = firstTxId;
+ this.lastTxId = lastTxId;
+ this.file = file;
+ }
+
+ public void finalizeLog() throws IOException {
+ long numTransactions = validateLog().numTransactions;
+ long lastTxId = firstTxId + numTransactions - 1;
+ File dst = new File(file.getParentFile(),
+ NNStorage.getFinalizedEditsFileName(firstTxId, lastTxId));
+ LOG.info("Finalizing edits log " + file + " by renaming to "
+ + dst.getName());
+ if (!file.renameTo(dst)) {
+ throw new IOException("Couldn't finalize log " +
+ file + " to " + dst);
+ }
+ this.lastTxId = lastTxId;
+ file = dst;
+ }
+ long getFirstTxId() {
+ return firstTxId;
+ }
+
+ long getLastTxId() {
+ return lastTxId;
+ }
+
+ EditLogValidation validateLog() throws IOException {
+ if (cachedValidation == null) {
+ cachedValidation = EditLogFileInputStream.validateEditLog(file);
+ }
+ return cachedValidation;
+ }
+
+ boolean isInProgress() {
+ return (lastTxId == UNKNOWN_END);
+ }
+
+ File getFile() {
+ return file;
+ }
+
+ void markCorrupt() {
+ isCorrupt = true;
+ }
+
+ boolean isCorrupt() {
+ return isCorrupt;
+ }
+
+ void moveAsideCorruptFile() throws IOException {
+ assert isCorrupt;
+
+ File src = file;
+ File dst = new File(src.getParent(), src.getName() + ".corrupt");
+ boolean success = src.renameTo(dst);
+ if (!success) {
+ throw new IOException(
+ "Couldn't rename corrupt log " + src + " to " + dst);
+ }
+ file = dst;
+ }
+
+ @Override
+ public String toString() {
+ return String.format("EditLogFile(file=%s,first=%019d,last=%019d,"
+ +"inProgress=%b,corrupt=%b)", file.toString(),
+ firstTxId, lastTxId, isInProgress(), isCorrupt);
+ }
+ }
}
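matchEditLogs() above parses segment boundaries out of file names. A standalone sketch of the same matching, assuming the literal prefixes "edits" and "edits_inprogress" for NameNodeFile.EDITS and NameNodeFile.EDITS_INPROGRESS:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class EditLogNameSketch {
  private static final Pattern EDITS_REGEX =
      Pattern.compile("edits_(\\d+)-(\\d+)");
  private static final Pattern EDITS_INPROGRESS_REGEX =
      Pattern.compile("edits_inprogress_(\\d+)");

  public static void main(String[] args) {
    // Finalized segments carry both transaction boundaries in the name.
    Matcher m = EDITS_REGEX.matcher(
        "edits_0000000000000000001-0000000000000000042");
    if (m.matches()) {
      long firstTxId = Long.parseLong(m.group(1));  // 1
      long lastTxId = Long.parseLong(m.group(2));   // 42
      System.out.println(firstTxId + "-" + lastTxId);
    }
    // In-progress segments name only the start; the end stays
    // EditLogFile.UNKNOWN_END until finalizeLog() renames the file.
    System.out.println(EDITS_INPROGRESS_REGEX
        .matcher("edits_inprogress_43").matches());  // true
  }
}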
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Tue Aug 16 00:37:15 2011
@@ -30,6 +30,7 @@ import javax.servlet.http.HttpServletRes
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.security.UserGroupInformation;
/**
@@ -59,13 +60,12 @@ public class FsckServlet extends DfsServ
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
final FSNamesystem namesystem = nn.getNamesystem();
+ final BlockManager bm = namesystem.getBlockManager();
final int totalDatanodes =
- namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
- final short minReplication = namesystem.getMinReplication();
-
+ namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
new NamenodeFsck(conf, nn,
- NamenodeJspHelper.getNetworkTopology(nn), pmap, out,
- totalDatanodes, minReplication, remoteAddress).fsck();
+ bm.getDatanodeManager().getNetworkTopology(), pmap, out,
+ totalDatanodes, bm.minReplication, remoteAddress).fsck();
return null;
}
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java Tue Aug 16 00:37:15 2011
@@ -55,7 +55,7 @@ interface JournalManager {
* @param purger the purging implementation to use
* @throws IOException if purging fails
*/
- void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
+ void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException;
/**
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java Tue Aug 16 00:37:15 2011
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.*;
@@ -86,8 +87,7 @@ public class ListPathsServlet extends Df
*/
protected Map<String,String> buildRoot(HttpServletRequest request,
XMLOutputter doc) {
- final String path = request.getPathInfo() != null
- ? request.getPathInfo() : "/";
+ final String path = ServletUtil.getDecodedPath(request, "/listPaths");
final String exclude = request.getParameter("exclude") != null
? request.getParameter("exclude") : "";
final String filter = request.getParameter("filter") != null
@@ -135,6 +135,7 @@ public class ListPathsServlet extends Df
final Map<String, String> root = buildRoot(request, doc);
final String path = root.get("path");
+ final String filePath = ServletUtil.getDecodedPath(request, "/listPaths");
try {
final boolean recur = "yes".equals(root.get("recursive"));
@@ -153,7 +154,7 @@ public class ListPathsServlet extends Df
doc.attribute(m.getKey(), m.getValue());
}
- HdfsFileStatus base = nn.getFileInfo(path);
+ HdfsFileStatus base = nn.getFileInfo(filePath);
if ((base != null) && base.isDir()) {
writeInfo(base.getFullPath(new Path(path)), base, doc);
}
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java Tue Aug 16 00:37:15 2011
@@ -27,8 +27,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
+import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import com.google.common.collect.Lists;
@@ -80,14 +80,14 @@ public class NNStorageRetentionManager {
// If fsimage_N is the image we want to keep, then we need to keep
// all txns > N. We can remove anything < N+1, since fsimage_N
// reflects the state up to and including N.
- editLog.purgeLogsOlderThan(minImageTxId + 1, purger);
+ editLog.purgeLogsOlderThan(minImageTxId + 1);
}
private void purgeCheckpointsOlderThan(
FSImageTransactionalStorageInspector inspector,
long minTxId) {
- for (FoundFSImage image : inspector.getFoundImages()) {
- if (image.getTxId() < minTxId) {
+ for (FSImageFile image : inspector.getFoundImages()) {
+ if (image.getCheckpointTxId() < minTxId) {
LOG.info("Purging old image " + image);
purger.purgeImage(image);
}
@@ -101,10 +101,10 @@ public class NNStorageRetentionManager {
*/
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
- List<FoundFSImage> images = inspector.getFoundImages();
+ List<FSImageFile> images = inspector.getFoundImages();
TreeSet<Long> imageTxIds = Sets.newTreeSet();
- for (FoundFSImage image : images) {
- imageTxIds.add(image.getTxId());
+ for (FSImageFile image : images) {
+ imageTxIds.add(image.getCheckpointTxId());
}
List<Long> imageTxIdsList = Lists.newArrayList(imageTxIds);
@@ -124,18 +124,18 @@ public class NNStorageRetentionManager {
* Interface responsible for disposing of old checkpoints and edit logs.
*/
static interface StoragePurger {
- void purgeLog(FoundEditLog log);
- void purgeImage(FoundFSImage image);
+ void purgeLog(EditLogFile log);
+ void purgeImage(FSImageFile image);
}
static class DeletionStoragePurger implements StoragePurger {
@Override
- public void purgeLog(FoundEditLog log) {
+ public void purgeLog(EditLogFile log) {
deleteOrWarn(log.getFile());
}
@Override
- public void purgeImage(FoundFSImage image) {
+ public void purgeImage(FSImageFile image) {
deleteOrWarn(image.getFile());
deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
}
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Tue Aug 16 00:37:15 2011
@@ -58,6 +58,11 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
+import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_LENGTH;
+import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_DEPTH;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -146,7 +151,7 @@ import org.apache.hadoop.util.StringUtil
* NameNode state, for example partial blocksMap etc.
**********************************************************/
@InterfaceAudience.Private
-public class NameNode implements NamenodeProtocols, FSConstants {
+public class NameNode implements NamenodeProtocols {
static{
HdfsConfiguration.init();
}
@@ -654,7 +659,7 @@ public class NameNode implements Namenod
"Unexpected not positive size: "+size);
}
- return namesystem.getBlocks(datanode, size);
+ return namesystem.getBlockManager().getBlocks(datanode, size);
}
@Override // NamenodeProtocol
@@ -750,8 +755,8 @@ public class NameNode implements Namenod
+src+" for "+clientName+" at "+clientMachine);
}
if (!checkPathLength(src)) {
- throw new IOException("create: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ throw new IOException("create: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.startFile(src,
new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
@@ -898,7 +903,7 @@ public class NameNode implements Namenod
DatanodeInfo[] nodes = blocks[i].getLocations();
for (int j = 0; j < nodes.length; j++) {
DatanodeInfo dn = nodes[j];
- namesystem.markBlockAsCorrupt(blk, dn);
+ namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn);
}
}
}
@@ -944,8 +949,8 @@ public class NameNode implements Namenod
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
}
if (!checkPathLength(dst)) {
- throw new IOException("rename: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ throw new IOException("rename: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
boolean ret = namesystem.renameTo(src, dst);
if (ret) {
@@ -968,8 +973,8 @@ public class NameNode implements Namenod
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
}
if (!checkPathLength(dst)) {
- throw new IOException("rename: Pathname too long. Limit "
- + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+ throw new IOException("rename: Pathname too long. Limit "
+ + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.renameTo(src, dst, options);
metrics.incrFilesRenamed();
@@ -1100,7 +1105,8 @@ public class NameNode implements Namenod
@Override // ClientProtocol
public void refreshNodes() throws IOException {
// TODO:HA decide on OperationCategory for this
- namesystem.refreshNodes(new HdfsConfiguration());
+ namesystem.getBlockManager().getDatanodeManager().refreshNodes(
+ new HdfsConfiguration());
}
@Override // NamenodeProtocol
@@ -1119,7 +1125,7 @@ public class NameNode implements Namenod
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
// TODO:HA decide on OperationCategory for this
- return namesystem.getEditLogManifest(sinceTxId);
+ return namesystem.getEditLog().getEditLogManifest(sinceTxId);
}
@Override // ClientProtocol
@@ -1167,7 +1173,7 @@ public class NameNode implements Namenod
@Override // ClientProtocol
public void setBalancerBandwidth(long bandwidth) throws IOException {
// TODO:HA decide on OperationCategory for this
- namesystem.setBalancerBandwidth(bandwidth);
+ namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
}
@Override // ClientProtocol
@@ -1271,7 +1277,7 @@ public class NameNode implements Namenod
+ " blocks");
}
- namesystem.processReport(nodeReg, poolId, blist);
+ namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
if (getFSImage().isUpgradeFinalized())
return new DatanodeCommand.Finalize(poolId);
return null;
@@ -1286,7 +1292,8 @@ public class NameNode implements Namenod
+"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
}
for (int i = 0; i < blocks.length; i++) {
- namesystem.blockReceived(nodeReg, poolId, blocks[i], delHints[i]);
+ namesystem.getBlockManager().blockReceived(
+ nodeReg, poolId, blocks[i], delHints[i]);
}
}
@@ -1305,7 +1312,7 @@ public class NameNode implements Namenod
LOG.warn("Disk error on " + dnName + ": " + msg);
} else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
LOG.warn("Fatal disk error on " + dnName + ": " + msg);
- namesystem.removeDatanode(nodeReg);
+ namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
} else {
LOG.info("Error report from " + dnName + ": " + msg);
}
@@ -1347,7 +1354,7 @@ public class NameNode implements Namenod
* @throws IOException
*/
public void verifyVersion(int version) throws IOException {
- if (version != LAYOUT_VERSION)
+ if (version != FSConstants.LAYOUT_VERSION)
throw new IncorrectVersionException(version, "data node");
}
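Dropping "implements FSConstants" from NameNode removes the constant-interface anti-pattern; constants are now reached through qualified names or static imports, as in the FSConstants.LAYOUT_VERSION hunk above. A minimal sketch with a hypothetical constant value:

interface SketchConstants {
  int LAYOUT_VERSION = -38;  // hypothetical value, for illustration only
}

class VersionCheckSketch {
  // Before: "class NameNode implements NamenodeProtocols, FSConstants" let
  // a bare LAYOUT_VERSION resolve through the interface. After: the
  // qualified reference makes the constant's origin explicit at each use.
  static boolean matches(int version) {
    return version == SketchConstants.LAYOUT_VERSION;
  }
}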
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Aug 16 00:37:15 2011
@@ -646,46 +646,77 @@ public class NamenodeFsck {
/** {@inheritDoc} */
public String toString() {
StringBuilder res = new StringBuilder();
- res.append("Status: " + (isHealthy() ? "HEALTHY" : "CORRUPT"));
- res.append("\n Total size:\t" + totalSize + " B");
- if (totalOpenFilesSize != 0)
- res.append(" (Total open files size: " + totalOpenFilesSize + " B)");
- res.append("\n Total dirs:\t" + totalDirs);
- res.append("\n Total files:\t" + totalFiles);
- if (totalOpenFiles != 0)
- res.append(" (Files currently being written: " +
- totalOpenFiles + ")");
- res.append("\n Total blocks (validated):\t" + totalBlocks);
- if (totalBlocks > 0) res.append(" (avg. block size "
- + (totalSize / totalBlocks) + " B)");
- if (totalOpenFilesBlocks != 0)
- res.append(" (Total open file blocks (not validated): " +
- totalOpenFilesBlocks + ")");
- if (corruptFiles > 0) {
- res.append("\n ********************************");
- res.append("\n CORRUPT FILES:\t" + corruptFiles);
+ res.append("Status: ").append((isHealthy() ? "HEALTHY" : "CORRUPT"))
+ .append("\n Total size:\t").append(totalSize).append(" B");
+ if (totalOpenFilesSize != 0) {
+ res.append(" (Total open files size: ").append(totalOpenFilesSize)
+ .append(" B)");
+ }
+ res.append("\n Total dirs:\t").append(totalDirs).append(
+ "\n Total files:\t").append(totalFiles);
+ if (totalOpenFiles != 0) {
+ res.append(" (Files currently being written: ").append(totalOpenFiles)
+ .append(")");
+ }
+ res.append("\n Total blocks (validated):\t").append(totalBlocks);
+ if (totalBlocks > 0) {
+ res.append(" (avg. block size ").append((totalSize / totalBlocks))
+ .append(" B)");
+ }
+ if (totalOpenFilesBlocks != 0) {
+ res.append(" (Total open file blocks (not validated): ").append(
+ totalOpenFilesBlocks).append(")");
+ }
+ if (corruptFiles > 0) {
+ res.append("\n ********************************").append(
+ "\n CORRUPT FILES:\t").append(corruptFiles);
if (missingSize > 0) {
- res.append("\n MISSING BLOCKS:\t" + missingIds.size());
- res.append("\n MISSING SIZE:\t\t" + missingSize + " B");
+ res.append("\n MISSING BLOCKS:\t").append(missingIds.size()).append(
+ "\n MISSING SIZE:\t\t").append(missingSize).append(" B");
}
if (corruptBlocks > 0) {
- res.append("\n CORRUPT BLOCKS: \t" + corruptBlocks);
+ res.append("\n CORRUPT BLOCKS: \t").append(corruptBlocks);
}
res.append("\n ********************************");
}
- res.append("\n Minimally replicated blocks:\t" + numMinReplicatedBlocks);
- if (totalBlocks > 0) res.append(" (" + ((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
- res.append("\n Over-replicated blocks:\t" + numOverReplicatedBlocks);
- if (totalBlocks > 0) res.append(" (" + ((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
- res.append("\n Under-replicated blocks:\t" + numUnderReplicatedBlocks);
- if (totalBlocks > 0) res.append(" (" + ((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
- res.append("\n Mis-replicated blocks:\t\t" + numMisReplicatedBlocks);
- if (totalBlocks > 0) res.append(" (" + ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
- res.append("\n Default replication factor:\t" + replication);
- res.append("\n Average block replication:\t" + getReplicationFactor());
- res.append("\n Corrupt blocks:\t\t" + corruptBlocks);
- res.append("\n Missing replicas:\t\t" + missingReplicas);
- if (totalReplicas > 0) res.append(" (" + ((float) (missingReplicas * 100) / (float) totalReplicas) + " %)");
+ res.append("\n Minimally replicated blocks:\t").append(
+ numMinReplicatedBlocks);
+ if (totalBlocks > 0) {
+ res.append(" (").append(
+ ((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks))
+ .append(" %)");
+ }
+ res.append("\n Over-replicated blocks:\t")
+ .append(numOverReplicatedBlocks);
+ if (totalBlocks > 0) {
+ res.append(" (").append(
+ ((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks))
+ .append(" %)");
+ }
+ res.append("\n Under-replicated blocks:\t").append(
+ numUnderReplicatedBlocks);
+ if (totalBlocks > 0) {
+ res.append(" (").append(
+ ((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks))
+ .append(" %)");
+ }
+ res.append("\n Mis-replicated blocks:\t\t")
+ .append(numMisReplicatedBlocks);
+ if (totalBlocks > 0) {
+ res.append(" (").append(
+ ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks))
+ .append(" %)");
+ }
+ res.append("\n Default replication factor:\t").append(replication)
+ .append("\n Average block replication:\t").append(
+ getReplicationFactor()).append("\n Corrupt blocks:\t\t").append(
+ corruptBlocks).append("\n Missing replicas:\t\t").append(
+ missingReplicas);
+ if (totalReplicas > 0) {
+ res.append(" (").append(
+ ((float) (missingReplicas * 100) / (float) totalReplicas)).append(
+ " %)");
+ }
return res.toString();
}
}
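The fsck report refactor replaces string concatenation inside append() calls, where every '+' chain allocates a throwaway StringBuilder, with chained appends into the one existing buffer. A compilable sketch of the pattern, with stand-in values:

public class AppendSketch {
  public static void main(String[] args) {
    long totalSize = 1024, totalDirs = 3;  // stand-in values
    StringBuilder res = new StringBuilder();
    // Before: res.append("\n Total size:\t" + totalSize + " B");
    // After: each value is appended directly, no intermediate String.
    res.append("\n Total size:\t").append(totalSize).append(" B")
       .append("\n Total dirs:\t").append(totalDirs);
    System.out.println(res);
  }
}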
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Tue Aug 16 00:37:15 2011
@@ -42,13 +42,14 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -229,14 +230,10 @@ class NamenodeJspHelper {
void generateHealthReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
FSNamesystem fsn = nn.getNamesystem();
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- fsn.DFSNodesStatus(live, dead);
- // If a data node has been first included in the include list,
- // then decommissioned, then removed from both include and exclude list.
- // We make the web console to "forget" this node by not displaying it.
- fsn.removeDecomNodeFromList(live);
- fsn.removeDecomNodeFromList(dead);
+ final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, dead, true);
int liveDecommissioned = 0;
for (DatanodeDescriptor d : live) {
@@ -248,8 +245,7 @@ class NamenodeJspHelper {
deadDecommissioned += d.isDecommissioned() ? 1 : 0;
}
- ArrayList<DatanodeDescriptor> decommissioning = fsn
- .getDecommissioningNodes();
+ final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
sorterField = request.getParameter("sorter/field");
sorterOrder = request.getParameter("sorter/order");
@@ -349,7 +345,7 @@ class NamenodeJspHelper {
+ colTxt() + ":" + colTxt() + decommissioning.size()
+ rowTxt() + colTxt("Excludes missing blocks.")
+ "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt()
- + fsn.getUnderReplicatedNotMissingBlocks()
+ + fsn.getBlockManager().getUnderReplicatedNotMissingBlocks()
+ "</table></div><br>\n");
if (live.isEmpty() && dead.isEmpty()) {
@@ -370,15 +366,10 @@ class NamenodeJspHelper {
return token == null ? null : token.encodeToUrlString();
}
- /** @return the network topology. */
- static NetworkTopology getNetworkTopology(final NameNode namenode) {
- return namenode.getNamesystem().getBlockManager().getDatanodeManager(
- ).getNetworkTopology();
- }
-
/** @return a randomly chosen datanode. */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
- return (DatanodeDescriptor)getNetworkTopology(namenode).chooseRandom(
+ return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
+ ).getDatanodeManager().getNetworkTopology().chooseRandom(
NodeBase.ROOT);
}
@@ -564,12 +555,14 @@ class NamenodeJspHelper {
void generateNodesList(ServletContext context, JspWriter out,
HttpServletRequest request) throws IOException {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
- nn.getNamesystem().DFSNodesStatus(live, dead);
- nn.getNamesystem().removeDecomNodeFromList(live);
- nn.getNamesystem().removeDecomNodeFromList(dead);
+ final FSNamesystem ns = nn.getNamesystem();
+ final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
+
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, dead, true);
+
InetSocketAddress nnSocketAddress = (InetSocketAddress) context
.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
@@ -678,8 +671,7 @@ class NamenodeJspHelper {
}
} else if (whatNodes.equals("DECOMMISSIONING")) {
// Decommissioning Nodes
- ArrayList<DatanodeDescriptor> decommissioning = nn.getNamesystem()
- .getDecommissioningNodes();
+ final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
out.print("<br> <a name=\"DecommissioningNodes\" id=\"title\"> "
+ " Decommissioning Datanodes : " + decommissioning.size()
+ "</a><br><br>\n");
@@ -715,16 +707,17 @@ class NamenodeJspHelper {
static class XMLBlockInfo {
final Block block;
final INodeFile inode;
- final FSNamesystem fsn;
+ final BlockManager blockManager;
- public XMLBlockInfo(FSNamesystem fsn, Long blockId) {
- this.fsn = fsn;
+ XMLBlockInfo(FSNamesystem fsn, Long blockId) {
+ this.blockManager = fsn.getBlockManager();
+
if (blockId == null) {
this.block = null;
this.inode = null;
} else {
this.block = new Block(blockId);
- this.inode = fsn.getBlockManager().getINode(block);
+ this.inode = blockManager.getINode(block);
}
}
@@ -798,31 +791,25 @@ class NamenodeJspHelper {
}
doc.startTag("replicas");
-
- if (fsn.getBlockManager().blocksMap.contains(block)) {
- Iterator<DatanodeDescriptor> it =
- fsn.getBlockManager().blocksMap.nodeIterator(block);
-
- while (it.hasNext()) {
- doc.startTag("replica");
-
- DatanodeDescriptor dd = it.next();
-
- doc.startTag("host_name");
- doc.pcdata(dd.getHostName());
- doc.endTag();
-
- boolean isCorrupt = fsn.getCorruptReplicaBlockIds(0,
- block.getBlockId()) != null;
-
- doc.startTag("is_corrupt");
- doc.pcdata(""+isCorrupt);
- doc.endTag();
-
- doc.endTag(); // </replica>
- }
+ for(final Iterator<DatanodeDescriptor> it = blockManager.datanodeIterator(block);
+ it.hasNext(); ) {
+ doc.startTag("replica");
- }
+ DatanodeDescriptor dd = it.next();
+
+ doc.startTag("host_name");
+ doc.pcdata(dd.getHostName());
+ doc.endTag();
+
+ boolean isCorrupt = blockManager.getCorruptReplicaBlockIds(0,
+ block.getBlockId()) != null;
+
+ doc.startTag("is_corrupt");
+ doc.pcdata(""+isCorrupt);
+ doc.endTag();
+
+ doc.endTag(); // </replica>
+ }
doc.endTag(); // </replicas>
}
@@ -834,14 +821,14 @@ class NamenodeJspHelper {
// utility class used in corrupt_replicas_xml.jsp
static class XMLCorruptBlockInfo {
- final FSNamesystem fsn;
final Configuration conf;
final Long startingBlockId;
final int numCorruptBlocks;
+ final BlockManager blockManager;
- public XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
+ XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
int numCorruptBlocks, Long startingBlockId) {
- this.fsn = fsn;
+ this.blockManager = fsn.getBlockManager();
this.conf = conf;
this.numCorruptBlocks = numCorruptBlocks;
this.startingBlockId = startingBlockId;
@@ -864,17 +851,16 @@ class NamenodeJspHelper {
doc.endTag();
doc.startTag("num_missing_blocks");
- doc.pcdata(""+fsn.getMissingBlocksCount());
+ doc.pcdata(""+blockManager.getMissingBlocksCount());
doc.endTag();
doc.startTag("num_corrupt_replica_blocks");
- doc.pcdata(""+fsn.getCorruptReplicaBlocks());
+ doc.pcdata(""+blockManager.getCorruptReplicaBlocksCount());
doc.endTag();
doc.startTag("corrupt_replica_block_ids");
- long[] corruptBlockIds
- = fsn.getCorruptReplicaBlockIds(numCorruptBlocks,
- startingBlockId);
+ final long[] corruptBlockIds = blockManager.getCorruptReplicaBlockIds(
+ numCorruptBlocks, startingBlockId);
if (corruptBlockIds != null) {
for (Long blockId: corruptBlockIds) {
doc.startTag("block_id");
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Tue Aug 16 00:37:15 2011
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ServletUtil;
import org.mortbay.jetty.InclusiveByteRange;
@InterfaceAudience.Private
@@ -57,13 +58,14 @@ public class StreamFile extends DfsServl
final DataNode datanode = (DataNode) context.getAttribute("datanode");
return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
}
-
+
@SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
- final String path = request.getPathInfo() != null ?
- request.getPathInfo() : "/";
+ final String path = ServletUtil.getDecodedPath(request, "/streamFile");
+ final String rawPath = ServletUtil.getRawPath(request, "/streamFile");
final String filename = JspHelper.validatePath(path);
+ final String rawFilename = JspHelper.validatePath(rawPath);
if (filename == null) {
response.setContentType("text/plain");
PrintWriter out = response.getWriter();
@@ -98,7 +100,7 @@ public class StreamFile extends DfsServl
} else {
// No ranges, so send entire file
response.setHeader("Content-Disposition", "attachment; filename=\"" +
- filename + "\"");
+ rawFilename + "\"");
response.setContentType("application/octet-stream");
response.setHeader(CONTENT_LENGTH, "" + fileLen);
StreamFile.copyFromOffset(in, out, 0L, fileLen);
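StreamFile now quotes the raw (percent-encoded) filename in the Content-Disposition header while still opening the decoded path. A hedged sketch of why: header values must stay ASCII-safe, so the encoded form survives the HTTP round trip intact.

import javax.servlet.http.HttpServletResponse;

public class DispositionSketch {
  // rawFilename is assumed already percent-encoded, e.g. "/dir/caf%C3%A9.txt".
  static void setAttachment(HttpServletResponse response, String rawFilename) {
    response.setHeader("Content-Disposition",
        "attachment; filename=\"" + rawFilename + "\"");
  }
}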
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Tue Aug 16 00:37:15 2011
@@ -41,7 +41,7 @@ import com.google.common.collect.Lists;
/**
* This class provides fetching a specified file from the NameNode.
*/
-class TransferFsImage implements FSConstants {
+class TransferFsImage {
public final static String CONTENT_LENGTH = "Content-Length";
public final static String MD5_HEADER = "X-MD5-Digest";
@@ -69,6 +69,8 @@ class TransferFsImage implements FSConst
static void downloadEditsToStorage(String fsName, RemoteEditLog log,
NNStorage dstStorage) throws IOException {
+ assert log.getStartTxId() > 0 && log.getEndTxId() > 0 :
+ "bad log: " + log;
String fileid = GetImageServlet.getParamStringForLog(
log, dstStorage);
String fileName = NNStorage.getFinalizedEditsFileName(
@@ -122,7 +124,7 @@ class TransferFsImage implements FSConst
static void getFileServer(OutputStream outstream, File localfile,
DataTransferThrottler throttler)
throws IOException {
- byte buf[] = new byte[BUFFER_SIZE];
+ byte buf[] = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
FileInputStream infile = null;
try {
infile = new FileInputStream(localfile);
@@ -137,7 +139,7 @@ class TransferFsImage implements FSConst
&& localfile.getAbsolutePath().contains("fsimage")) {
// Test sending image shorter than localfile
long len = localfile.length();
- buf = new byte[(int)Math.min(len/2, BUFFER_SIZE)];
+ buf = new byte[(int)Math.min(len/2, FSConstants.IO_FILE_BUFFER_SIZE)];
// This will read at most half of the image
// and the rest of the image will be sent over the wire
infile.read(buf);
@@ -177,7 +179,7 @@ class TransferFsImage implements FSConst
static MD5Hash getFileClient(String nnHostPort,
String queryString, List<File> localPaths,
NNStorage dstStorage, boolean getChecksum) throws IOException {
- byte[] buf = new byte[BUFFER_SIZE];
+ byte[] buf = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
str.append(queryString);
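With TransferFsImage no longer implementing FSConstants, the transfer buffer is sized by an explicit FSConstants.IO_FILE_BUFFER_SIZE reference. A hedged sketch of the getFileServer() copy loop that buffer feeds, with a stand-in constant:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;

public class FileServerSketch {
  static final int IO_FILE_BUFFER_SIZE = 4096;  // stand-in value

  // Read the local image file in fixed-size chunks and forward each chunk.
  static void serve(File localfile, OutputStream out) throws IOException {
    byte[] buf = new byte[IO_FILE_BUFFER_SIZE];
    FileInputStream infile = new FileInputStream(localfile);
    try {
      int num;
      while ((num = infile.read(buf)) > 0) {
        out.write(buf, 0, num);
      }
    } finally {
      infile.close();
    }
  }
}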
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java Tue Aug 16 00:37:15 2011
@@ -20,11 +20,15 @@ package org.apache.hadoop.hdfs.server.pr
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.Comparator;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.io.Writable;
-public class RemoteEditLog implements Writable {
+import com.google.common.base.Function;
+import com.google.common.collect.ComparisonChain;
+
+public class RemoteEditLog implements Writable, Comparable<RemoteEditLog> {
private long startTxId = FSConstants.INVALID_TXID;
private long endTxId = FSConstants.INVALID_TXID;
@@ -60,5 +64,34 @@ public class RemoteEditLog implements Wr
startTxId = in.readLong();
endTxId = in.readLong();
}
+
+ @Override
+ public int compareTo(RemoteEditLog log) {
+ return ComparisonChain.start()
+ .compare(startTxId, log.startTxId)
+ .compare(endTxId, log.endTxId)
+ .result();
+ }
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof RemoteEditLog)) return false;
+ return this.compareTo((RemoteEditLog)o) == 0;
+ }
+
+ @Override
+ public int hashCode() {
+ return (int) (startTxId * endTxId);
+ }
+
+ /**
+ * Guava <code>Function</code> which applies {@link #getStartTxId()}
+ */
+ public static final Function<RemoteEditLog, Long> GET_START_TXID =
+ new Function<RemoteEditLog, Long>() {
+ @Override
+ public Long apply(RemoteEditLog log) {
+ return log.getStartTxId();
+ }
+ };
}
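RemoteEditLog now orders by (startTxId, endTxId) via Guava's ComparisonChain and defines equals() as compareTo() == 0, so sorted collections and equality agree. A self-contained sketch of the same contract:

import com.google.common.collect.ComparisonChain;

public class TxRangeSketch implements Comparable<TxRangeSketch> {
  final long startTxId, endTxId;

  TxRangeSketch(long startTxId, long endTxId) {
    this.startTxId = startTxId;
    this.endTxId = endTxId;
  }

  @Override
  public int compareTo(TxRangeSketch other) {
    // Compare start txid first, then end txid, mirroring the patch.
    return ComparisonChain.start()
        .compare(startTxId, other.startTxId)
        .compare(endTxId, other.endTxId)
        .result();
  }

  @Override
  public boolean equals(Object o) {
    return (o instanceof TxRangeSketch) && compareTo((TxRangeSketch) o) == 0;
  }

  @Override
  public int hashCode() {
    // Mirrors the patch's startTxId * endTxId; legal (equal objects hash
    // equal) but collision-prone, e.g. for swapped ranges.
    return (int) (startTxId * endTxId);
  }
}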
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/namenode/FileDataServletAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/namenode/FileDataServletAspects.aj?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/namenode/FileDataServletAspects.aj (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/aop/org/apache/hadoop/hdfs/server/namenode/FileDataServletAspects.aj Tue Aug 16 00:37:15 2011
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.net.URI;
-import java.net.URISyntaxException;
+import java.net.URL;
+import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
@@ -30,18 +30,17 @@ import org.apache.hadoop.security.UserGr
public aspect FileDataServletAspects {
static final Log LOG = FileDataServlet.LOG;
- pointcut callCreateUri() : call (URI FileDataServlet.createUri(
- String, HdfsFileStatus, UserGroupInformation, ClientProtocol,
+ pointcut callCreateUrl() : call (URL FileDataServlet.createRedirectURL(
+ String, String, HdfsFileStatus, UserGroupInformation, ClientProtocol,
HttpServletRequest, String));
/** Replace host name with "localhost" for unit test environment. */
- URI around () throws URISyntaxException : callCreateUri() {
- final URI original = proceed();
- LOG.info("FI: original uri = " + original);
- final URI replaced = new URI(original.getScheme(), original.getUserInfo(),
- "localhost", original.getPort(), original.getPath(),
- original.getQuery(), original.getFragment()) ;
- LOG.info("FI: replaced uri = " + replaced);
+ URL around () throws IOException : callCreateUrl() {
+ final URL original = proceed();
+ LOG.info("FI: original url = " + original);
+ final URL replaced = new URL("http", "localhost", original.getPort(),
+ original.getPath() + '?' + original.getQuery());
+ LOG.info("FI: replaced url = " + replaced);
return replaced;
}
}
Propchange: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/hdfs/src/test/hdfs:1152502-1153927
+/hadoop/common/trunk/hdfs/src/test/hdfs:1152502-1158071
/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
/hadoop/core/trunk/src/test/hdfs:776175-785643
/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs:987665-1095512
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/TestResolveHdfsSymlink.java Tue Aug 16 00:37:15 2011
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
@@ -47,8 +48,8 @@ public class TestResolveHdfsSymlink {
public static void setUp() throws IOException {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
- cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
cluster.waitActive();
+ NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
}
@AfterClass
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Tue Aug 16 00:37:15 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -52,7 +53,7 @@ public class TestViewFileSystemHdfs exte
SupportsBlocks = true;
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
- cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+ NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
fHdfs = cluster.getFileSystem();
defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Tue Aug 16 00:37:15 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -52,7 +53,7 @@ public class TestViewFsHdfs extends View
SupportsBlocks = true;
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
- cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+ NameNodeAdapter.getDtSecretManager(cluster.getNamesystem()).startThreads();
fc = FileContext.getFileContext(cluster.getURI(0), CONF);
defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Tue Aug 16 00:37:15 2011
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -61,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
@@ -375,10 +377,9 @@ public class DFSTestUtil {
/*
* Return the total capacity of all live DNs.
*/
- public static long getLiveDatanodeCapacity(FSNamesystem ns) {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- ns.DFSNodesStatus(live, dead);
+ public static long getLiveDatanodeCapacity(DatanodeManager dm) {
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, null, false);
long capacity = 0;
for (final DatanodeDescriptor dn : live) {
capacity += dn.getCapacity();
@@ -389,21 +390,20 @@ public class DFSTestUtil {
/*
* Return the capacity of the given live DN.
*/
- public static long getDatanodeCapacity(FSNamesystem ns, int index) {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
- ns.DFSNodesStatus(live, dead);
+ public static long getDatanodeCapacity(DatanodeManager dm, int index) {
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ dm.fetchDatanodes(live, null, false);
return live.get(index).getCapacity();
}
/*
* Wait for the given # live/dead DNs, total capacity, and # vol failures.
*/
- public static void waitForDatanodeStatus(FSNamesystem ns, int expectedLive,
+ public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
int expectedDead, long expectedVolFails, long expectedTotalCapacity,
long timeout) throws InterruptedException, TimeoutException {
- ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
- ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+ final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
final int ATTEMPTS = 10;
int count = 0;
long currTotalCapacity = 0;
@@ -413,7 +413,7 @@ public class DFSTestUtil {
Thread.sleep(timeout);
live.clear();
dead.clear();
- ns.DFSNodesStatus(live, dead);
+ dm.fetchDatanodes(live, dead, false);
currTotalCapacity = 0;
volFails = 0;
for (final DatanodeDescriptor dd : live) {
@@ -670,7 +670,7 @@ public class DFSTestUtil {
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(s, writeTimeout),
- DataNode.SMALL_BUFFER_SIZE));
+ FSConstants.SMALL_BUFFER_SIZE));
final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));
// send the request
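
DFSTestUtil's datanode-status helpers now take a DatanodeManager rather than an FSNamesystem, and enumerate nodes with fetchDatanodes(live, dead, flag) instead of the removed DFSNodesStatus call. A sketch of how a caller obtains the manager and uses the reworked helper; the accessor chain is the one appearing in the TestBlocksScheduledCounter and TestDecommission hunks below, while the surrounding scaffolding is illustrative:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

    public class FetchDatanodesSketch {
      public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(
            new HdfsConfiguration()).numDataNodes(2).build();
        try {
          cluster.waitActive();
          final DatanodeManager dm = cluster.getNamesystem()
              .getBlockManager().getDatanodeManager();
          // Same call shape as the patch: dead list may be null, flag false.
          final List<DatanodeDescriptor> live =
              new ArrayList<DatanodeDescriptor>();
          dm.fetchDatanodes(live, null, false);
          long capacity = 0;
          for (final DatanodeDescriptor dn : live) {
            capacity += dn.getCapacity();
          }
          System.out.println("live=" + live.size() + ", capacity=" + capacity);
        } finally {
          cluster.shutdown();
        }
      }
    }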
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Aug 16 00:37:15 2011
@@ -48,13 +48,13 @@ import org.apache.hadoop.hdfs.protocol.C
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -71,7 +71,6 @@ import org.apache.hadoop.security.Refres
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -1659,9 +1658,7 @@ public class MiniDFSCluster {
* Set the softLimit and hardLimit of client lease periods
*/
public void setLeasePeriod(long soft, long hard) {
- final FSNamesystem namesystem = getNamesystem();
- namesystem.leaseManager.setLeasePeriod(soft, hard);
- namesystem.lmthread.interrupt();
+ NameNodeAdapter.setLeasePeriod(getNamesystem(), soft, hard);
}
/**
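
The two removed lines spell out what the new NameNodeAdapter.setLeasePeriod has to do, so a reconstruction is possible, hedged accordingly (the real method may differ; it would sit in the namenode package so the leaseManager field and lease-monitor thread stay reachable):

    // In org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter:
    public static void setLeasePeriod(final FSNamesystem ns,
        long soft, long hard) {
      ns.leaseManager.setLeasePeriod(soft, hard);
      ns.lmthread.interrupt(); // wake the lease monitor so new limits apply
    }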
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Tue Aug 16 00:37:15 2011
@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
/**
* This class tests DatanodeDescriptor.getBlocksScheduled() at the
@@ -50,7 +51,9 @@ public class TestBlocksScheduledCounter
((DFSOutputStream)(out.getWrappedStream())).hflush();
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
- cluster.getNamesystem().DFSNodesStatus(dnList, dnList);
+ final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
+ ).getDatanodeManager();
+ dm.fetchDatanodes(dnList, dnList, false);
DatanodeDescriptor dn = dnList.get(0);
assertEquals(1, dn.getBlocksScheduled());
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java Tue Aug 16 00:37:15 2011
@@ -17,20 +17,16 @@
*/
package org.apache.hadoop.hdfs;
-import java.io.*;
+import java.io.DataOutputStream;
+import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
public class TestDFSRemove extends junit.framework.TestCase {
- static int countLease(MiniDFSCluster cluster) {
- return cluster.getNamesystem().leaseManager.countLease();
- }
-
final Path dir = new Path("/test/remove/");
void list(FileSystem fs, String name) throws IOException {
@@ -76,7 +72,7 @@ public class TestDFSRemove extends junit
fs.delete(a, false);
}
// wait 3 heartbeat intervals, so that all blocks are deleted.
- Thread.sleep(3 * FSConstants.HEARTBEAT_INTERVAL * 1000);
+ Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
// all blocks should be gone now.
long dfsUsedFinal = getTotalDfsUsed(cluster);
assertEquals("All blocks should be gone. start=" + dfsUsedStart
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java Tue Aug 16 00:37:15 2011
@@ -17,16 +17,18 @@
*/
package org.apache.hadoop.hdfs;
-import java.io.*;
+import java.io.DataOutputStream;
+import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
public class TestDFSRename extends junit.framework.TestCase {
static int countLease(MiniDFSCluster cluster) {
- return cluster.getNamesystem().leaseManager.countLease();
+ return NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).countLease();
}
final Path dir = new Path("/test/rename/");
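
countLease keeps its behavior; only the route to the LeaseManager changes. The companion accessor, again sketched from nothing more than the removed call site:

    // In org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter:
    public static LeaseManager getLeaseManager(final FSNamesystem ns) {
      return ns.leaseManager; // direct field access, as in the removed line
    }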
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Tue Aug 16 00:37:15 2011
@@ -17,6 +17,10 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -30,14 +34,12 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import static org.junit.Assert.*;
-
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -224,7 +226,7 @@ public class TestDecommission {
}
nodes.add(nodename);
writeConfigFile(excludeFile, nodes);
- cluster.getNamesystem(nnIndex).refreshNodes(conf);
+ refreshNodes(cluster.getNamesystem(nnIndex), conf);
DatanodeInfo ret = NameNodeAdapter.getDatanode(
cluster.getNamesystem(nnIndex), info[index]);
waitNodeState(ret, waitForState);
@@ -235,7 +237,7 @@ public class TestDecommission {
private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
LOG.info("Recommissioning node: " + decommissionedNode.getName());
writeConfigFile(excludeFile, null);
- cluster.getNamesystem().refreshNodes(conf);
+ refreshNodes(cluster.getNamesystem(), conf);
waitNodeState(decommissionedNode, AdminStates.NORMAL);
}
@@ -284,6 +286,11 @@ public class TestDecommission {
validateCluster(client, numDatanodes);
}
}
+
+ static void refreshNodes(final FSNamesystem ns, final Configuration conf
+ ) throws IOException {
+ ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
+ }
private void verifyStats(NameNode namenode, FSNamesystem fsn,
DatanodeInfo node, boolean decommissioning) throws InterruptedException {
@@ -465,7 +472,7 @@ public class TestDecommission {
// Stop decommissioning and verify stats
writeConfigFile(excludeFile, null);
- fsn.refreshNodes(conf);
+ refreshNodes(fsn, conf);
DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
waitNodeState(ret, AdminStates.NORMAL);
verifyStats(namenode, fsn, ret, false);
@@ -509,7 +516,7 @@ public class TestDecommission {
writeConfigFile(hostsFile, list);
for (int j = 0; j < numNameNodes; j++) {
- cluster.getNamesystem(j).refreshNodes(conf);
+ refreshNodes(cluster.getNamesystem(j), conf);
DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java Tue Aug 16 00:37:15 2011
@@ -146,8 +146,8 @@ public class TestFileCorruption extends
// report corrupted block by the third datanode
DatanodeRegistration dnR =
DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
- cluster.getNamesystem().markBlockAsCorrupt(blk,
- new DatanodeInfo(dnR));
+ cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
+ blk, new DatanodeInfo(dnR));
// open the file
fs.open(FILE_PATH);
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Tue Aug 16 00:37:15 2011
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FsServerDefa
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -100,23 +101,23 @@ public class TestFileCreation extends ju
*/
public void testServerDefaults() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, FSConstants.DEFAULT_BLOCK_SIZE);
- conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, FSConstants.DEFAULT_BYTES_PER_CHECKSUM);
- conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, FSConstants.DEFAULT_WRITE_PACKET_SIZE);
- conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
- conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
+ conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+ conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT);
+ conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+ conf.setInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT + 1);
+ conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
- .numDataNodes(FSConstants.DEFAULT_REPLICATION_FACTOR + 1)
+ .numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
try {
FsServerDefaults serverDefaults = fs.getServerDefaults();
- assertEquals(FSConstants.DEFAULT_BLOCK_SIZE, serverDefaults.getBlockSize());
- assertEquals(FSConstants.DEFAULT_BYTES_PER_CHECKSUM, serverDefaults.getBytesPerChecksum());
- assertEquals(FSConstants.DEFAULT_WRITE_PACKET_SIZE, serverDefaults.getWritePacketSize());
- assertEquals(FSConstants.DEFAULT_REPLICATION_FACTOR + 1, serverDefaults.getReplication());
- assertEquals(FSConstants.DEFAULT_FILE_BUFFER_SIZE, serverDefaults.getFileBufferSize());
+ assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
+ assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
+ assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
+ assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
+ assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
} finally {
fs.close();
cluster.shutdown();
@@ -269,8 +270,8 @@ public class TestFileCreation extends ju
*/
public void testFileCreationError1() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
- conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
+ conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
if (simulatedStorage) {
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
@@ -343,8 +344,8 @@ public class TestFileCreation extends ju
long leasePeriod = 1000;
System.out.println("testFileCreationError2 start");
Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
- conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
+ conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
if (simulatedStorage) {
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
@@ -403,6 +404,36 @@ public class TestFileCreation extends ju
}
}
+ /** test addBlock(..) when replication<min and excludeNodes==null. */
+ public void testFileCreationError3() throws IOException {
+ System.out.println("testFileCreationError3 start");
+ Configuration conf = new HdfsConfiguration();
+ // create cluster
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ DistributedFileSystem dfs = null;
+ try {
+ cluster.waitActive();
+ dfs = (DistributedFileSystem)cluster.getFileSystem();
+ DFSClient client = dfs.dfs;
+
+ // create a new file.
+ final Path f = new Path("/foo.txt");
+ createFile(dfs, f, 3);
+ try {
+ cluster.getNameNode().addBlock(f.toString(),
+ client.clientName, null, null);
+ fail();
+ } catch(IOException ioe) {
+ FileSystem.LOG.info("GOOD!", ioe);
+ }
+
+ System.out.println("testFileCreationError3 successful");
+ } finally {
+ IOUtils.closeStream(dfs);
+ cluster.shutdown();
+ }
+ }
+
/**
* Test that file leases are persisted across namenode restarts.
* This test is currently not triggered because more HDFS work is
@@ -412,8 +443,8 @@ public class TestFileCreation extends ju
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
- conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
+ conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
if (simulatedStorage) {
conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
}
@@ -724,7 +755,7 @@ public class TestFileCreation extends ju
*/
public void testFileCreationSyncOnClose() throws IOException {
Configuration conf = new HdfsConfiguration();
- conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY, true);
+ conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
@@ -763,8 +794,8 @@ public class TestFileCreation extends ju
final int DATANODE_NUM = 3;
Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
- conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+ conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
+ conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
@@ -853,7 +884,7 @@ public class TestFileCreation extends ju
final int DATANODE_NUM = 3;
Configuration conf = new HdfsConfiguration();
- conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 3);
+ conf.setInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 3);
conf.setBoolean("ipc.client.ping", false); // hdfs timeout is default 60 seconds
conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 second
Modified: hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/branches/HDFS-1623/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Tue Aug 16 00:37:15 2011
@@ -91,7 +91,7 @@ public class TestFileStatus {
int fileSize, int blockSize) throws IOException {
// Create and write a file that contains three blocks of data
FSDataOutputStream stm = fileSys.create(name, true,
- FSConstants.BUFFER_SIZE, (short)repl, (long)blockSize);
+ FSConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);