Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2013/06/21 08:37:39 UTC
svn commit: r1495297 [22/46] - in /hadoop/common/branches/branch-1-win: ./
bin/ conf/ ivy/ lib/jdiff/ src/c++/libhdfs/docs/
src/c++/libhdfs/tests/conf/ src/contrib/capacity-scheduler/ivy/
src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred...
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Jun 21 06:37:27 2013
@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.shell.Comman
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -255,73 +254,78 @@ public class DFSAdmin extends FsShell {
super(conf);
}
+ protected DistributedFileSystem getDFS() throws IOException {
+ FileSystem fs = getFS();
+ if (!(fs instanceof DistributedFileSystem)) {
+ throw new IllegalArgumentException("FileSystem " + fs.getUri()
+ + " is not a distributed file system");
+ }
+ return (DistributedFileSystem) fs;
+ }
+
/**
* Gives a report on how the FileSystem is doing.
* @exception IOException if the filesystem does not exist.
*/
public void report() throws IOException {
- if (fs instanceof DistributedFileSystem) {
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
- DiskStatus ds = dfs.getDiskStatus();
- long capacity = ds.getCapacity();
- long used = ds.getDfsUsed();
- long remaining = ds.getRemaining();
- long presentCapacity = used + remaining;
- boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
- UpgradeStatusReport status =
- dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
-
- if (mode) {
- System.out.println("Safe mode is ON");
- }
- if (status != null) {
- System.out.println(status.getStatusText(false));
- }
- System.out.println("Configured Capacity: " + capacity
- + " (" + StringUtils.byteDesc(capacity) + ")");
- System.out.println("Present Capacity: " + presentCapacity
- + " (" + StringUtils.byteDesc(presentCapacity) + ")");
- System.out.println("DFS Remaining: " + remaining
- + " (" + StringUtils.byteDesc(remaining) + ")");
- System.out.println("DFS Used: " + used
- + " (" + StringUtils.byteDesc(used) + ")");
- System.out.println("DFS Used%: "
- + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
- + "%");
-
- /* These counts are not always upto date. They are updated after
- * iteration of an internal list. Should be updated in a few seconds to
- * minutes. Use "-metaSave" to list of all such blocks and accurate
- * counts.
- */
- System.out.println("Under replicated blocks: " +
- dfs.getUnderReplicatedBlocksCount());
- System.out.println("Blocks with corrupt replicas: " +
- dfs.getCorruptBlocksCount());
- System.out.println("Missing blocks: " +
- dfs.getMissingBlocksCount());
-
- System.out.println();
+ DistributedFileSystem dfs = getDFS();
+ DiskStatus ds = dfs.getDiskStatus();
+ long capacity = ds.getCapacity();
+ long used = ds.getDfsUsed();
+ long remaining = ds.getRemaining();
+ long presentCapacity = used + remaining;
+ boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
+ UpgradeStatusReport status = dfs
+ .distributedUpgradeProgress(UpgradeAction.GET_STATUS);
+
+ if (mode) {
+ System.out.println("Safe mode is ON");
+ }
+ if (status != null) {
+ System.out.println(status.getStatusText(false));
+ }
+ System.out.println("Configured Capacity: " + capacity + " ("
+ + StringUtils.byteDesc(capacity) + ")");
+ System.out.println("Present Capacity: " + presentCapacity + " ("
+ + StringUtils.byteDesc(presentCapacity) + ")");
+ System.out.println("DFS Remaining: " + remaining + " ("
+ + StringUtils.byteDesc(remaining) + ")");
+ System.out.println("DFS Used: " + used + " (" + StringUtils.byteDesc(used)
+ + ")");
+ System.out.println("DFS Used%: "
+ + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
+ + "%");
+
+ /*
+ * These counts are not always up to date. They are updated after an
+ * iteration of an internal list, so they should be current within a few
+ * seconds to minutes. Use "-metaSave" to list all such blocks with
+ * accurate counts.
+ */
+ System.out.println("Under replicated blocks: "
+ + dfs.getUnderReplicatedBlocksCount());
+ System.out.println("Blocks with corrupt replicas: "
+ + dfs.getCorruptBlocksCount());
+ System.out.println("Missing blocks: " + dfs.getMissingBlocksCount());
+
+ System.out.println();
+
+ System.out.println("-------------------------------------------------");
+
+ DatanodeInfo[] live = dfs.getClient().datanodeReport(
+ DatanodeReportType.LIVE);
+ DatanodeInfo[] dead = dfs.getClient().datanodeReport(
+ DatanodeReportType.DEAD);
+ System.out.println("Datanodes available: " + live.length + " ("
+ + (live.length + dead.length) + " total, " + dead.length + " dead)\n");
- System.out.println("-------------------------------------------------");
-
- DatanodeInfo[] live = dfs.getClient().datanodeReport(
- DatanodeReportType.LIVE);
- DatanodeInfo[] dead = dfs.getClient().datanodeReport(
- DatanodeReportType.DEAD);
- System.out.println("Datanodes available: " + live.length +
- " (" + (live.length + dead.length) + " total, " +
- dead.length + " dead)\n");
-
- for (DatanodeInfo dn : live) {
- System.out.println(dn.getDatanodeReport());
- System.out.println();
- }
- for (DatanodeInfo dn : dead) {
- System.out.println(dn.getDatanodeReport());
- System.out.println();
- }
+ for (DatanodeInfo dn : live) {
+ System.out.println(dn.getDatanodeReport());
+ System.out.println();
}
+ for (DatanodeInfo dn : dead) {
+ System.out.println(dn.getDatanodeReport());
+ System.out.println();
+ }
}
/**
@@ -332,10 +336,6 @@ public class DFSAdmin extends FsShell {
* @exception IOException if the filesystem does not exist.
*/
public void setSafeMode(String[] argv, int idx) throws IOException {
- if (!(fs instanceof DistributedFileSystem)) {
- System.err.println("FileSystem is " + fs.getUri());
- return;
- }
if (idx != argv.length - 1) {
printUsage("-safemode");
return;
@@ -356,7 +356,7 @@ public class DFSAdmin extends FsShell {
printUsage("-safemode");
return;
}
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
boolean inSafeMode = dfs.setSafeMode(action);
//
@@ -386,12 +386,7 @@ public class DFSAdmin extends FsShell {
public int saveNamespace() throws IOException {
int exitCode = -1;
- if (!(fs instanceof DistributedFileSystem)) {
- System.err.println("FileSystem is " + fs.getUri());
- return exitCode;
- }
-
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
dfs.saveNamespace();
exitCode = 0;
@@ -407,12 +402,7 @@ public class DFSAdmin extends FsShell {
public int refreshNodes() throws IOException {
int exitCode = -1;
- if (!(fs instanceof DistributedFileSystem)) {
- System.err.println("FileSystem is " + fs.getUri());
- return exitCode;
- }
-
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
dfs.refreshNodes();
exitCode = 0;
@@ -440,12 +430,7 @@ public class DFSAdmin extends FsShell {
return exitCode;
}
- if (!(fs instanceof DistributedFileSystem)) {
- System.err.println("FileSystem is " + fs.getUri());
- return exitCode;
- }
-
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
dfs.setBalancerBandwidth(bandwidth);
exitCode = 0;
@@ -595,18 +580,10 @@ public class DFSAdmin extends FsShell {
* @exception IOException
*/
public int finalizeUpgrade() throws IOException {
- int exitCode = -1;
-
- if (!(fs instanceof DistributedFileSystem)) {
- System.out.println("FileSystem is " + fs.getUri());
- return exitCode;
- }
-
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
dfs.finalizeUpgrade();
- exitCode = 0;
- return exitCode;
+ return 0;
}
/**
@@ -617,10 +594,6 @@ public class DFSAdmin extends FsShell {
* @exception IOException
*/
public int upgradeProgress(String[] argv, int idx) throws IOException {
- if (!(fs instanceof DistributedFileSystem)) {
- System.out.println("FileSystem is " + fs.getUri());
- return -1;
- }
if (idx != argv.length - 1) {
printUsage("-upgradeProgress");
return -1;
@@ -638,7 +611,7 @@ public class DFSAdmin extends FsShell {
return -1;
}
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
UpgradeStatusReport status = dfs.distributedUpgradeProgress(action);
String statusText = (status == null ?
"There are no upgrades in progress." :
@@ -657,7 +630,7 @@ public class DFSAdmin extends FsShell {
*/
public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
- DistributedFileSystem dfs = (DistributedFileSystem) fs;
+ DistributedFileSystem dfs = getDFS();
dfs.metaSave(pathname);
System.out.println("Created file " + pathname + " on server " +
dfs.getUri());
@@ -936,13 +909,13 @@ public class DFSAdmin extends FsShell {
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
- exitCode = new ClearQuotaCommand(argv, i, fs).runAll();
+ exitCode = new ClearQuotaCommand(argv, i, getDFS()).runAll();
} else if (SetQuotaCommand.matches(cmd)) {
- exitCode = new SetQuotaCommand(argv, i, fs).runAll();
+ exitCode = new SetQuotaCommand(argv, i, getDFS()).runAll();
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
- exitCode = new ClearSpaceQuotaCommand(argv, i, fs).runAll();
+ exitCode = new ClearSpaceQuotaCommand(argv, i, getDFS()).runAll();
} else if (SetSpaceQuotaCommand.matches(cmd)) {
- exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll();
+ exitCode = new SetSpaceQuotaCommand(argv, i, getDFS()).runAll();
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcl();
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DFSck.java Fri Jun 21 06:37:27 2013
@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -111,14 +110,15 @@ public class DFSck extends Configured im
@Override
public Integer run() throws Exception {
- String proto = "http://";
- if(UserGroupInformation.isSecurityEnabled()) {
- System.setProperty("https.cipherSuites", Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
- proto = "https://";
+ if (SecurityUtil.useKsslAuth()) {
+ System.setProperty("https.cipherSuites",
+ Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
}
-
- final StringBuffer url = new StringBuffer(proto);
- url.append(NameNode.getInfoServer(getConf())).append("/fsck?ugi=").append(ugi.getShortUserName()).append("&path=");
+
+ final StringBuffer url = new StringBuffer(
+ NameNode.getHttpUriScheme() + "://");
+ url.append(NameNode.getInfoServer(getConf())).append("/fsck?ugi=")
+ .append(ugi.getShortUserName()).append("&path=");
String dir = "/";
// find top-level dir first
@@ -135,9 +135,10 @@ public class DFSck extends Configured im
else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
}
+
URL path = new URL(url.toString());
- SecurityUtil.fetchServiceTicket(path);
- URLConnection connection = path.openConnection();
+
+ URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Fri Jun 21 06:37:27 2013
@@ -25,6 +25,7 @@ import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
+import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.security.PrivilegedExceptionAction;
@@ -37,14 +38,19 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HftpFileSystem;
+import org.apache.hadoop.hdfs.HsftpFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -66,10 +72,10 @@ public class DelegationTokenFetcher {
private static final String WEBSERVICE = "webservice";
private static final String CANCEL = "cancel";
private static final String RENEW = "renew";
-
+
static {
- // Enable Kerberos sockets
- System.setProperty("https.cipherSuites", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA");
+ // reference a field to make sure the static blocks run
+ int x = Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.size();
}
private static void printUsage(PrintStream err) throws IOException {
@@ -96,9 +102,10 @@ public class DelegationTokenFetcher {
*/
public static void main(final String [] args) throws Exception {
final Configuration conf = new Configuration();
+ setupSsl(conf);
Options fetcherOptions = new Options();
fetcherOptions.addOption(WEBSERVICE, true,
- "HTTPS url to reach the NameNode at");
+ "HTTP/S url to reach the NameNode at");
fetcherOptions.addOption(CANCEL, false, "cancel the token");
fetcherOptions.addOption(RENEW, false, "renew the token");
GenericOptionsParser parser =
@@ -125,137 +132,199 @@ public class DelegationTokenFetcher {
FileSystem local = FileSystem.getLocal(conf);
final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
- // Login the current user
- final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- ugi.doAs(new PrivilegedExceptionAction<Object>() {
- @Override
- public Object run() throws Exception {
-
- if (cancel) {
- for(Token<?> token: readTokens(tokenFile, conf)) {
- if (token.isManaged()) {
- token.cancel(conf);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Cancelled token for " + token.getService());
- }
- }
- }
- } else if (renew) {
- for(Token<?> token: readTokens(tokenFile, conf)) {
- if (token.isManaged()) {
- token.renew(conf);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Renewed token for " + token.getService());
- }
- }
- }
- } else {
- if (webUrl != null) {
- getDTfromRemote(webUrl, null).
- writeTokenStorageFile(tokenFile, conf);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Fetched token via http for " + webUrl);
- }
- } else {
- FileSystem fs = FileSystem.get(conf);
- Token<?> token = fs.getDelegationToken(ugi.getShortUserName());
- Credentials cred = new Credentials();
- cred.addToken(token.getService(), token);
- cred.writeTokenStorageFile(tokenFile, conf);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Fetched token for " + fs.getUri() + " into " +
- tokenFile);
- }
- }
+ if (cancel) {
+ for(Token<?> token: readTokens(tokenFile, conf)) {
+ if (token.isManaged()) {
+ token.cancel(conf);
}
- return null;
- }
- });
-
+ }
+ } else if (renew) {
+ for(Token<?> token: readTokens(tokenFile, conf)) {
+ if (token.isManaged()) {
+ token.renew(conf);
+ }
+ }
+ } else {
+ if (webUrl != null) {
+ URI uri = new URI(webUrl);
+ getDTfromRemote(uri.getScheme(),
+ new InetSocketAddress(uri.getHost(), uri.getPort()),
+ null,
+ conf).
+ writeTokenStorageFile(tokenFile, conf);
+ } else {
+ FileSystem fs = FileSystem.get(conf);
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ Token<?> token = fs.getDelegationToken(ugi.getShortUserName());
+ Credentials cred = new Credentials();
+ cred.addToken(token.getService(), token);
+ cred.writeTokenStorageFile(tokenFile, conf);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Fetched token for " + fs.getUri() + " into " +
+ tokenFile);
+ }
+ }
+ }
}
+ /** Set up SSL resources */
+ public static void setupSsl(Configuration conf) {
+ Configuration sslConf = new Configuration(false);
+ sslConf.addResource(conf.get("dfs.https.client.keystore.resource",
+ "ssl-client.xml"));
+ System.setProperty("javax.net.ssl.trustStore", sslConf.get(
+ "ssl.client.truststore.location", ""));
+ System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get(
+ "ssl.client.truststore.password", ""));
+ System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
+ "ssl.client.truststore.type", "jks"));
+ System.setProperty("javax.net.ssl.keyStore", sslConf.get(
+ "ssl.client.keystore.location", ""));
+ System.setProperty("javax.net.ssl.keyStorePassword", sslConf.get(
+ "ssl.client.keystore.password", ""));
+ System.setProperty("javax.net.ssl.keyPassword", sslConf.get(
+ "ssl.client.keystore.keypassword", ""));
+ System.setProperty("javax.net.ssl.keyStoreType", sslConf.get(
+ "ssl.client.keystore.type", "jks"));
+ }
+
/**
* Utility method to obtain a delegation token over http
- * @param nnHttpAddr Namenode http addr, such as http://namenode:50070
+ * @param protocol whether to use http or https
+ * @param nnAddr the address for the NameNode
+ * @param renewer User that is renewing the ticket in such a request
+ * @param conf the configuration
*/
- static public Credentials getDTfromRemote(String nnAddr,
- String renewer) throws IOException {
- DataInputStream dis = null;
- InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnAddr);
+ static public Credentials getDTfromRemote(String protocol,
+ final InetSocketAddress nnAddr,
+ String renewer,
+ Configuration conf
+ ) throws IOException {
+ final String renewAddress = getRenewAddress(protocol, nnAddr, conf);
+ final boolean https = "https".equals(protocol);
try {
- StringBuffer url = new StringBuffer();
+ StringBuffer url = new StringBuffer(renewAddress);
+ url.append(GetDelegationTokenServlet.PATH_SPEC);
if (renewer != null) {
- url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC).append("?").
- append(GetDelegationTokenServlet.RENEWER).append("=").append(renewer);
- } else {
- url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
+ url.append("?").
+ append(GetDelegationTokenServlet.RENEWER).append("=").
+ append(renewer);
}
if(LOG.isDebugEnabled()) {
LOG.debug("Retrieving token from: " + url);
}
- URL remoteURL = new URL(url.toString());
- SecurityUtil.fetchServiceTicket(remoteURL);
- URLConnection connection = remoteURL.openConnection();
-
- InputStream in = connection.getInputStream();
- Credentials ts = new Credentials();
- dis = new DataInputStream(in);
- ts.readFields(dis);
- for(Token<?> token: ts.getAllTokens()) {
- token.setKind(HftpFileSystem.TOKEN_KIND);
- SecurityUtil.setTokenService(token, serviceAddr);
- }
- return ts;
- } catch (Exception e) {
- throw new IOException("Unable to obtain remote token", e);
- } finally {
- if(dis != null) dis.close();
+ final URL remoteURL = new URL(url.toString());
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ return ugi.doAs(new PrivilegedExceptionAction<Credentials>(){
+ public Credentials run() throws Exception {
+ URLConnection connection =
+ SecurityUtil.openSecureHttpConnection(remoteURL);
+
+ InputStream in = connection.getInputStream();
+ Credentials ts = new Credentials();
+ DataInputStream dis = new DataInputStream(in);
+ try {
+ ts.readFields(dis);
+ for(Token<?> token: ts.getAllTokens()) {
+ if (https) {
+ token.setKind(HsftpFileSystem.TOKEN_KIND);
+ } else {
+ token.setKind(HftpFileSystem.TOKEN_KIND);
+ }
+ SecurityUtil.setTokenService(token, nnAddr);
+ }
+ dis.close();
+ } catch (IOException ie) {
+ IOUtils.cleanup(LOG, dis);
+ }
+ return ts;
+ }
+ });
+ } catch (InterruptedException ie) {
+ return null;
}
}
/**
+ * Get the URI that we use for getting, renewing, and cancelling the
+ * delegation token. For KSSL with hftp that means we need to use
+ * https and the NN's https port.
+ */
+ protected static String getRenewAddress(String protocol,
+ InetSocketAddress addr,
+ Configuration conf) {
+ if (SecurityUtil.useKsslAuth() && "http".equals(protocol)) {
+ protocol = "https";
+ int port =
+ conf.getInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
+ DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
+ addr = new InetSocketAddress(addr.getAddress(), port);
+ }
+ return DFSUtil.createUri(protocol, addr).toString();
+ }
+
+ /**
* Renew a Delegation Token.
- * @param nnAddr the NameNode's address
+ * @param protocol The protocol to renew over (http or https)
+ * @param addr the address of the NameNode
* @param tok the token to renew
+ * @param conf the configuration
* @return the Date that the token will expire next.
* @throws IOException
*/
- static public long renewDelegationToken(String nnAddr,
- Token<DelegationTokenIdentifier> tok
+ static public long renewDelegationToken(String protocol,
+ InetSocketAddress addr,
+ Token<DelegationTokenIdentifier> tok,
+ Configuration conf
) throws IOException {
- StringBuilder buf = new StringBuilder();
- buf.append(nnAddr);
+ final String renewAddress = getRenewAddress(protocol, addr, conf);
+ final StringBuilder buf = new StringBuilder(renewAddress);
+ final String service = tok.getService().toString();
buf.append(RenewDelegationTokenServlet.PATH_SPEC);
buf.append("?");
buf.append(RenewDelegationTokenServlet.TOKEN);
buf.append("=");
buf.append(tok.encodeToUrlString());
- BufferedReader in = null;
- HttpURLConnection connection = null;
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
try {
- URL url = new URL(buf.toString());
- SecurityUtil.fetchServiceTicket(url);
- connection = (HttpURLConnection)url.openConnection();
- in = new BufferedReader(new InputStreamReader
- (connection.getInputStream()));
- long result = Long.parseLong(in.readLine());
- in.close();
- return result;
- } catch (IOException ie) {
- LOG.info("error in renew over HTTP", ie);
- IOException e = null;
- if(connection != null) {
- String resp = connection.getResponseMessage();
- e = getExceptionFromResponse(resp);
- }
+ return ugi.doAs(new PrivilegedExceptionAction<Long>(){
+ public Long run() throws Exception {
+ BufferedReader in = null;
+ HttpURLConnection connection = null;
+ try {
+ URL url = new URL(buf.toString());
+ connection =
+ (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
+ in = new BufferedReader(new InputStreamReader
+ (connection.getInputStream()));
+ long result = Long.parseLong(in.readLine());
+ in.close();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Renewed token for " + service + " via " +
+ renewAddress);
+ }
+ return result;
+ } catch (IOException ie) {
+ LOG.info("Error renewing token for " + renewAddress, ie);
+ IOException e = null;
+ if(connection != null) {
+ String resp = connection.getResponseMessage();
+ e = getExceptionFromResponse(resp);
+ }
- IOUtils.cleanup(LOG, in);
- if(e!=null) {
- LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
- throw e;
- }
- throw ie;
+ IOUtils.cleanup(LOG, in);
+ if (e!=null) {
+ LOG.info("rethrowing exception from HTTP request: " +
+ e.getLocalizedMessage());
+ throw e;
+ }
+ throw ie;
+ }
+ }
+ });
+ } catch (InterruptedException ie) {
+ return 0;
}
}
@@ -288,11 +357,13 @@ public class DelegationTokenFetcher {
* @param tok the token to cancel
* @throws IOException
*/
- static public void cancelDelegationToken(String nnAddr,
- Token<DelegationTokenIdentifier> tok
+ static public void cancelDelegationToken(String protocol,
+ InetSocketAddress addr,
+ Token<DelegationTokenIdentifier> tok,
+ Configuration conf
) throws IOException {
- StringBuilder buf = new StringBuilder();
- buf.append(nnAddr);
+ final String renewAddress = getRenewAddress(protocol, addr, conf);
+ StringBuilder buf = new StringBuilder(renewAddress);
buf.append(CancelDelegationTokenServlet.PATH_SPEC);
buf.append("?");
buf.append(CancelDelegationTokenServlet.TOKEN);
@@ -300,16 +371,33 @@ public class DelegationTokenFetcher {
buf.append(tok.encodeToUrlString());
BufferedReader in = null;
try {
- URL url = new URL(buf.toString());
- SecurityUtil.fetchServiceTicket(url);
- HttpURLConnection connection = (HttpURLConnection) url.openConnection();
- if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
- throw new IOException("Error cancelling token:" +
- connection.getResponseMessage());
+ final URL url = new URL(buf.toString());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("cancelling token at " + buf.toString());
+ }
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ ugi.doAs(new PrivilegedExceptionAction<Void>(){
+ public Void run() throws Exception {
+ HttpURLConnection connection =
+ (HttpURLConnection)SecurityUtil.openSecureHttpConnection(url);
+ if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
+ throw new IOException("Error cancelling token for " +
+ renewAddress + " response: " +
+ connection.getResponseMessage());
+ }
+ return null;
+ }
+ });
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Cancelled token for " + tok.getService() + " via " +
+ renewAddress);
}
} catch (IOException ie) {
+ LOG.warn("Error cancelling token for " + renewAddress, ie);
IOUtils.cleanup(LOG, in);
throw ie;
+ } catch (InterruptedException ie) {
+ // PASS
}
}
}
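
The key decision in getRenewAddress() is small enough to sketch on its own: when KSSL authentication is in use and the caller passed an http address, the fetcher switches to https on the NameNode's configured https port. A standalone illustration, with hypothetical host and ports:

    import java.net.InetSocketAddress;

    public class RenewAddressSketch {
      // Mirrors the protocol/port switch in getRenewAddress() above.
      static String renewAddress(boolean ksslAuth, String protocol,
                                 InetSocketAddress addr, int httpsPort) {
        if (ksslAuth && "http".equals(protocol)) {
          protocol = "https";
          addr = new InetSocketAddress(addr.getHostName(), httpsPort);
        }
        return protocol + "://" + addr.getHostName() + ":" + addr.getPort();
      }

      public static void main(String[] args) {
        InetSocketAddress nn = new InetSocketAddress("nn.example.com", 50070);
        System.out.println(renewAddress(false, "http", nn, 50470));
        // -> http://nn.example.com:50070
        System.out.println(renewAddress(true, "http", nn, 50470));
        // -> https://nn.example.com:50470
      }
    }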
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/HDFSConcat.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+
+public class HDFSConcat {
+ private final static String def_uri = "hdfs://localhost:9000";
+ /**
+ * @param args
+ */
+ public static void main(String... args) throws IOException {
+
+ if(args.length < 2) {
+ System.err.println("Usage HDFSConcat target srcs..");
+ System.exit(0);
+ }
+
+ Configuration conf = new Configuration();
+ String uri = conf.get("fs.default.name", def_uri);
+ Path path = new Path(uri);
+ DistributedFileSystem dfs =
+ (DistributedFileSystem)FileSystem.get(path.toUri(), conf);
+
+ Path [] srcs = new Path[args.length-1];
+ for(int i=1; i<args.length; i++) {
+ srcs[i-1] = new Path(args[i]);
+ }
+ dfs.concat(new Path(args[0]), srcs);
+ }
+
+}
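
Assuming the class above is on the classpath, a hypothetical invocation (paths illustrative) looks like:

    hadoop org.apache.hadoop.hdfs.tools.HDFSConcat /data/part-all /data/part-0 /data/part-1

DistributedFileSystem.concat() moves the source files' blocks onto the target, after which the sources no longer exist; HDFS places restrictions on the arguments (for example, the sources are expected to live in the same directory as the target).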
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+
+/**
+ * A DelimitedImageVisitor generates a text representation of the fsimage,
+ * with each element separated by a delimiter string. All of the elements
+ * common to both inodes and inodes-under-construction are included. When
+ * processing an fsimage with a layout version that did not include an
+ * element, such as AccessTime, the output file will include a column
+ * for the value, but no value will be included.
+ *
+ * Individual block information for each file is not currently included.
+ *
+ * The default delimiter is tab, as this is an unlikely value to be included
+ * in an inode path or other text metadata. The delimiter value can be
+ * changed via the constructor.
+ */
+class DelimitedImageVisitor extends TextWriterImageVisitor {
+ private static final String defaultDelimiter = "\t";
+
+ final private LinkedList<ImageElement> elemQ = new LinkedList<ImageElement>();
+ private long fileSize = 0L;
+ // Elements of fsimage we're interested in tracking
+ private final Collection<ImageElement> elementsToTrack;
+ // Values for each of the elements in elementsToTrack
+ private final AbstractMap<ImageElement, String> elements =
+ new HashMap<ImageElement, String>();
+ private final String delimiter;
+
+ {
+ elementsToTrack = new ArrayList<ImageElement>();
+
+ // This collection determines what elements are tracked and the order
+ // in which they are output
+ Collections.addAll(elementsToTrack, ImageElement.INODE_PATH,
+ ImageElement.REPLICATION,
+ ImageElement.MODIFICATION_TIME,
+ ImageElement.ACCESS_TIME,
+ ImageElement.BLOCK_SIZE,
+ ImageElement.NUM_BLOCKS,
+ ImageElement.NUM_BYTES,
+ ImageElement.NS_QUOTA,
+ ImageElement.DS_QUOTA,
+ ImageElement.PERMISSION_STRING,
+ ImageElement.USER_NAME,
+ ImageElement.GROUP_NAME);
+ }
+
+ public DelimitedImageVisitor(String filename) throws IOException {
+ this(filename, false);
+ }
+
+ public DelimitedImageVisitor(String outputFile, boolean printToScreen)
+ throws IOException {
+ this(outputFile, printToScreen, defaultDelimiter);
+ }
+
+ public DelimitedImageVisitor(String outputFile, boolean printToScreen,
+ String delimiter) throws IOException {
+ super(outputFile, printToScreen);
+ this.delimiter = delimiter;
+ reset();
+ }
+
+ /**
+ * Reset the values of the elements we're tracking in order to handle
+ * the next file
+ */
+ private void reset() {
+ elements.clear();
+ for(ImageElement e : elementsToTrack)
+ elements.put(e, null);
+
+ fileSize = 0L;
+ }
+
+ @Override
+ void leaveEnclosingElement() throws IOException {
+ ImageElement elem = elemQ.pop();
+
+ // If we're done with an inode, write out our results and start over
+ if(elem == ImageElement.INODE ||
+ elem == ImageElement.INODE_UNDER_CONSTRUCTION) {
+ writeLine();
+ write("\n");
+ reset();
+ }
+ }
+
+ /**
+ * Iterate through all the elements we're tracking and, if a value was
+ * recorded for it, write it out.
+ */
+ private void writeLine() throws IOException {
+ Iterator<ImageElement> it = elementsToTrack.iterator();
+
+ while(it.hasNext()) {
+ ImageElement e = it.next();
+
+ String v = null;
+ if(e == ImageElement.NUM_BYTES)
+ v = String.valueOf(fileSize);
+ else
+ v = elements.get(e);
+
+ if(v != null)
+ write(v);
+
+ if(it.hasNext())
+ write(delimiter);
+ }
+ }
+
+ @Override
+ void visit(ImageElement element, String value) throws IOException {
+ // Explicitly label the root path
+ if(element == ImageElement.INODE_PATH && value.equals(""))
+ value = "/";
+
+ // Special case of file size, which is sum of the num bytes in each block
+ if(element == ImageElement.NUM_BYTES)
+ fileSize += Long.valueOf(value);
+
+ if(elements.containsKey(element) && element != ImageElement.NUM_BYTES)
+ elements.put(element, value);
+
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element) throws IOException {
+ elemQ.push(element);
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element, ImageElement key,
+ String value) throws IOException {
+ // Special case as numBlocks is an attribute of the blocks element
+ if(key == ImageElement.NUM_BLOCKS
+ && elements.containsKey(ImageElement.NUM_BLOCKS))
+ elements.put(key, value);
+
+ elemQ.push(element);
+ }
+
+ @Override
+ void start() throws IOException { /* Nothing to do */ }
+}
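
With the default delimiter, each inode becomes one tab-separated line carrying the twelve tracked columns in the order registered above. A hypothetical row for a single-block file:

    /user/hdfs/data.txt  3  2013-06-21 06:37  2013-06-21 06:37  67108864  1  1048576  <empty>  <empty>  -rw-r--r--  hdfs  supergroup

(shown with spaces and <empty> markers for readability; the real output uses tabs, with nothing between consecutive tabs for the NS_QUOTA and DS_QUOTA columns here, since a column with no recorded value still gets its delimiter and the column positions stay stable).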
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Utility class for tracking descent into the structure of the
+ * Visitor class (ImageVisitor, EditsVisitor etc.)
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class DepthCounter {
+ private int depth = 0;
+
+ public void incLevel() { depth++; }
+ public void decLevel() { if(depth >= 1) depth--; }
+ public int getLevel() { return depth; }
+}
+
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+/**
+ * File size distribution visitor.
+ *
+ * <h3>Description.</h3>
+ * This is the tool for analyzing file sizes in the namespace image.
+ * In order to run the tool one should define a range of integers
+ * <tt>[0, maxSize]</tt> by specifying <tt>maxSize</tt> and a <tt>step</tt>.
+ * The range of integers is divided into segments of size <tt>step</tt>:
+ * <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>,
+ * and the visitor calculates how many files in the system fall into
+ * each segment <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>.
+ * Note that files larger than <tt>maxSize</tt> always fall into
+ * the very last segment.
+ *
+ * <h3>Input.</h3>
+ * <ul>
+ * <li><tt>filename</tt> specifies the location of the image file;</li>
+ * <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
+ * sizes considered by the visitor;</li>
+ * <li><tt>step</tt> the range is divided into segments of size step.</li>
+ * </ul>
+ *
+ * <h3>Output.</h3>
+ * The output file is formatted as a tab-separated two-column table:
+ * Size and NumFiles, where Size represents the start of the segment
+ * and NumFiles is the number of files from the image whose size falls
+ * into this segment.
+ */
+class FileDistributionVisitor extends TextWriterImageVisitor {
+ final private LinkedList<ImageElement> elemS = new LinkedList<ImageElement>();
+
+ private final static long MAX_SIZE_DEFAULT = 0x2000000000L; // 1/8 TB = 2^37
+ private final static int INTERVAL_DEFAULT = 0x200000; // 2 MB = 2^21
+
+ private int[] distribution;
+ private long maxSize;
+ private int step;
+
+ private int totalFiles;
+ private int totalDirectories;
+ private int totalBlocks;
+ private long totalSpace;
+ private long maxFileSize;
+
+ private FileContext current;
+
+ private boolean inInode = false;
+
+ /**
+ * File or directory information.
+ */
+ private static class FileContext {
+ String path;
+ long fileSize;
+ int numBlocks;
+ int replication;
+ }
+
+ public FileDistributionVisitor(String filename,
+ long maxSize,
+ int step) throws IOException {
+ super(filename, false);
+ this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize);
+ this.step = (step == 0 ? INTERVAL_DEFAULT : step);
+ long numIntervals = this.maxSize / this.step;
+ if(numIntervals >= Integer.MAX_VALUE)
+ throw new IOException("Too many distribution intervals " + numIntervals);
+ this.distribution = new int[1 + (int)(numIntervals)];
+ this.totalFiles = 0;
+ this.totalDirectories = 0;
+ this.totalBlocks = 0;
+ this.totalSpace = 0;
+ this.maxFileSize = 0;
+ }
+
+ @Override
+ void start() throws IOException {}
+
+ @Override
+ void finish() throws IOException {
+ // write the distribution into the output file
+ write("Size\tNumFiles\n");
+ for(int i = 0; i < distribution.length; i++)
+ write(((long)i * step) + "\t" + distribution[i] + "\n");
+ System.out.println("totalFiles = " + totalFiles);
+ System.out.println("totalDirectories = " + totalDirectories);
+ System.out.println("totalBlocks = " + totalBlocks);
+ System.out.println("totalSpace = " + totalSpace);
+ System.out.println("maxFileSize = " + maxFileSize);
+ super.finish();
+ }
+
+ @Override
+ void leaveEnclosingElement() throws IOException {
+ ImageElement elem = elemS.pop();
+
+ if(elem != ImageElement.INODE &&
+ elem != ImageElement.INODE_UNDER_CONSTRUCTION)
+ return;
+ inInode = false;
+ if(current.numBlocks < 0) {
+ totalDirectories ++;
+ return;
+ }
+ totalFiles++;
+ totalBlocks += current.numBlocks;
+ totalSpace += current.fileSize * current.replication;
+ if(maxFileSize < current.fileSize)
+ maxFileSize = current.fileSize;
+ int high;
+ if(current.fileSize > maxSize)
+ high = distribution.length-1;
+ else
+ high = (int)Math.ceil((double)current.fileSize / step);
+ distribution[high]++;
+ if(totalFiles % 1000000 == 1)
+ System.out.println("Files processed: " + totalFiles
+ + " Current: " + current.path);
+ }
+
+ @Override
+ void visit(ImageElement element, String value) throws IOException {
+ if(inInode) {
+ switch(element) {
+ case INODE_PATH:
+ current.path = (value.equals("") ? "/" : value);
+ break;
+ case REPLICATION:
+ current.replication = Integer.valueOf(value);
+ break;
+ case NUM_BYTES:
+ current.fileSize += Long.valueOf(value);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element) throws IOException {
+ elemS.push(element);
+ if(element == ImageElement.INODE ||
+ element == ImageElement.INODE_UNDER_CONSTRUCTION) {
+ current = new FileContext();
+ inInode = true;
+ }
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element,
+ ImageElement key, String value) throws IOException {
+ elemS.push(element);
+ if(element == ImageElement.INODE ||
+ element == ImageElement.INODE_UNDER_CONSTRUCTION)
+ inInode = true;
+ else if(element == ImageElement.BLOCKS)
+ current.numBlocks = Integer.parseInt(value);
+ }
+}
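
The bucketing rule in leaveEnclosingElement() is worth a worked example. With the defaults above (maxSize = 2^37, step = 2 MB), a 5 MB file increments distribution[3], which the report labels Size = 6291456, i.e. the segment ending at 6 MB. This sketch reproduces the arithmetic:

    public class BucketSketch {
      public static void main(String[] args) {
        long maxSize = 0x2000000000L;           // 2^37, MAX_SIZE_DEFAULT above
        int step = 0x200000;                    // 2 MB, INTERVAL_DEFAULT above
        int buckets = 1 + (int) (maxSize / step);
        long fileSize = 5L * 1024 * 1024;       // hypothetical 5 MB file
        int high = fileSize > maxSize
            ? buckets - 1                       // oversize files -> last bucket
            : (int) Math.ceil((double) fileSize / step);
        System.out.println(high);               // 3
        System.out.println((long) high * step); // 6291456, the "Size" column
      }
    }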
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * An ImageLoader can accept a DataInputStream to a Hadoop FSImage file
+ * and walk over its structure using the supplied ImageVisitor.
+ *
+ * Each implementation of ImageLoader is designed to rapidly process an
+ * image file. As long as minor changes are made from one layout version
+ * to another, it is acceptable to tweak one implementation to read the next.
+ * However, if the layout version changes enough that it would make a
+ * processor slow or difficult to read, another processor should be created.
+ * This allows each processor to quickly read an image without getting
+ * bogged down in dealing with significant differences between layout versions.
+ */
+interface ImageLoader {
+
+ /**
+ * @param in DataInputStream pointing to a Hadoop FSImage file
+ * @param v Visitor to apply to the FSImage file
+ * @param enumerateBlocks Should visitor visit each of the file blocks?
+ */
+ public void loadImage(DataInputStream in, ImageVisitor v,
+ boolean enumerateBlocks) throws IOException;
+
+ /**
+ * Can this processor handle the specified version of FSImage file?
+ *
+ * @param version FSImage version file
+ * @return True if this instance can process the file
+ */
+ public boolean canLoadVersion(int version);
+
+ /**
+ * Factory for obtaining version of image loader that can read
+ * a particular image format.
+ */
+ @InterfaceAudience.Private
+ public class LoaderFactory {
+ // Java doesn't support static methods on interfaces, which necessitates
+ // this factory class
+
+ /**
+ * Find an image loader capable of interpreting the specified
+ * layout version number. If none, return null.
+ *
+ * @param version fsimage layout version number to be processed
+ * @return ImageLoader that can interpret specified version, or null
+ */
+ static public ImageLoader getLoader(int version) {
+ // Easy to add more image processors as they are written
+ ImageLoader[] loaders = { new ImageLoaderCurrent() };
+
+ for (ImageLoader l : loaders) {
+ if (l.canLoadVersion(version))
+ return l;
+ }
+
+ return null;
+ }
+ }
+}
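
A hedged sketch of how a caller would drive the factory above (the ImageLoader interface is package-private, so this assumes a class in the same package; the version value is hypothetical):

    public class LoaderSketch {
      public static void main(String[] args) {
        int layoutVersion = -19;   // e.g. read from the head of an fsimage
        ImageLoader loader = ImageLoader.LoaderFactory.getLoader(layoutVersion);
        System.out.println(loader == null
            ? "no loader for layout version " + layoutVersion
            : "using " + loader.getClass().getSimpleName());
      }
    }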
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,465 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.apache.hadoop.security.token.delegation.DelegationKey;
+
+/**
+ * ImageLoaderCurrent processes Hadoop FSImage files and walks over
+ * them using a provided ImageVisitor, calling the visitor at each element
+ * enumerated below.
+ *
+ * The only difference between v18 and v19 was the utilization of the
+ * stickybit. Therefore, the same viewer can read either format.
+ *
+ * Versions -19 fsimage layout (with changes from -16 up):
+ * Image version (int)
+ * Namespace ID (int)
+ * NumFiles (long)
+ * Generation stamp (long)
+ * INodes (count = NumFiles)
+ * INode
+ * Path (String)
+ * Replication (short)
+ * Modification Time (long as date)
+ * Access Time (long) // added in -16
+ * Block size (long)
+ * Num blocks (int)
+ * Blocks (count = Num blocks)
+ * Block
+ * Block ID (long)
+ * Num bytes (long)
+ * Generation stamp (long)
+ * Namespace Quota (long)
+ * Diskspace Quota (long) // added in -18
+ * Permissions
+ * Username (String)
+ * Groupname (String)
+ * OctalPerms (short -> String) // Modified in -19
+ * Symlink (String) // added in -23
+ * NumINodesUnderConstruction (int)
+ * INodesUnderConstruction (count = NumINodesUnderConstruction)
+ * INodeUnderConstruction
+ * Path (bytes as string)
+ * Replication (short)
+ * Modification time (long as date)
+ * Preferred block size (long)
+ * Num blocks (int)
+ * Blocks
+ * Block
+ * Block ID (long)
+ * Num bytes (long)
+ * Generation stamp (long)
+ * Permissions
+ * Username (String)
+ * Groupname (String)
+ * OctalPerms (short -> String)
+ * Client Name (String)
+ * Client Machine (String)
+ * NumLocations (int)
+ * DatanodeDescriptors (count = numLocations) // not loaded into memory
+ * short // but still in file
+ * long
+ * string
+ * long
+ * int
+ * string
+ * string
+ * enum
+ * CurrentDelegationKeyId (int)
+ * NumDelegationKeys (int)
+ * DelegationKeys (count = NumDelegationKeys)
+ * DelegationKeyLength (vint)
+ * DelegationKey (bytes)
+ * DelegationTokenSequenceNumber (int)
+ * NumDelegationTokens (int)
+ * DelegationTokens (count = NumDelegationTokens)
+ * DelegationTokenIdentifier
+ * owner (String)
+ * renewer (String)
+ * realUser (String)
+ * issueDate (vlong)
+ * maxDate (vlong)
+ * sequenceNumber (vint)
+ * masterKeyId (vint)
+ * expiryTime (long)
+ *
+ */
+class ImageLoaderCurrent implements ImageLoader {
+ protected final DateFormat dateFormat =
+ new SimpleDateFormat("yyyy-MM-dd HH:mm");
+ private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
+ -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -41};
+ private int imageVersion = 0;
+
+ /* (non-Javadoc)
+ * @see ImageLoader#canProcessVersion(int)
+ */
+ @Override
+ public boolean canLoadVersion(int version) {
+ for(int v : versions)
+ if(v == version) return true;
+
+ return false;
+ }
+
+ /* (non-Javadoc)
+ * @see ImageLoader#processImage(java.io.DataInputStream, ImageVisitor, boolean)
+ */
+ @Override
+ public void loadImage(DataInputStream in, ImageVisitor v,
+ boolean skipBlocks) throws IOException {
+ boolean done = false;
+ try {
+ v.start();
+ v.visitEnclosingElement(ImageElement.FS_IMAGE);
+
+ imageVersion = in.readInt();
+ if( !canLoadVersion(imageVersion))
+ throw new IOException("Cannot process fslayout version " + imageVersion);
+
+ v.visit(ImageElement.IMAGE_VERSION, imageVersion);
+ v.visit(ImageElement.NAMESPACE_ID, in.readInt());
+
+ long numInodes = in.readLong();
+
+ v.visit(ImageElement.GENERATION_STAMP, in.readLong());
+
+ if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
+ boolean isCompressed = in.readBoolean();
+ v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
+ if (isCompressed) {
+ String codecClassName = Text.readString(in);
+ v.visit(ImageElement.COMPRESS_CODEC, codecClassName);
+ CompressionCodecFactory codecFac = new CompressionCodecFactory(
+ new Configuration());
+ CompressionCodec codec = codecFac.getCodecByClassName(codecClassName);
+ if (codec == null) {
+ throw new IOException("Image compression codec not supported: "
+ + codecClassName);
+ }
+ in = new DataInputStream(codec.createInputStream(in));
+ }
+ }
+ processINodes(in, v, numInodes, skipBlocks);
+
+ processINodesUC(in, v, skipBlocks);
+
+ if (LayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
+ processDelegationTokens(in, v);
+ }
+
+ v.leaveEnclosingElement(); // FSImage
+ done = true;
+ } finally {
+ if (done) {
+ v.finish();
+ } else {
+ v.finishAbnormally();
+ }
+ }
+ }
+
+ /**
+ * Process the Delegation Token related section in fsimage.
+ *
+ * @param in DataInputStream to process
+ * @param v Visitor to walk over records
+ */
+ private void processDelegationTokens(DataInputStream in, ImageVisitor v)
+ throws IOException {
+ v.visit(ImageElement.CURRENT_DELEGATION_KEY_ID, in.readInt());
+ int numDKeys = in.readInt();
+ v.visitEnclosingElement(ImageElement.DELEGATION_KEYS,
+ ImageElement.NUM_DELEGATION_KEYS, numDKeys);
+ for(int i =0; i < numDKeys; i++) {
+ DelegationKey key = new DelegationKey();
+ key.readFields(in);
+ v.visit(ImageElement.DELEGATION_KEY, key.toString());
+ }
+ v.leaveEnclosingElement();
+ v.visit(ImageElement.DELEGATION_TOKEN_SEQUENCE_NUMBER, in.readInt());
+ int numDTokens = in.readInt();
+ v.visitEnclosingElement(ImageElement.DELEGATION_TOKENS,
+ ImageElement.NUM_DELEGATION_TOKENS, numDTokens);
+ for(int i=0; i<numDTokens; i++){
+ DelegationTokenIdentifier id = new DelegationTokenIdentifier();
+ id.readFields(in);
+ long expiryTime = in.readLong();
+ v.visitEnclosingElement(ImageElement.DELEGATION_TOKEN_IDENTIFIER);
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_KIND,
+ id.getKind().toString());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_SEQNO,
+ id.getSequenceNumber());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER,
+ id.getRenewer().toString());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
+ id.getIssueDate());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
+ id.getMaxDate());
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
+ expiryTime);
+ v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
+ id.getMasterKeyId());
+ v.leaveEnclosingElement(); // DELEGATION_TOKEN_IDENTIFIER
+ }
+ v.leaveEnclosingElement(); // DELEGATION_TOKENS
+ }
+
+ /**
+ * Process the INodes under construction section of the fsimage.
+ *
+ * @param in DataInputStream to process
+ * @param v Visitor to walk over inodes
+ * @param skipBlocks Walk over each block?
+ */
+ private void processINodesUC(DataInputStream in, ImageVisitor v,
+ boolean skipBlocks) throws IOException {
+ int numINUC = in.readInt();
+
+ v.visitEnclosingElement(ImageElement.INODES_UNDER_CONSTRUCTION,
+ ImageElement.NUM_INODES_UNDER_CONSTRUCTION, numINUC);
+
+ for (int i = 0; i < numINUC; i++) {
+ v.visitEnclosingElement(ImageElement.INODE_UNDER_CONSTRUCTION);
+ byte[] name = FSImage.readBytes(in);
+ String n = new String(name, "UTF-8");
+ v.visit(ImageElement.INODE_PATH, n);
+ v.visit(ImageElement.REPLICATION, in.readShort());
+ v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
+
+ v.visit(ImageElement.PREFERRED_BLOCK_SIZE, in.readLong());
+ int numBlocks = in.readInt();
+ processBlocks(in, v, numBlocks, skipBlocks);
+
+ processPermission(in, v);
+ v.visit(ImageElement.CLIENT_NAME, FSImage.readString(in));
+ v.visit(ImageElement.CLIENT_MACHINE, FSImage.readString(in));
+
+ // Skip over the datanode descriptors, which are still stored in the
+ // file but are no longer used when the image is loaded into memory
+ int numLocs = in.readInt();
+ for (int j = 0; j < numLocs; j++) {
+ in.readShort();
+ in.readLong();
+ in.readLong();
+ in.readLong();
+ in.readInt();
+ FSImage.readString(in);
+ FSImage.readString(in);
+ WritableUtils.readEnum(in, AdminStates.class);
+ }
+
+ v.leaveEnclosingElement(); // INodeUnderConstruction
+ }
+
+ v.leaveEnclosingElement(); // INodesUnderConstruction
+ }
+
+ /**
+ * Process the blocks section of the fsimage.
+ *
+ * @param in DataInputStream to process
+ * @param v Visitor to walk over inodes
+ * @param numBlocks Number of blocks to process
+ * @param skipBlocks If true, skip over individual block records
+ */
+ private void processBlocks(DataInputStream in, ImageVisitor v,
+ int numBlocks, boolean skipBlocks) throws IOException {
+ v.visitEnclosingElement(ImageElement.BLOCKS,
+ ImageElement.NUM_BLOCKS, numBlocks);
+
+ // directory or symlink, no blocks to process
+ if (numBlocks == -1 || numBlocks == -2) {
+ v.leaveEnclosingElement(); // Blocks
+ return;
+ }
+
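+ // Each block record is three longs: block id, num bytes, generation stamp.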
+ if (skipBlocks) {
+ int bytesToSkip = ((Long.SIZE * 3 /* fields */) / 8 /* bits */) * numBlocks;
+ if (in.skipBytes(bytesToSkip) != bytesToSkip) {
+ throw new IOException("Error skipping over blocks");
+ }
+ } else {
+ for (int j = 0; j < numBlocks; j++) {
+ v.visitEnclosingElement(ImageElement.BLOCK);
+ v.visit(ImageElement.BLOCK_ID, in.readLong());
+ v.visit(ImageElement.NUM_BYTES, in.readLong());
+ v.visit(ImageElement.GENERATION_STAMP, in.readLong());
+ v.leaveEnclosingElement(); // Block
+ }
+ }
+ v.leaveEnclosingElement(); // Blocks
+ }
+
+ /**
+ * Extract the INode permissions stored in the fsimage file.
+ *
+ * @param in DataInputStream to process
+ * @param v Visitor to walk over inodes
+ */
+ private void processPermission(DataInputStream in, ImageVisitor v)
+ throws IOException {
+ v.visitEnclosingElement(ImageElement.PERMISSIONS);
+ v.visit(ImageElement.USER_NAME, Text.readString(in));
+ v.visit(ImageElement.GROUP_NAME, Text.readString(in));
+ FsPermission fsp = new FsPermission(in.readShort());
+ v.visit(ImageElement.PERMISSION_STRING, fsp.toString());
+ v.leaveEnclosingElement(); // Permissions
+ }
+
+ /**
+ * Process the INode records stored in the fsimage.
+ *
+ * @param in DataInputStream to process
+ * @param v Visitor to walk over INodes
+ * @param numInodes Number of INodes stored in file
+ * @param skipBlocks If true, skip over individual block records
+ * @throws IOException
+ */
+ private void processINodes(DataInputStream in, ImageVisitor v,
+ long numInodes, boolean skipBlocks) throws IOException {
+ v.visitEnclosingElement(ImageElement.INODES,
+ ImageElement.NUM_INODES, numInodes);
+
+ if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
+ processLocalNameINodes(in, v, numInodes, skipBlocks);
+ } else { // full path name
+ processFullNameINodes(in, v, numInodes, skipBlocks);
+ }
+
+ v.leaveEnclosingElement(); // INodes
+ }
+
+ /**
+ * Process an fsimage whose inodes are stored with local (relative) names.
+ *
+ * @param in image stream
+ * @param v visitor
+ * @param numInodes number of inodes to read
+ * @param skipBlocks skip blocks or not
+ * @throws IOException if an error occurs
+ */
+ private void processLocalNameINodes(DataInputStream in, ImageVisitor v,
+ long numInodes, boolean skipBlocks) throws IOException {
+ // process root
+ processINode(in, v, skipBlocks, "");
+ numInodes--;
+ while (numInodes > 0) {
+ numInodes -= processDirectory(in, v, skipBlocks);
+ }
+ }
+
+ private int processDirectory(DataInputStream in, ImageVisitor v,
+ boolean skipBlocks) throws IOException {
+ String parentName = FSImage.readString(in);
+ int numChildren = in.readInt();
+ for (int i = 0; i < numChildren; i++) {
+ processINode(in, v, skipBlocks, parentName);
+ }
+ return numChildren;
+ }
+
+ /**
+ * Process an fsimage whose inodes are stored with full path names.
+ *
+ * @param in image stream
+ * @param v visitor
+ * @param numInodes number of inodes to read
+ * @param skipBlocks skip blocks or not
+ * @throws IOException if an error occurs
+ */
+ private void processFullNameINodes(DataInputStream in, ImageVisitor v,
+ long numInodes, boolean skipBlocks) throws IOException {
+ for (long i = 0; i < numInodes; i++) {
+ processINode(in, v, skipBlocks, null);
+ }
+ }
+
+ /**
+ * Process an INode
+ *
+ * @param in image stream
+ * @param v visitor
+ * @param skipBlocks skip blocks or not
+ * @param parentName the name of its parent node
+ * @throws IOException
+ */
+ private void processINode(DataInputStream in, ImageVisitor v,
+ boolean skipBlocks, String parentName) throws IOException {
+ v.visitEnclosingElement(ImageElement.INODE);
+ String pathName = FSImage.readString(in);
+ if (parentName != null) { // local name
+ pathName = "/" + pathName;
+ if (!"/".equals(parentName)) { // children of non-root directory
+ pathName = parentName + pathName;
+ }
+ }
+
+ v.visit(ImageElement.INODE_PATH, pathName);
+ v.visit(ImageElement.REPLICATION, in.readShort());
+ v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
+ if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
+ v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
+ v.visit(ImageElement.BLOCK_SIZE, in.readLong());
+ int numBlocks = in.readInt();
+
+ processBlocks(in, v, numBlocks, skipBlocks);
+
+ // File or directory
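+ // numBlocks == -1 marks a directory (which may carry quotas);
+ // numBlocks == -2 marks a symlink.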
+ if (numBlocks > 0 || numBlocks == -1) {
+ v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
+ if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
+ v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
+ }
+ if (numBlocks == -2) {
+ v.visit(ImageElement.SYMLINK, Text.readString(in));
+ }
+
+ processPermission(in, v);
+ v.leaveEnclosingElement(); // INode
+ }
+
+ /**
+ * Helper method to format dates during processing.
+ * @param date Date as read from image file
+ * @return Formatted date string
+ */
+ private String formatDate(long date) {
+ return dateFormat.format(new Date(date));
+ }
+}
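
Usage sketch (illustrative, not part of this change): the loader above is
normally driven by the OfflineImageViewer entry point, whose
(input file, visitor, skipBlocks) constructor and go() method are assumed
here, as that class does not appear in this hunk. The driver class name
DumpImage is invented; it lives in the same package because the visitor
classes are package-private.

  package org.apache.hadoop.hdfs.tools.offlineImageViewer;

  import java.io.IOException;

  public class DumpImage {
    public static void main(String[] args) throws IOException {
      // Write an indented dump of the image to fsimage.txt, echoing to stdout.
      ImageVisitor v = new IndentedImageVisitor("fsimage.txt", true);
      // OfflineImageViewer's signature is assumed (input, visitor, skipBlocks);
      // skipBlocks == false means every block record is visited as well.
      new OfflineImageViewer(args[0], v, false).go();
    }
  }
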
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+
+/**
+ * An implementation of ImageVisitor can traverse the structure of a
+ * Hadoop fsimage and respond to each of the structures within the file.
+ */
+abstract class ImageVisitor {
+
+ /**
+ * Structural elements of an FSImage that may be encountered within the
+ * file. ImageVisitors are able to handle processing any of these elements.
+ */
+ public enum ImageElement {
+ FS_IMAGE,
+ IMAGE_VERSION,
+ NAMESPACE_ID,
+ IS_COMPRESSED,
+ COMPRESS_CODEC,
+ LAYOUT_VERSION,
+ NUM_INODES,
+ GENERATION_STAMP,
+ INODES,
+ INODE,
+ INODE_PATH,
+ REPLICATION,
+ MODIFICATION_TIME,
+ ACCESS_TIME,
+ BLOCK_SIZE,
+ NUM_BLOCKS,
+ BLOCKS,
+ BLOCK,
+ BLOCK_ID,
+ NUM_BYTES,
+ NS_QUOTA,
+ DS_QUOTA,
+ PERMISSIONS,
+ SYMLINK,
+ NUM_INODES_UNDER_CONSTRUCTION,
+ INODES_UNDER_CONSTRUCTION,
+ INODE_UNDER_CONSTRUCTION,
+ PREFERRED_BLOCK_SIZE,
+ CLIENT_NAME,
+ CLIENT_MACHINE,
+ USER_NAME,
+ GROUP_NAME,
+ PERMISSION_STRING,
+ CURRENT_DELEGATION_KEY_ID,
+ NUM_DELEGATION_KEYS,
+ DELEGATION_KEYS,
+ DELEGATION_KEY,
+ DELEGATION_TOKEN_SEQUENCE_NUMBER,
+ NUM_DELEGATION_TOKENS,
+ DELEGATION_TOKENS,
+ DELEGATION_TOKEN_IDENTIFIER,
+ DELEGATION_TOKEN_IDENTIFIER_KIND,
+ DELEGATION_TOKEN_IDENTIFIER_SEQNO,
+ DELEGATION_TOKEN_IDENTIFIER_OWNER,
+ DELEGATION_TOKEN_IDENTIFIER_RENEWER,
+ DELEGATION_TOKEN_IDENTIFIER_REALUSER,
+ DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
+ DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
+ DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
+ DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID
+ }
+
+ /**
+ * Begin visiting the fsimage structure. Opportunity to perform
+ * any initialization necessary for the implementing visitor.
+ */
+ abstract void start() throws IOException;
+
+ /**
+ * Finish visiting the fsimage structure. Opportunity to perform any
+ * clean up necessary for the implementing visitor.
+ */
+ abstract void finish() throws IOException;
+
+ /**
+ * Finish visiting the fsimage structure after an error has occurred
+ * during the processing. Opportunity to perform any clean up necessary
+ * for the implementing visitor.
+ */
+ abstract void finishAbnormally() throws IOException;
+
+ /**
+ * Visit a non-enclosing element of the fsimage with the specified value.
+ *
+ * @param element FSImage element
+ * @param value Element's value
+ */
+ abstract void visit(ImageElement element, String value) throws IOException;
+
+ // Convenience methods to automatically convert numeric value types to strings
+ void visit(ImageElement element, int value) throws IOException {
+ visit(element, Integer.toString(value));
+ }
+
+ void visit(ImageElement element, long value) throws IOException {
+ visit(element, Long.toString(value));
+ }
+
+ /**
+ * Begin visiting an element that encloses another element, such as
+ * the beginning of the list of blocks that comprise a file.
+ *
+ * @param element Element being visited
+ */
+ abstract void visitEnclosingElement(ImageElement element)
+ throws IOException;
+
+ /**
+ * Begin visiting an element that encloses another element, such as
+ * the beginning of the list of blocks that comprise a file.
+ *
+ * Also provide an additional key and value for the element, such as the
+ * number of items within the element.
+ *
+ * @param element Element being visited
+ * @param key Key describing the element being visited
+ * @param value Value associated with element being visited
+ */
+ abstract void visitEnclosingElement(ImageElement element,
+ ImageElement key, String value) throws IOException;
+
+ // Convenience methods to automatically convert value types to strings
+ void visitEnclosingElement(ImageElement element,
+ ImageElement key, int value)
+ throws IOException {
+ visitEnclosingElement(element, key, Integer.toString(value));
+ }
+
+ void visitEnclosingElement(ImageElement element,
+ ImageElement key, long value)
+ throws IOException {
+ visitEnclosingElement(element, key, Long.toString(value));
+ }
+
+ /**
+ * Leave current enclosing element. Called, for instance, at the end of
+ * processing the blocks that comprise a file.
+ */
+ abstract void leaveEnclosingElement() throws IOException;
+}
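
To make the contract above concrete, here is a minimal visitor sketch that
only counts INode records (the class name is invented for illustration; it
must live in the same package, since the base class is package-private):

  package org.apache.hadoop.hdfs.tools.offlineImageViewer;

  import java.io.IOException;

  // Illustrative visitor, not part of this commit.
  class InodeCountingVisitor extends ImageVisitor {
    private long inodes = 0;

    @Override
    void start() throws IOException {}

    @Override
    void finish() throws IOException {
      System.out.println("INodes seen: " + inodes);
    }

    @Override
    void finishAbnormally() throws IOException {
      System.out.println("Aborted after " + inodes + " INodes");
    }

    @Override
    void visit(ImageElement element, String value) throws IOException {}

    @Override
    void visitEnclosingElement(ImageElement element) throws IOException {
      if (element == ImageElement.INODE) {
        inodes++;
      }
    }

    @Override
    void visitEnclosingElement(ImageElement element, ImageElement key,
        String value) throws IOException {
      if (element == ImageElement.INODE) {
        inodes++;
      }
    }

    @Override
    void leaveEnclosingElement() throws IOException {}
  }
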
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.util.Date;
+
+/**
+ * IndentedImageVisitor walks over an FSImage and displays its structure
+ * using indentation to organize sections within the image file.
+ */
+class IndentedImageVisitor extends TextWriterImageVisitor {
+
+ public IndentedImageVisitor(String filename) throws IOException {
+ super(filename);
+ }
+
+ public IndentedImageVisitor(String filename, boolean printToScreen) throws IOException {
+ super(filename, printToScreen);
+ }
+
+ private final DepthCounter dc = new DepthCounter(); // tracks indentation level
+
+ @Override
+ void start() throws IOException {}
+
+ @Override
+ void finish() throws IOException { super.finish(); }
+
+ @Override
+ void finishAbnormally() throws IOException {
+ System.out.println("*** Image processing finished abnormally. Ending ***");
+ super.finishAbnormally();
+ }
+
+ @Override
+ void leaveEnclosingElement() throws IOException {
+ dc.decLevel();
+ }
+
+ @Override
+ void visit(ImageElement element, String value) throws IOException {
+ printIndents();
+ write(element + " = " + value + "\n");
+ }
+
+ @Override
+ void visit(ImageElement element, long value) throws IOException {
+ if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) ||
+ (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) ||
+ (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE)) {
+ visit(element, new Date(value).toString());
+ } else {
+ visit(element, Long.toString(value));
+ }
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element) throws IOException {
+ printIndents();
+ write(element + "\n");
+ dc.incLevel();
+ }
+
+ // Print element, along with associated key/value pair, in brackets
+ @Override
+ void visitEnclosingElement(ImageElement element,
+ ImageElement key, String value)
+ throws IOException {
+ printIndents();
+ write(element + " [" + key + " = " + value + "]\n");
+ dc.incLevel();
+ }
+
+ // Cached indent strings: fsimages can be millions of lines long, so
+ // caching the common indent levels significantly speeds up output.
+ final private static String [] indents = { "",
+ " ",
+ " ",
+ " ",
+ " ",
+ " ",
+ " "};
+
+ /** Print an appropriate number of spaces for the current level. */
+ private void printIndents() throws IOException {
+ try {
+ write(indents[dc.getLevel()]);
+ } catch (IndexOutOfBoundsException e) {
+ // There's no reason an fsimage should need a deeper indent, but
+ // fall back to building one just in case
+ for (int i = 0; i < dc.getLevel(); i++)
+ write(" ");
+ }
+ }
+}
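
For reference, the output of this visitor looks roughly like the following
(all values are illustrative; the enclosing-element brackets and per-level
indents come from the code above, and the date format comes from the loader):

  FS_IMAGE
    IMAGE_VERSION = -32
    NAMESPACE_ID = 942032043
    GENERATION_STAMP = 1001
    IS_COMPRESSED = false
    INODES [NUM_INODES = 2]
      INODE
        INODE_PATH = /user/foo
        REPLICATION = 3
        MODIFICATION_TIME = 2013-06-21 06:37
        ...
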
Added: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,178 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.util.Formatter;
+import java.util.LinkedList;
+
+/**
+ * LsImageVisitor displays the files and directories of the namespace in a
+ * format very similar to the output of ls/lsr: each entry is marked as a
+ * directory or not, with its permissions, replication, username and
+ * groupname, size, modification date and full path.
+ *
+ * Note: A significant difference between the output of the lsr command
+ * and this image visitor is that this class cannot sort the file entries;
+ * they are listed in the order they are stored within the fsimage file.
+ * Therefore, the output of this class cannot be directly compared to the
+ * output of the lsr command.
+ */
+class LsImageVisitor extends TextWriterImageVisitor {
+ final private LinkedList<ImageElement> elemQ = new LinkedList<ImageElement>();
+
+ private int numBlocks;
+ private String perms;
+ private int replication;
+ private String username;
+ private String group;
+ private long filesize;
+ private String modTime;
+ private String path;
+ private String linkTarget;
+
+ private boolean inInode = false;
+ final private StringBuilder sb = new StringBuilder();
+ final private Formatter formatter = new Formatter(sb);
+
+ public LsImageVisitor(String filename) throws IOException {
+ super(filename);
+ }
+
+ public LsImageVisitor(String filename, boolean printToScreen) throws IOException {
+ super(filename, printToScreen);
+ }
+
+ /**
+ * Start a new line of output, reset values.
+ */
+ private void newLine() {
+ numBlocks = 0;
+ perms = username = group = path = linkTarget = "";
+ filesize = 0L;
+ replication = 0;
+
+ inInode = true;
+ }
+
+ // Column widths and format string for the ls-style output line.
+ private final static int widthRepl = 2;
+ private final static int widthUser = 8;
+ private final static int widthGroup = 10;
+ private final static int widthSize = 10;
+ private final static int widthMod = 10;
+ private final static String lsStr = " %" + widthRepl + "s %" + widthUser +
+ "s %" + widthGroup + "s %" + widthSize +
+ "d %" + widthMod + "s %s";
+
+ /**
+ * All the values have been gathered. Print them to the console in an
+ * ls-style format.
+ */
+ private void printLine() throws IOException {
+ sb.append(numBlocks < 0 ? "d" : "-");
+ sb.append(perms);
+
+ if (!linkTarget.isEmpty()) {
+ path = path + " -> " + linkTarget;
+ }
+ formatter.format(lsStr, replication > 0 ? replication : "-",
+ username, group, filesize, modTime, path);
+ sb.append("\n");
+
+ write(sb.toString());
+ sb.setLength(0); // clear string builder
+
+ inInode = false;
+ }
+
+ @Override
+ void start() throws IOException {}
+
+ @Override
+ void finish() throws IOException {
+ super.finish();
+ }
+
+ @Override
+ void finishAbnormally() throws IOException {
+ System.out.println("Input ended unexpectedly.");
+ super.finishAbnormally();
+ }
+
+ @Override
+ void leaveEnclosingElement() throws IOException {
+ ImageElement elem = elemQ.pop();
+
+ if (elem == ImageElement.INODE)
+ printLine();
+ }
+
+ // Maintain state of location within the image tree and record
+ // values needed to display the inode in ls-style format.
+ @Override
+ void visit(ImageElement element, String value) throws IOException {
+ if (inInode) {
+ switch (element) {
+ case INODE_PATH:
+ if(value.equals("")) path = "/";
+ else path = value;
+ break;
+ case PERMISSION_STRING:
+ perms = value;
+ break;
+ case REPLICATION:
+ replication = Integer.parseInt(value);
+ break;
+ case USER_NAME:
+ username = value;
+ break;
+ case GROUP_NAME:
+ group = value;
+ break;
+ case NUM_BYTES:
+ filesize += Long.parseLong(value);
+ break;
+ case MODIFICATION_TIME:
+ modTime = value;
+ break;
+ case SYMLINK:
+ linkTarget = value;
+ break;
+ default:
+ // This is OK. We're not looking for all the values.
+ break;
+ }
+ }
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element) throws IOException {
+ elemQ.push(element);
+ if (element == ImageElement.INODE)
+ newLine();
+ }
+
+ @Override
+ void visitEnclosingElement(ImageElement element,
+ ImageElement key, String value) throws IOException {
+ elemQ.push(element);
+ if (element == ImageElement.INODE)
+ newLine();
+ else if (element == ImageElement.BLOCKS)
+ numBlocks = Integer.parseInt(value);
+ }
+}
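
Output from this visitor looks roughly like the following (values and the
date format, which comes from the image loader, are illustrative; the column
widths come from the lsStr format string above):

  drwxr-xr-x  -   hadoop supergroup          0 2013-06-21 06:37 /user
  -rw-r--r--  3   hadoop supergroup   67108864 2013-06-21 06:37 /user/foo/part-0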