You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2016/06/01 01:05:16 UTC
[1/2] hadoop git commit: HDFS-9466.
TestShortCircuitCache#testDataXceiverCleansUpSlotsOnFailure is flaky
(Wei-Chiu Chuang via cmccabe)
Repository: hadoop
Updated Branches:
refs/heads/trunk 29d6cadc5 -> 8ceb06e23
HDFS-9466. TestShortCircuitCache#testDataXceiverCleansUpSlotsOnFailure is flaky (Wei-Chiu Chuang via cmccabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7921c9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7921c9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7921c9b
Branch: refs/heads/trunk
Commit: c7921c9bddb79c9db5059b6c3f7a3a586a3cd95b
Parents: 29d6cad
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue May 31 17:37:52 2016 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue May 31 17:37:52 2016 -0700
----------------------------------------------------------------------
.../server/datanode/ShortCircuitRegistry.java | 6 ++--
.../shortcircuit/TestShortCircuitCache.java | 34 ++++++++++----------
2 files changed, 20 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7921c9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index 52856af..68ef24e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -394,12 +394,12 @@ public class ShortCircuitRegistry {
}
public static interface Visitor {
- void accept(HashMap<ShmId, RegisteredShm> segments,
+ boolean accept(HashMap<ShmId, RegisteredShm> segments,
HashMultimap<ExtendedBlockId, Slot> slots);
}
@VisibleForTesting
- public synchronized void visit(Visitor visitor) {
- visitor.accept(segments, slots);
+ public synchronized boolean visit(Visitor visitor) {
+ return visitor.accept(segments, slots);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7921c9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index f788613..ac14438 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -32,6 +32,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
@@ -624,15 +625,22 @@ public class TestShortCircuitCache {
}
static private void checkNumberOfSegmentsAndSlots(final int expectedSegments,
- final int expectedSlots, ShortCircuitRegistry registry) {
- registry.visit(new ShortCircuitRegistry.Visitor() {
+ final int expectedSlots, final ShortCircuitRegistry registry)
+ throws InterruptedException, TimeoutException {
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
- public void accept(HashMap<ShmId, RegisteredShm> segments,
- HashMultimap<ExtendedBlockId, Slot> slots) {
- Assert.assertEquals(expectedSegments, segments.size());
- Assert.assertEquals(expectedSlots, slots.size());
+ public Boolean get() {
+ return registry.visit(new ShortCircuitRegistry.Visitor() {
+ @Override
+ public boolean accept(HashMap<ShmId, RegisteredShm> segments,
+ HashMultimap<ExtendedBlockId, Slot> slots) {
+ return (expectedSegments == segments.size()) &&
+ (expectedSlots == slots.size());
+ }
+ });
}
- });
+ }, 100, 10000);
+
}
public static class TestCleanupFailureInjector
@@ -774,16 +782,8 @@ public class TestShortCircuitCache {
DFSTestUtil.createFile(fs, TEST_PATH2, 4096, (short)1, 0xFADE2);
DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
- ShortCircuitRegistry registry =
- cluster.getDataNodes().get(0).getShortCircuitRegistry();
- registry.visit(new ShortCircuitRegistry.Visitor() {
- @Override
- public void accept(HashMap<ShmId, RegisteredShm> segments,
- HashMultimap<ExtendedBlockId, Slot> slots) {
- Assert.assertEquals(1, segments.size());
- Assert.assertEquals(2, slots.size());
- }
- });
+ checkNumberOfSegmentsAndSlots(1, 2,
+ cluster.getDataNodes().get(0).getShortCircuitRegistry());
cluster.shutdown();
sockDir.close();
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[2/2] hadoop git commit: HADOOP-13137. TraceAdmin should support
Kerberized cluster (Wei-Chiu Chuang via cmccabe)
Posted by cm...@apache.org.
HADOOP-13137. TraceAdmin should support Kerberized cluster (Wei-Chiu Chuang via cmccabe)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ceb06e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ceb06e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ceb06e2
Branch: refs/heads/trunk
Commit: 8ceb06e2392763726210f96bb1c176e6a9fe7b53
Parents: c7921c9
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue May 31 17:54:34 2016 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue May 31 17:54:34 2016 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/tracing/TraceAdmin.java | 16 ++++-
.../hadoop-common/src/site/markdown/Tracing.md | 9 +++
.../apache/hadoop/tracing/TestTraceAdmin.java | 69 +++++++++++++++++++-
3 files changed, 92 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb06e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
index 5fdfbfa..4cf1ead 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
@@ -29,6 +29,7 @@ import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
@@ -36,6 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* A command-line tool for viewing and modifying tracing settings.
@@ -44,6 +47,7 @@ import org.apache.hadoop.util.Tool;
public class TraceAdmin extends Configured implements Tool {
private TraceAdminProtocolPB proxy;
private TraceAdminProtocolTranslatorPB remote;
+ private static final Logger LOG = LoggerFactory.getLogger(TraceAdmin.class);
private void usage() {
PrintStream err = System.err;
@@ -61,7 +65,9 @@ public class TraceAdmin extends Configured implements Tool {
" -list: List the current span receivers.\n" +
" -remove [id]\n" +
" Remove the span receiver with the specified id. Use -list to\n" +
- " find the id of each receiver.\n"
+ " find the id of each receiver.\n" +
+ " -principal: If the daemon is Kerberized, specify the service\n" +
+ " principal name."
);
}
@@ -166,6 +172,14 @@ public class TraceAdmin extends Configured implements Tool {
System.err.println("You must specify an operation.");
return 1;
}
+ String servicePrincipal = StringUtils.popOptionWithArgument("-principal",
+ args);
+ if (servicePrincipal != null) {
+ LOG.debug("Set service principal: {}", servicePrincipal);
+ getConf().set(
+ CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+ servicePrincipal);
+ }
RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
ProtobufRpcEngine.class);
InetSocketAddress address = NetUtils.createSocketAddr(hostPort);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb06e2/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index 7b0e9ee..cbdee8a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -84,6 +84,15 @@ You can specify the configuration associated with span receiver by `-Ckey=value`
ID CLASS
2 org.apache.htrace.core.LocalFileSpanReceiver
+If the cluster is Kerberized, the service principal name must be specified using the `-principal` option.
+For example, to show the list of span receivers of a namenode:
+
+ $ hadoop trace -list -host NN1:8020 -principal namenode/NN1@EXAMPLE.COM
+
+Or, for a datanode:
+
+ $ hadoop trace -list -host DN2:9867 -principal datanode/DN2@EXAMPLE.COM
+
### Starting tracing spans by HTrace API
In order to trace, you will need to wrap the traced logic with **tracing span** as shown below.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ceb06e2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
index 71c9c56..ad20310 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
@@ -19,8 +19,12 @@ package org.apache.hadoop.tracing;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.htrace.core.Tracer;
import org.junit.Assert;
import org.junit.Test;
@@ -28,9 +32,18 @@ import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
-public class TestTraceAdmin {
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test cases for TraceAdmin.
+ */
+public class TestTraceAdmin extends SaslDataTransferTestCase {
private static final String NEWLINE = System.getProperty("line.separator");
+ private final static int ONE_DATANODE = 1;
private String runTraceCommand(TraceAdmin trace, String... cmd)
throws Exception {
@@ -58,6 +71,12 @@ public class TestTraceAdmin {
return "127.0.0.1:" + cluster.getNameNodePort();
}
+ private String getHostPortForDN(MiniDFSCluster cluster, int index) {
+ ArrayList<DataNode> dns = cluster.getDataNodes();
+ assertTrue(index >= 0 && index < dns.size());
+ return "127.0.0.1:" + dns.get(index).getIpcPort();
+ }
+
@Test
public void testCreateAndDestroySpanReceiver() throws Exception {
Configuration conf = new Configuration();
@@ -102,4 +121,52 @@ public class TestTraceAdmin {
tempDir.close();
}
}
+
+ /**
+ * Test running hadoop trace commands with -principal option against
+ * Kerberized NN and DN.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testKerberizedTraceAdmin() throws Exception {
+ MiniDFSCluster cluster = null;
+ final HdfsConfiguration conf = createSecureConfig(
+ "authentication,privacy");
+ try {
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(ONE_DATANODE)
+ .build();
+ cluster.waitActive();
+ final String nnHost = getHostPortForNN(cluster);
+ final String dnHost = getHostPortForDN(cluster, 0);
+ // login using keytab and run commands
+ UserGroupInformation
+ .loginUserFromKeytabAndReturnUGI(getHdfsPrincipal(), getHdfsKeytab())
+ .doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ // send trace command to NN
+ TraceAdmin trace = new TraceAdmin();
+ trace.setConf(conf);
+ final String[] nnTraceCmd = new String[] {
+ "-list", "-host", nnHost, "-principal",
+ conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)};
+ int ret = trace.run(nnTraceCmd);
+ assertEquals(0, ret);
+ // send trace command to DN
+ final String[] dnTraceCmd = new String[] {
+ "-list", "-host", dnHost, "-principal",
+ conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)};
+ ret = trace.run(dnTraceCmd);
+ assertEquals(0, ret);
+ return null;
+ }
+ });
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org