You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2013/12/06 00:41:11 UTC
svn commit: r1548329 - in
/hadoop/common/branches/HDFS-2832/hadoop-common-project: hadoop-common/
hadoop-common/src/main/java/
hadoop-common/src/main/java/org/apache/hadoop/http/
hadoop-common/src/main/java/org/apache/hadoop/ipc/ hadoop-common/src/main...
Author: arp
Date: Thu Dec 5 23:41:09 2013
New Revision: 1548329
URL: http://svn.apache.org/r1548329
Log:
Merging r1547658 through r1548328 from trunk to branch HDFS-2832
Added:
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
- copied unchanged from r1548328, hadoop/common/trunk/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
Modified:
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt (contents, props changed)
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/ (props changed)
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt Thu Dec 5 23:41:09 2013
@@ -393,6 +393,8 @@ Release 2.4.0 - UNRELEASED
HADOOP-10127. Add ipc.client.connect.retry.interval to control the frequency
of connection retries (Karthik Kambatla via Sandy Ryza)
+ HADOOP-10102. Update commons IO from 2.1 to 2.4 (Akira Ajisaka via stevel)
+
OPTIMIZATIONS
HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@@ -458,6 +460,11 @@ Release 2.4.0 - UNRELEASED
HADOOP-10135 writes to swift fs over partition size leave temp files and
empty output file (David Dobbins via stevel)
+ HADOOP-10129. Distcp may succeed when it fails (daryn)
+
+ HADOOP-10058. TestMetricsSystemImpl#testInitFirstVerifyStopInvokedImmediately
+ fails on trunk (Chen He via jeagles)
+
Release 2.3.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -525,6 +532,9 @@ Release 2.3.0 - UNRELEASED
HDFS-5560. Trash configuration log statements prints incorrect units.
(Josh Elser via Andrew Wang)
+ HADOOP-10081. Client.setupIOStreams can leak socket resources on exception
+ or error (Tsuyoshi OZAWA via jlowe)
+
Release 2.2.0 - 2013-10-13
INCOMPATIBLE CHANGES
@@ -2332,6 +2342,20 @@ Release 2.0.0-alpha - 05-23-2012
HADOOP-7606. Upgrade Jackson to version 1.7.1 to match the version required
by Jersey (Alejandro Abdelnur via atm)
+Release 0.23.11 - UNRELEASED
+
+ INCOMPATIBLE CHANGES
+
+ NEW FEATURES
+
+ IMPROVEMENTS
+
+ OPTIMIZATIONS
+
+ BUG FIXES
+
+ HADOOP-10129. Distcp may succeed when it fails (daryn)
+
Release 0.23.10 - UNRELEASED
INCOMPATIBLE CHANGES
Propchange: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/CHANGES.txt
------------------------------------------------------------------------------
Merged /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt:r1547755
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt:r1547658-1548328
Propchange: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java:r1547658-1548328
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java Thu Dec 5 23:41:09 2013
@@ -31,15 +31,25 @@ public class HttpConfig {
private static Policy policy;
public enum Policy {
HTTP_ONLY,
- HTTPS_ONLY;
+ HTTPS_ONLY,
+ HTTP_AND_HTTPS;
public static Policy fromString(String value) {
- if (value.equalsIgnoreCase(CommonConfigurationKeysPublic
- .HTTP_POLICY_HTTPS_ONLY)) {
+ if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
return HTTPS_ONLY;
+ } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
+ return HTTP_AND_HTTPS;
}
return HTTP_ONLY;
}
+
+ public boolean isHttpEnabled() {
+ return this == HTTP_ONLY || this == HTTP_AND_HTTPS;
+ }
+
+ public boolean isHttpsEnabled() {
+ return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
+ }
}
static {
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java Thu Dec 5 23:41:09 2013
@@ -1158,6 +1158,7 @@ public class Client {
// cleanup calls
cleanupCalls();
}
+ closeConnection();
if (LOG.isDebugEnabled())
LOG.debug(getName() + ": closed");
}
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Thu Dec 5 23:41:09 2013
@@ -1133,9 +1133,7 @@
<name>hadoop.ssl.enabled</name>
<value>false</value>
<description>
- Whether to use SSL for the HTTP endpoints. If set to true, the
- NameNode, DataNode, ResourceManager, NodeManager, HistoryServer and
- MapReduceAppMaster web UIs will be served over HTTPS instead HTTP.
+ Deprecated. Use dfs.http.policy and yarn.http.policy instead.
</description>
</property>
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm Thu Dec 5 23:41:09 2013
@@ -754,6 +754,10 @@ KVNO Timestamp Principal
| | | Enable HDFS block access tokens for secure operations. |
*-------------------------+-------------------------+------------------------+
| <<<dfs.https.enable>>> | <true> | |
+| | | This value is deprecated. Use dfs.http.policy |
+*-------------------------+-------------------------+------------------------+
+| <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
+| | | HTTPS_ONLY turns off http access |
*-------------------------+-------------------------+------------------------+
| <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
*-------------------------+-------------------------+------------------------+
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java Thu Dec 5 23:41:09 2013
@@ -85,6 +85,7 @@ public class TestMetricsSystemImpl {
}
@Test public void testInitFirstVerifyStopInvokedImmediately() throws Exception {
+ DefaultMetricsSystem.shutdown();
new ConfigBuilder().add("*.period", 8)
//.add("test.sink.plugin.urls", getPluginUrlsAsString())
.add("test.sink.test.class", TestSink.class.getName())
Modified: hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java?rev=1548329&r1=1548328&r2=1548329&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java Thu Dec 5 23:41:09 2013
@@ -24,6 +24,7 @@ import java.io.InputStreamReader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
@@ -44,13 +45,21 @@ public class IdUserGroup {
// Do update every 15 minutes
final static long TIMEOUT = 15 * 60 * 1000; // ms
- // Maps for id to name map. Guarded by this object monitor lock */
+ // Maps for id to name map. Guarded by this object monitor lock
private BiMap<Integer, String> uidNameMap = HashBiMap.create();
private BiMap<Integer, String> gidNameMap = HashBiMap.create();
private long lastUpdateTime = 0; // Last time maps were updated
- public IdUserGroup() {
+ static public class DuplicateNameOrIdException extends IOException {
+ private static final long serialVersionUID = 1L;
+
+ public DuplicateNameOrIdException(String msg) {
+ super(msg);
+ }
+ }
+
+ public IdUserGroup() throws IOException {
updateMaps();
}
@@ -58,18 +67,34 @@ public class IdUserGroup {
return lastUpdateTime - System.currentTimeMillis() > TIMEOUT;
}
+ // If can't update the maps, will keep using the old ones
private void checkAndUpdateMaps() {
if (isExpired()) {
LOG.info("Update cache now");
- updateMaps();
+ try {
+ updateMaps();
+ } catch (IOException e) {
+ LOG.error("Can't update the maps. Will use the old ones,"
+ + " which can potentially cause problem.", e);
+ }
}
}
+ private static final String DUPLICATE_NAME_ID_DEBUG_INFO = "NFS gateway can't start with duplicate name or id on the host system.\n"
+ + "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
+ + "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
+ + "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
+ + "Therefore, same name means the same user or same group. To find the duplicated names/ids, one can do:\n"
+ "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systems,\n"
+ + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
+
/**
* Get the whole list of users and groups and save them in the maps.
+ * @throws IOException
*/
- private void updateMapInternal(BiMap<Integer, String> map, String name,
- String command, String regex) throws IOException {
+ @VisibleForTesting
+ public static void updateMapInternal(BiMap<Integer, String> map, String mapName,
+ String command, String regex) throws IOException {
BufferedReader br = null;
try {
Process process = Runtime.getRuntime().exec(
@@ -79,15 +104,31 @@ public class IdUserGroup {
while ((line = br.readLine()) != null) {
String[] nameId = line.split(regex);
if ((nameId == null) || (nameId.length != 2)) {
- throw new IOException("Can't parse " + name + " list entry:" + line);
+ throw new IOException("Can't parse " + mapName + " list entry:" + line);
+ }
+ LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
+ // HDFS can't differentiate duplicate names with simple authentication
+ Integer key = Integer.valueOf(nameId[1]);
+ String value = nameId[0];
+ if (map.containsKey(key)) {
+ LOG.error(String.format(
+ "Got duplicate id:(%d, %s), existing entry: (%d, %s).\n%s", key,
+ value, key, map.get(key), DUPLICATE_NAME_ID_DEBUG_INFO));
+ throw new DuplicateNameOrIdException("Got duplicate id.");
+ }
+ if (map.containsValue(nameId[0])) {
+ LOG.error(String.format(
+ "Got duplicate name:(%d, %s), existing entry: (%d, %s) \n%s",
+ key, value, map.inverse().get(value), value,
+ DUPLICATE_NAME_ID_DEBUG_INFO));
+ throw new DuplicateNameOrIdException("Got duplicate name");
}
- LOG.debug("add " + name + ":" + nameId[0] + " id:" + nameId[1]);
map.put(Integer.valueOf(nameId[1]), nameId[0]);
}
- LOG.info("Updated " + name + " map size:" + map.size());
+ LOG.info("Updated " + mapName + " map size:" + map.size());
} catch (IOException e) {
- LOG.error("Can't update map " + name);
+ LOG.error("Can't update " + mapName + " map");
throw e;
} finally {
if (br != null) {
@@ -101,24 +142,26 @@ public class IdUserGroup {
}
}
- synchronized public void updateMaps() {
+ synchronized public void updateMaps() throws IOException {
BiMap<Integer, String> uMap = HashBiMap.create();
BiMap<Integer, String> gMap = HashBiMap.create();
- try {
- if (OS.startsWith("Linux")) {
- updateMapInternal(uMap, "user", LINUX_GET_ALL_USERS_CMD, ":");
- updateMapInternal(gMap, "group", LINUX_GET_ALL_GROUPS_CMD, ":");
- } else if (OS.startsWith("Mac")) {
- updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+");
- updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+");
- } else {
- throw new IOException("Platform is not supported:" + OS);
- }
- } catch (IOException e) {
- LOG.error("Can't update maps:" + e);
+ if (!OS.startsWith("Linux") && !OS.startsWith("Mac")) {
+ LOG.error("Platform is not supported:" + OS
+ + ". Can't update user map and group map and"
+ + " 'nobody' will be used for any user and group.");
return;
}
+
+ if (OS.startsWith("Linux")) {
+ updateMapInternal(uMap, "user", LINUX_GET_ALL_USERS_CMD, ":");
+ updateMapInternal(gMap, "group", LINUX_GET_ALL_GROUPS_CMD, ":");
+ } else {
+ // Mac
+ updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+");
+ updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+");
+ }
+
uidNameMap = uMap;
gidNameMap = gMap;
lastUpdateTime = System.currentTimeMillis();