Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/07/02 20:32:18 UTC
[01/45] hadoop git commit: YARN-8214. Change default RegistryDNS port. Contributed by Billie Rinaldi [Forced Update!]
Repository: hadoop
Updated Branches:
refs/heads/HDDS-4 dc5760b0a -> 7ca014424 (forced update)
YARN-8214. Change default RegistryDNS port.
Contributed by Billie Rinaldi
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e586330
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e586330
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e586330
Branch: refs/heads/HDDS-4
Commit: 3e586330eb2c7db0b884fe328b171fb27ce545fa
Parents: 238fe00
Author: Eric Yang <ey...@apache.org>
Authored: Tue Jun 26 14:25:15 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Jun 26 14:25:15 2018 -0400
----------------------------------------------------------------------
.../hadoop/registry/client/api/RegistryConstants.java | 2 +-
.../src/site/markdown/yarn-service/RegistryDNS.md | 10 +++++-----
.../src/site/markdown/yarn-service/ServiceDiscovery.md | 6 +++---
3 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e586330/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
index cfa2d65..bd97a5a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -95,7 +95,7 @@ public interface RegistryConstants {
/**
* Default DNS port number.
*/
- int DEFAULT_DNS_PORT = 5353;
+ int DEFAULT_DNS_PORT = 5335;
/**
* DNSSEC Enabled?
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e586330/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md
index 2307e5c..642d26e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/RegistryDNS.md
@@ -58,7 +58,7 @@ primary DNS server can be configured to forward a zone to the registry DNS
server.
2. The DNS Server exposes a port that can receive both TCP and UDP requests per
DNS standards. The default port for DNS protocols is not in the restricted
-range (5353). However, existing DNS assets may only allow zone forwarding to
+range (5335). However, existing DNS assets may only allow zone forwarding to
non-custom ports. To support this, the registry DNS server can be started in
privileged mode.
@@ -136,7 +136,7 @@ standard DNS requests from users or other DNS servers (for example, DNS servers
RegistryDNS service configured as a forwarder).
## Start the DNS Server
-By default, the DNS server runs on non-privileged port `5353`. Start the server
+By default, the DNS server runs on non-privileged port `5335`. Start the server
with:
```
yarn --daemon start registrydns
@@ -157,7 +157,7 @@ The Registry DNS server reads its configuration properties from the yarn-site.xm
| hadoop.registry.dns.enabled | The DNS functionality is enabled for the cluster. Default is false. |
| hadoop.registry.dns.domain-name | The domain name for Hadoop cluster associated records. |
| hadoop.registry.dns.bind-address | Address associated with the network interface to which the DNS listener should bind. |
-| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 5353. |
+| hadoop.registry.dns.bind-port | The port number for the DNS listener. The default port is 5335. |
| hadoop.registry.dns.dnssec.enabled | Indicates whether the DNSSEC support is enabled. Default is false. |
| hadoop.registry.dns.public-key | The base64 representation of the server’s public key. Leveraged for creating the DNSKEY Record provided for DNSSEC client requests. |
| hadoop.registry.dns.private-key-file | The path to the standard DNSSEC private key file. Must only be readable by the DNS launching identity. See [dnssec-keygen](https://ftp.isc.org/isc/bind/cur/9.9/doc/arm/man.dnssec-keygen.html) documentation. |
@@ -174,10 +174,10 @@ The Registry DNS server reads its configuration properties from the yarn-site.xm
</property>
<property>
- <description>The port number for the DNS listener. The default port is 5353.
+ <description>The port number for the DNS listener. The default port is 5335.
If the standard privileged port 53 is used, make sure to start the DNS with jsvc support.</description>
<name>hadoop.registry.dns.bind-port</name>
- <value>5353</value>
+ <value>5335</value>
</property>
<property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e586330/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
index 7ee16dd..6b93f3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceDiscovery.md
@@ -94,10 +94,10 @@ section of [Registry DNS](RegistryDNS.html).
</property>
<property>
- <description>The port number for the DNS listener. The default port is 5353.
+ <description>The port number for the DNS listener. The default port is 5335.
If the standard privileged port 53 is used, make sure to start the DNS with jsvc support.</description>
<name>hadoop.registry.dns.bind-port</name>
- <value>5353</value>
+ <value>5335</value>
</property>
<property>
@@ -135,7 +135,7 @@ To configure Registry DNS to serve reverse lookup for `172.17.0.0/24`
</property>
```
## Start Registry DNS Server
-By default, the DNS server runs on non-privileged port `5353`. Start the server
+By default, the DNS server runs on non-privileged port `5335`. Start the server
with:
```
yarn --daemon start registrydns
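For context, a minimal sketch (not part of this patch; the class name is hypothetical) of how client code could read the rebound port, using the "hadoop.registry.dns.bind-port" key and the new 5335 default shown in the diff above. Port 5353 is the port registered for multicast DNS, which the new default avoids.

    import org.apache.hadoop.conf.Configuration;

    public class DnsPortLookup {
      public static void main(String[] args) {
        // Assumes yarn-site.xml is on the classpath; the key and the
        // default below match the values in the YARN-8214 diff above.
        Configuration conf = new Configuration();
        conf.addResource("yarn-site.xml");
        int port = conf.getInt("hadoop.registry.dns.bind-port", 5335);
        System.out.println("RegistryDNS listens on port " + port);
      }
    }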
[05/45] hadoop git commit: YARN-8464. Async scheduling thread could be interrupted when there are no NodeManagers in cluster. (Sunil G via wangda)
Posted by xy...@apache.org.
YARN-8464. Async scheduling thread could be interrupted when there are no NodeManagers in cluster. (Sunil G via wangda)
Change-Id: I4f5f856373378685713e77752ba6cf0988a66065
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bedc4fe0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bedc4fe0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bedc4fe0
Branch: refs/heads/HDDS-4
Commit: bedc4fe0799cf3b161100acc521fc62a97793427
Parents: ada8f63
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jun 26 19:27:17 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Jun 26 19:27:17 2018 -0700
----------------------------------------------------------------------
.../scheduler/capacity/CapacityScheduler.java | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bedc4fe0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 50ab70d..54bbf24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -521,7 +521,14 @@ public class CapacityScheduler extends
// First randomize the start point
int current = 0;
Collection<FiCaSchedulerNode> nodes = cs.nodeTracker.getAllNodes();
- int start = random.nextInt(nodes.size());
+
+ // If the node count is 0 (i.e. there are no NodeManagers registered),
+ // we can return from here immediately.
+ int nodeSize = nodes.size();
+ if(nodeSize == 0) {
+ return;
+ }
+ int start = random.nextInt(nodeSize);
// To avoid too verbose DEBUG logging, only print debug log once for
// every 10 secs.
@@ -574,6 +581,7 @@ public class CapacityScheduler extends
@Override
public void run() {
+ int debuggingLogCounter = 0;
while (!Thread.currentThread().isInterrupted()) {
try {
if (!runSchedules.get()) {
@@ -585,6 +593,14 @@ public class CapacityScheduler extends
Thread.sleep(1);
} else{
schedule(cs);
+ if(LOG.isDebugEnabled()) {
+ // Adding a debug log here to ensure that the thread is alive
+ // and running fine.
+ if (debuggingLogCounter++ > 10000) {
+ debuggingLogCounter = 0;
+ LOG.debug("AsyncScheduleThread[" + getName() + "] is running!");
+ }
+ }
}
}
} catch (InterruptedException ie) {
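The guard matters because java.util.Random.nextInt(bound) throws IllegalArgumentException for a non-positive bound; with zero NodeManagers registered, nodes.size() is 0 and the exception escapes into the async scheduling loop. A standalone sketch of the failure mode and the early return (hypothetical class name, not Hadoop code):

    import java.util.Random;

    public class EmptyClusterGuardDemo {
      public static void main(String[] args) {
        Random random = new Random();
        int nodeSize = 0; // no NodeManagers registered yet

        // The guard added by the patch: bail out before picking a start index.
        if (nodeSize == 0) {
          System.out.println("No nodes registered; skipping this pass.");
          return;
        }

        // Without the guard, this line throws IllegalArgumentException,
        // because Random.nextInt requires a strictly positive bound.
        int start = random.nextInt(nodeSize);
        System.out.println("start offset = " + start);
      }
    }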
[40/45] hadoop git commit: HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c73c726
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c73c726
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c73c726
Branch: refs/heads/HDDS-4
Commit: 4c73c7264301f00725b4223f92dd5c024cf97f7c
Parents: 3a0db7f
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri May 18 13:09:17 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:02 2018 -0700
----------------------------------------------------------------------
.../src/test/compose/compose-secure/.env | 17 ++++
.../compose/compose-secure/docker-compose.yaml | 66 ++++++++++++++
.../test/compose/compose-secure/docker-config | 66 ++++++++++++++
.../acceptance/ozone-secure.robot | 95 ++++++++++++++++++++
.../hadoop/ozone/client/rest/RestClient.java | 4 +-
.../hadoop/ozone/client/rpc/RpcClient.java | 6 +-
6 files changed, 248 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73c726/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
new file mode 100644
index 0000000..3254735
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONEDIR=../../../hadoop-dist/target/ozone
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73c726/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
new file mode 100644
index 0000000..2661163
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+ ozone.kdc:
+ image: ahadoop/kdc:v1
+ namenode:
+ image: ahadoop/ozone:v1
+ hostname: namenode
+ volumes:
+ - ${OZONEDIR}:/opt/hadoop
+ ports:
+ - 9000:9000
+ environment:
+ ENSURE_NAMENODE_DIR: /data/namenode
+ env_file:
+ - ./docker-config
+ command: ["/opt/hadoop/bin/hdfs","namenode"]
+ datanode:
+ image: ahadoop/ozone:v1
+ hostname: datanode
+ volumes:
+ - ${OZONEDIR}:/opt/hadoop
+ ports:
+ - 9874
+ env_file:
+ - ./docker-config
+ command: ["/opt/hadoop/bin/ozone","datanode"]
+ ksm:
+ image: ahadoop/ozone:v1
+ hostname: ksm
+ volumes:
+ - ${OZONEDIR}:/opt/hadoop
+ ports:
+ - 9874:9874
+ environment:
+ ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+ env_file:
+ - ./docker-config
+ command: ["/opt/hadoop/bin/ozone","ksm"]
+ scm:
+ image: ahadoop/ozone:v1
+ hostname: scm
+ volumes:
+ - ${OZONEDIR}:/opt/hadoop
+ ports:
+ - 9876:9876
+ env_file:
+ - ./docker-config
+ environment:
+ ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+ command: ["/opt/hadoop/bin/ozone","scm"]
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73c726/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
new file mode 100644
index 0000000..678c75a
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_hdds.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_hdds.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_hdds.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
+OZONE-SITE.XML_ozone.ksm.kerberos.principal=ksm/ksm@EXAMPLE.COM
+OZONE-SITE.XML_ozone.ksm.kerberos.keytab.file=/etc/security/keytabs/ksm.keytab
+OZONE-SITE.XML_ozone.security.enabled=true
+OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.principal=HTTP/ksm@EXAMPLE.COM
+OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.scm.client.address=scm
+HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.block.access.token.enable=true
+HDFS-SITE.XML_dfs.namenode.kerberos.principal=nn/namenode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.namenode.keytab.file=/etc/security/keytabs/nn.keytab
+HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/datanode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
+HDFS-SITE.XML_dfs.namenode.kerberos.internal.spnego.principal=HTTP/namenode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
+HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
+HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
+HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
+CORE-SITE.XML_dfs.data.transfer.protection=authentication
+CORE-SITE.XML_hadoop.security.authentication=kerberos
+CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+
+OZONE_DATANODE_SECURE_USER=root
+CONF_DIR=/etc/security/keytabs
+KERBEROS_KEYTABS=dn nn ksm scm HTTP testuser
+KERBEROS_KEYSTORES=hadoop
+KERBEROS_SERVER=ozone.kdc
+JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
+JSVC_HOME=/usr/bin
+SLEEP_SECONDS=10
+KERBEROS_ENABLED=true
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73c726/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
new file mode 100644
index 0000000..4a78980
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
@@ -0,0 +1,95 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation Smoke test to start cluster with docker-compose environments.
+Library OperatingSystem
+Suite Setup Startup Ozone Cluster
+Suite Teardown Teardown Ozone Cluster
+
+*** Variables ***
+${COMMON_REST_HEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
+${version}
+
+*** Test Cases ***
+
+Daemons are running
+ Is daemon running ksm
+ Is daemon running scm
+ Is daemon running datanode
+ Is daemon running ozone.kdc
+
+Check if datanode is connected to the scm
+ Wait Until Keyword Succeeds 3min 5sec Have healthy datanodes 1
+
+Test rest interface
+ ${result} = Execute on 0 datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
+ Should contain ${result} 201 Created
+ ${result} = Execute on 0 datanode curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
+ Should contain ${result} 201 Created
+ ${result} = Execute on 0 datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
+ Should contain ${result} 200 OK
+ ${result} = Execute on 0 datanode curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
+ Should contain ${result} 200 OK
+
+Test ozone cli
+ ${result} = Execute on 1 datanode ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+ Should contain ${result} Client cannot authenticate via
+ # Authenticate testuser
+ Execute on 0 datanode kinit -k testuser/datanode@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
+ Execute on 0 datanode ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+ ${result} = Execute on 0 datanode ozone oz -listVolume o3://ksm/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+ Should contain ${result} createdOn
+ Execute on 0 datanode ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
+ ${result} = Execute on 0 datanode ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+ Should Be Equal ${result} bill
+
+*** Keywords ***
+
+Startup Ozone Cluster
+ ${rc} ${output} = Run docker compose 0 down
+ ${rc} ${output} = Run docker compose 0 up -d
+ Should Be Equal As Integers ${rc} 0
+ Wait Until Keyword Succeeds 3min 10sec Is Daemon started ksm KSM is listening
+
+Teardown Ozone Cluster
+ Run docker compose 0 down
+
+Is daemon running
+ [arguments] ${name}
+ ${result} = Run docker ps
+ Should contain ${result} _${name}_1
+
+Is Daemon started
+ [arguments] ${name} ${expression}
+ ${rc} ${result} = Run docker compose 0 logs
+ Should contain ${result} ${expression}
+
+Have healthy datanodes
+ [arguments] ${requirednodes}
+ ${result} = Execute on 0 scm curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+ Should Be Equal ${result} ${requirednodes}
+
+Execute on
+ [arguments] ${expected_rc} ${componentname} ${command}
+ ${rc} ${return} = Run docker compose ${expected_rc} exec ${componentname} ${command}
+ [return] ${return}
+
+Run docker compose
+ [arguments] ${expected_rc} ${command}
+ Set Environment Variable OZONEDIR ${basedir}/hadoop-dist/target/ozone
+ ${rc} ${output} = Run And Return Rc And Output docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml ${command}
+ Should Be Equal As Integers ${rc} ${expected_rc}
+ [return] ${rc} ${output}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73c726/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 6e3f617..5585e4c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -115,7 +115,7 @@ public class RestClient implements ClientProtocol {
try {
Preconditions.checkNotNull(conf);
this.conf = conf;
-
+ this.ugi = UserGroupInformation.getCurrentUser();
long socketTimeout = conf.getTimeDuration(
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
@@ -151,7 +151,7 @@ public class RestClient implements ClientProtocol {
.setConnectTimeout(Math.toIntExact(connectionTimeout))
.build())
.build();
- this.ugi = UserGroupInformation.getCurrentUser();
+
this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c73c726/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 43b94a1..f8e92b2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -122,8 +122,7 @@ public class RpcClient implements ClientProtocol {
this.keySpaceManagerClient =
new KeySpaceManagerProtocolClientSideTranslatorPB(
RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion,
- ksmAddress, UserGroupInformation.getCurrentUser(), conf,
- NetUtils.getDefaultSocketFactory(conf),
+ ksmAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
long scmVersion =
@@ -134,8 +133,7 @@ public class RpcClient implements ClientProtocol {
this.storageContainerLocationClient =
new StorageContainerLocationProtocolClientSideTranslatorPB(
RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
- scmAddress, UserGroupInformation.getCurrentUser(), conf,
- NetUtils.getDefaultSocketFactory(conf),
+ scmAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
Client.getRpcTimeout(conf)));
this.xceiverClientManager = new XceiverClientManager(conf);
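The RpcClient change resolves the caller's UserGroupInformation once and reuses it for both the KSM and SCM proxies, instead of calling UserGroupInformation.getCurrentUser() per proxy. A hedged sketch of the client-side Kerberos login this relies on, reusing the testuser principal and keytab path from the robot test above (class name hypothetical; a sketch, not the patch's code):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public class OzoneKerberosLoginSketch {
      public static void main(String[] args) throws IOException {
        // Matches CORE-SITE.XML_hadoop.security.authentication=kerberos
        // from the docker-config above.
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Principal and keytab path taken from the acceptance test above.
        UserGroupInformation.loginUserFromKeytab(
            "testuser/datanode@EXAMPLE.COM",
            "/etc/security/keytabs/testuser.keytab");

        // The patch fetches the UGI once and hands it to each RPC proxy.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        System.out.println("Authenticated as " + ugi.getUserName());
      }
    }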
[34/45] hadoop git commit: HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by Todd Lipcon.
Posted by xy...@apache.org.
HADOOP-15554. Improve JIT performance for Configuration parsing. Contributed by Todd Lipcon.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f51da9c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f51da9c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f51da9c4
Branch: refs/heads/HDDS-4
Commit: f51da9c4d1423c2ac92eb4f40e973264e7e968cc
Parents: 5d748bd
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 2 18:31:21 2018 +0200
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 2 18:31:21 2018 +0200
----------------------------------------------------------------------
.../org/apache/hadoop/conf/Configuration.java | 458 +++++++++++--------
1 file changed, 276 insertions(+), 182 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f51da9c4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index b1125e5..a78e311 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -41,6 +41,7 @@ import java.io.Writer;
import java.lang.ref.WeakReference;
import java.net.InetSocketAddress;
import java.net.JarURLConnection;
+import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
@@ -2981,187 +2982,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
if(returnCachedProperties) {
toAddTo = new Properties();
}
- DeprecationContext deprecations = deprecationContext.get();
- StringBuilder token = new StringBuilder();
- String confName = null;
- String confValue = null;
- String confInclude = null;
- String confTag = null;
- boolean confFinal = false;
- boolean fallbackAllowed = false;
- boolean fallbackEntered = false;
- boolean parseToken = false;
- LinkedList<String> confSource = new LinkedList<String>();
-
- while (reader.hasNext()) {
- switch (reader.next()) {
- case XMLStreamConstants.START_ELEMENT:
- switch (reader.getLocalName()) {
- case "property":
- confName = null;
- confValue = null;
- confFinal = false;
- confTag = null;
- confSource.clear();
-
- // First test for short format configuration
- int attrCount = reader.getAttributeCount();
- for (int i = 0; i < attrCount; i++) {
- String propertyAttr = reader.getAttributeLocalName(i);
- if ("name".equals(propertyAttr)) {
- confName = StringInterner.weakIntern(
- reader.getAttributeValue(i));
- } else if ("value".equals(propertyAttr)) {
- confValue = StringInterner.weakIntern(
- reader.getAttributeValue(i));
- } else if ("final".equals(propertyAttr)) {
- confFinal = "true".equals(reader.getAttributeValue(i));
- } else if ("source".equals(propertyAttr)) {
- confSource.add(StringInterner.weakIntern(
- reader.getAttributeValue(i)));
- } else if ("tag".equals(propertyAttr)) {
- confTag = StringInterner
- .weakIntern(reader.getAttributeValue(i));
- }
- }
- break;
- case "name":
- case "value":
- case "final":
- case "source":
- case "tag":
- parseToken = true;
- token.setLength(0);
- break;
- case "include":
- // Determine href for xi:include
- confInclude = null;
- attrCount = reader.getAttributeCount();
- for (int i = 0; i < attrCount; i++) {
- String attrName = reader.getAttributeLocalName(i);
- if ("href".equals(attrName)) {
- confInclude = reader.getAttributeValue(i);
- }
- }
- if (confInclude == null) {
- break;
- }
- if (isRestricted) {
- throw new RuntimeException("Error parsing resource " + wrapper
- + ": XInclude is not supported for restricted resources");
- }
- // Determine if the included resource is a classpath resource
- // otherwise fallback to a file resource
- // xi:include are treated as inline and retain current source
- URL include = getResource(confInclude);
- if (include != null) {
- Resource classpathResource = new Resource(include, name,
- wrapper.isParserRestricted());
- loadResource(properties, classpathResource, quiet);
- } else {
- URL url;
- try {
- url = new URL(confInclude);
- url.openConnection().connect();
- } catch (IOException ioe) {
- File href = new File(confInclude);
- if (!href.isAbsolute()) {
- // Included resources are relative to the current resource
- File baseFile = new File(name).getParentFile();
- href = new File(baseFile, href.getPath());
- }
- if (!href.exists()) {
- // Resource errors are non-fatal iff there is 1 xi:fallback
- fallbackAllowed = true;
- break;
- }
- url = href.toURI().toURL();
- }
- Resource uriResource = new Resource(url, name,
- wrapper.isParserRestricted());
- loadResource(properties, uriResource, quiet);
- }
- break;
- case "fallback":
- fallbackEntered = true;
- break;
- case "configuration":
- break;
- default:
- break;
- }
- break;
-
- case XMLStreamConstants.CHARACTERS:
- if (parseToken) {
- char[] text = reader.getTextCharacters();
- token.append(text, reader.getTextStart(), reader.getTextLength());
- }
- break;
-
- case XMLStreamConstants.END_ELEMENT:
- switch (reader.getLocalName()) {
- case "name":
- if (token.length() > 0) {
- confName = StringInterner.weakIntern(token.toString().trim());
- }
- break;
- case "value":
- if (token.length() > 0) {
- confValue = StringInterner.weakIntern(token.toString());
- }
- break;
- case "final":
- confFinal = "true".equals(token.toString());
- break;
- case "source":
- confSource.add(StringInterner.weakIntern(token.toString()));
- break;
- case "tag":
- if (token.length() > 0) {
- confTag = StringInterner.weakIntern(token.toString());
- }
- break;
- case "include":
- if (fallbackAllowed && !fallbackEntered) {
- throw new IOException("Fetch fail on include for '"
- + confInclude + "' with no fallback while loading '"
- + name + "'");
- }
- fallbackAllowed = false;
- fallbackEntered = false;
- break;
- case "property":
- if (confName == null || (!fallbackAllowed && fallbackEntered)) {
- break;
- }
- confSource.add(name);
- // Read tags and put them in propertyTagsMap
- if (confTag != null) {
- readTagFromConfig(confTag, confName, confValue, confSource);
- }
-
- DeprecatedKeyInfo keyInfo =
- deprecations.getDeprecatedKeyMap().get(confName);
- if (keyInfo != null) {
- keyInfo.clearAccessed();
- for (String key : keyInfo.newKeys) {
- // update new keys with deprecated key's value
- loadProperty(toAddTo, name, key, confValue, confFinal,
- confSource.toArray(new String[confSource.size()]));
- }
- } else {
- loadProperty(toAddTo, name, confName, confValue, confFinal,
- confSource.toArray(new String[confSource.size()]));
- }
- break;
- default:
- break;
- }
- default:
- break;
- }
+ List<ParsedItem> items = new Parser(reader, wrapper, quiet).parse();
+ for (ParsedItem item : items) {
+ loadProperty(toAddTo, item.name, item.key, item.value,
+ item.isFinal, item.sources);
}
reader.close();
@@ -3179,6 +3004,275 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
}
}
+ private static class ParsedItem {
+ String name;
+ String key;
+ String value;
+ boolean isFinal;
+ String[] sources;
+
+ ParsedItem(String name, String key, String value,
+ boolean isFinal, String[] sources) {
+ this.name = name;
+ this.key = key;
+ this.value = value;
+ this.isFinal = isFinal;
+ this.sources = sources;
+ }
+ }
+
+ /**
+ * Parser to consume SAX stream of XML elements from a Configuration.
+ */
+ private class Parser {
+ private final XMLStreamReader2 reader;
+ private final Resource wrapper;
+ private final String name;
+ private final String[] nameSingletonArray;
+ private final boolean isRestricted;
+ private final boolean quiet;
+
+ DeprecationContext deprecations = deprecationContext.get();
+
+ private StringBuilder token = new StringBuilder();
+ private String confName = null;
+ private String confValue = null;
+ private String confInclude = null;
+ private String confTag = null;
+ private boolean confFinal = false;
+ private boolean fallbackAllowed = false;
+ private boolean fallbackEntered = false;
+ private boolean parseToken = false;
+ private List<String> confSource = new ArrayList<>();
+ private List<ParsedItem> results = new ArrayList<>();
+
+ Parser(XMLStreamReader2 reader,
+ Resource wrapper,
+ boolean quiet) {
+ this.reader = reader;
+ this.wrapper = wrapper;
+ this.name = wrapper.getName();
+ this.nameSingletonArray = new String[]{ name };
+ this.isRestricted = wrapper.isParserRestricted();
+ this.quiet = quiet;
+
+ }
+
+ List<ParsedItem> parse() throws IOException, XMLStreamException {
+ while (reader.hasNext()) {
+ parseNext();
+ }
+ return results;
+ }
+
+ private void handleStartElement() throws MalformedURLException {
+ switch (reader.getLocalName()) {
+ case "property":
+ handleStartProperty();
+ break;
+
+ case "name":
+ case "value":
+ case "final":
+ case "source":
+ case "tag":
+ parseToken = true;
+ token.setLength(0);
+ break;
+ case "include":
+ handleInclude();
+ break;
+ case "fallback":
+ fallbackEntered = true;
+ break;
+ case "configuration":
+ break;
+ default:
+ break;
+ }
+ }
+
+ private void handleStartProperty() {
+ confName = null;
+ confValue = null;
+ confFinal = false;
+ confTag = null;
+ confSource.clear();
+
+ // First test for short format configuration
+ int attrCount = reader.getAttributeCount();
+ for (int i = 0; i < attrCount; i++) {
+ String propertyAttr = reader.getAttributeLocalName(i);
+ if ("name".equals(propertyAttr)) {
+ confName = StringInterner.weakIntern(
+ reader.getAttributeValue(i));
+ } else if ("value".equals(propertyAttr)) {
+ confValue = StringInterner.weakIntern(
+ reader.getAttributeValue(i));
+ } else if ("final".equals(propertyAttr)) {
+ confFinal = "true".equals(reader.getAttributeValue(i));
+ } else if ("source".equals(propertyAttr)) {
+ confSource.add(StringInterner.weakIntern(
+ reader.getAttributeValue(i)));
+ } else if ("tag".equals(propertyAttr)) {
+ confTag = StringInterner
+ .weakIntern(reader.getAttributeValue(i));
+ }
+ }
+ }
+
+ private void handleInclude() throws MalformedURLException {
+ // Determine href for xi:include
+ confInclude = null;
+ int attrCount = reader.getAttributeCount();
+ for (int i = 0; i < attrCount; i++) {
+ String attrName = reader.getAttributeLocalName(i);
+ if ("href".equals(attrName)) {
+ confInclude = reader.getAttributeValue(i);
+ }
+ }
+ if (confInclude == null) {
+ return;
+ }
+ if (isRestricted) {
+ throw new RuntimeException("Error parsing resource " + wrapper
+ + ": XInclude is not supported for restricted resources");
+ }
+ // Determine if the included resource is a classpath resource
+ // otherwise fallback to a file resource
+ // xi:include are treated as inline and retain current source
+ URL include = getResource(confInclude);
+ if (include != null) {
+ Resource classpathResource = new Resource(include, name,
+ wrapper.isParserRestricted());
+ // This is only called recursively while the lock is already held
+ // by this thread, but synchronizing avoids a findbugs warning.
+ synchronized (Configuration.this) {
+ loadResource(properties, classpathResource, quiet);
+ }
+ } else {
+ URL url;
+ try {
+ url = new URL(confInclude);
+ url.openConnection().connect();
+ } catch (IOException ioe) {
+ File href = new File(confInclude);
+ if (!href.isAbsolute()) {
+ // Included resources are relative to the current resource
+ File baseFile = new File(name).getParentFile();
+ href = new File(baseFile, href.getPath());
+ }
+ if (!href.exists()) {
+ // Resource errors are non-fatal iff there is 1 xi:fallback
+ fallbackAllowed = true;
+ return;
+ }
+ url = href.toURI().toURL();
+ }
+ Resource uriResource = new Resource(url, name,
+ wrapper.isParserRestricted());
+ // This is only called recursively while the lock is already held
+ // by this thread, but synchronizing avoids a findbugs warning.
+ synchronized (Configuration.this) {
+ loadResource(properties, uriResource, quiet);
+ }
+ }
+ }
+
+ void handleEndElement() throws IOException {
+ String tokenStr = token.toString();
+ switch (reader.getLocalName()) {
+ case "name":
+ if (token.length() > 0) {
+ confName = StringInterner.weakIntern(tokenStr.trim());
+ }
+ break;
+ case "value":
+ if (token.length() > 0) {
+ confValue = StringInterner.weakIntern(tokenStr);
+ }
+ break;
+ case "final":
+ confFinal = "true".equals(tokenStr);
+ break;
+ case "source":
+ confSource.add(StringInterner.weakIntern(tokenStr));
+ break;
+ case "tag":
+ if (token.length() > 0) {
+ confTag = StringInterner.weakIntern(tokenStr);
+ }
+ break;
+ case "include":
+ if (fallbackAllowed && !fallbackEntered) {
+ throw new IOException("Fetch fail on include for '"
+ + confInclude + "' with no fallback while loading '"
+ + name + "'");
+ }
+ fallbackAllowed = false;
+ fallbackEntered = false;
+ break;
+ case "property":
+ handleEndProperty();
+ break;
+ default:
+ break;
+ }
+ }
+
+ void handleEndProperty() {
+ if (confName == null || (!fallbackAllowed && fallbackEntered)) {
+ return;
+ }
+ String[] confSourceArray;
+ if (confSource.isEmpty()) {
+ confSourceArray = nameSingletonArray;
+ } else {
+ confSource.add(name);
+ confSourceArray = confSource.toArray(new String[confSource.size()]);
+ }
+
+ // Read tags and put them in propertyTagsMap
+ if (confTag != null) {
+ readTagFromConfig(confTag, confName, confValue, confSourceArray);
+ }
+
+ DeprecatedKeyInfo keyInfo =
+ deprecations.getDeprecatedKeyMap().get(confName);
+
+ if (keyInfo != null) {
+ keyInfo.clearAccessed();
+ for (String key : keyInfo.newKeys) {
+ // update new keys with deprecated key's value
+ results.add(new ParsedItem(
+ name, key, confValue, confFinal, confSourceArray));
+ }
+ } else {
+ results.add(new ParsedItem(name, confName, confValue, confFinal,
+ confSourceArray));
+ }
+ }
+
+ void parseNext() throws IOException, XMLStreamException {
+ switch (reader.next()) {
+ case XMLStreamConstants.START_ELEMENT:
+ handleStartElement();
+ break;
+ case XMLStreamConstants.CHARACTERS:
+ if (parseToken) {
+ char[] text = reader.getTextCharacters();
+ token.append(text, reader.getTextStart(), reader.getTextLength());
+ }
+ break;
+ case XMLStreamConstants.END_ELEMENT:
+ handleEndElement();
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
/**
* Add tags defined in HADOOP_TAGS_SYSTEM, HADOOP_TAGS_CUSTOM.
* @param prop
@@ -3225,7 +3319,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
* @param confSource
*/
private void readTagFromConfig(String attributeValue, String confName, String
- confValue, List<String> confSource) {
+ confValue, String[] confSource) {
for (String tagStr : attributeValue.split(",")) {
try {
tagStr = tagStr.trim();
@@ -3243,7 +3337,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
} catch (Exception ex) {
// Log the exception at trace level.
LOG.trace("Tag '{}' for property:{} Source:{}", tagStr, confName,
- Arrays.toString(confSource.toArray()), ex);
+ confSource, ex);
}
}
}
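The refactor replaces one very large parse loop in loadResource with a Parser class whose parseNext() is a thin dispatcher over small handler methods. That shape is friendlier to HotSpot, which by default refuses to JIT-compile methods above roughly 8,000 bytecodes (the DontCompileHugeMethods limit) and only inlines small ones. A toy illustration of the structure, not Hadoop code; the event codes mirror XMLStreamConstants (START_ELEMENT=1, END_ELEMENT=2, CHARACTERS=4):

    public class ThinDispatcherSketch {
      private final StringBuilder token = new StringBuilder();

      // One small, independently JIT-compilable method per event kind.
      private void handleStartElement()       { token.setLength(0); }
      private void handleCharacters(String s) { token.append(s); }
      private void handleEndElement(String n) { System.out.println(n + " -> " + token); }

      void parseNext(int event, String arg) {
        switch (event) { // thin dispatcher, like Parser.parseNext() above
          case 1:  handleStartElement();    break; // START_ELEMENT
          case 4:  handleCharacters(arg);   break; // CHARACTERS
          case 2:  handleEndElement(arg);   break; // END_ELEMENT
          default: break;
        }
      }

      public static void main(String[] args) {
        ThinDispatcherSketch p = new ThinDispatcherSketch();
        p.parseNext(1, null);
        p.parseNext(4, "3");
        p.parseNext(2, "dfs.replication");
      }
    }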
[25/45] hadoop git commit: HDFS-13707. [PROVIDED Storage] Fix failing integration tests in ITestProvidedImplementation. Contributed by Virajith Jalaparti.
Posted by xy...@apache.org.
HDFS-13707. [PROVIDED Storage] Fix failing integration tests in ITestProvidedImplementation. Contributed by Virajith Jalaparti.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73746c5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73746c5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73746c5d
Branch: refs/heads/HDDS-4
Commit: 73746c5da76d5e39df131534a1ec35dfc5d2529b
Parents: e4d7227
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Jun 29 09:56:13 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Jun 29 09:56:13 2018 -0700
----------------------------------------------------------------------
.../common/blockaliasmap/impl/TextFileRegionAliasMap.java | 5 +++--
.../hdfs/server/namenode/ITestProvidedImplementation.java | 2 +-
2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73746c5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
index abe92e3..4d65142 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TextFileRegionAliasMap.java
@@ -28,6 +28,7 @@ import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
+import java.util.Base64;
import java.util.Iterator;
import java.util.Map;
import java.util.Collections;
@@ -359,7 +360,7 @@ public class TextFileRegionAliasMap
}
byte[] nonce = new byte[0];
if (f.length == 6) {
- nonce = f[5].getBytes(Charset.forName("UTF-8"));
+ nonce = Base64.getDecoder().decode(f[5]);
}
return new FileRegion(Long.parseLong(f[0]), new Path(f[1]),
Long.parseLong(f[2]), Long.parseLong(f[3]), Long.parseLong(f[4]),
@@ -451,7 +452,7 @@ public class TextFileRegionAliasMap
out.append(Long.toString(block.getGenerationStamp()));
if (psl.getNonce().length > 0) {
out.append(delim)
- .append(new String(psl.getNonce(), Charset.forName("UTF-8")));
+ .append(Base64.getEncoder().encodeToString(psl.getNonce()));
}
out.append("\n");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73746c5d/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
index 49c9bcf..7d3ab0e 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
@@ -132,7 +132,7 @@ public class ITestProvidedImplementation {
nnDirPath.toString());
conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_READ_FILE,
new Path(nnDirPath, fileNameFromBlockPoolID(bpid)).toString());
- conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER, ",");
+ conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_TEXT_DELIMITER, "\t");
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR_PROVIDED,
new File(providedPath.toUri()).toString());
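The alias-map fix stores the nonce as Base64 rather than as raw bytes pushed through a UTF-8 String. Arbitrary nonce bytes are not guaranteed to be valid UTF-8 (invalid sequences decode to U+FFFD and are lost) and may even contain a field delimiter; Base64 yields delimiter-free ASCII that round-trips exactly. A small self-contained demonstration (hypothetical class name):

    import java.util.Arrays;
    import java.util.Base64;

    public class NonceBase64Demo {
      public static void main(String[] args) {
        // A nonce may hold arbitrary bytes, including a field delimiter
        // such as ','.
        byte[] nonce = {0x01, ',', 0x7F};

        String field = Base64.getEncoder().encodeToString(nonce);
        byte[] decoded = Base64.getDecoder().decode(field);

        System.out.println("encoded field: " + field);  // delimiter-free ASCII
        System.out.println("round-trips:   " + Arrays.equals(nonce, decoded));
      }
    }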
[26/45] hadoop git commit: YARN-8455. Add basic ACL check for all ATS v2 REST APIs. Contributed by Rohith Sharma K S.
Posted by xy...@apache.org.
YARN-8455. Add basic ACL check for all ATS v2 REST APIs. Contributed by Rohith Sharma K S.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/469b29c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/469b29c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/469b29c0
Branch: refs/heads/HDDS-4
Commit: 469b29c0817b7bf1902c9195c4f8d031a909e1c9
Parents: 73746c5
Author: Sunil G <su...@apache.org>
Authored: Fri Jun 29 10:02:53 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jun 29 10:02:53 2018 -0700
----------------------------------------------------------------------
.../reader/TimelineFromIdConverter.java | 93 +++++++++
.../reader/TimelineReaderWebServices.java | 198 +++++++++++++++----
.../TestTimelineReaderWebServicesBasicAcl.java | 154 +++++++++++++++
3 files changed, 407 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/469b29c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineFromIdConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineFromIdConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineFromIdConverter.java
new file mode 100644
index 0000000..5f5f0b1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineFromIdConverter.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import java.util.List;
+
+/**
+ * Used for decoding FROM_ID
+ */
+enum TimelineFromIdConverter {
+
+ APPLICATION_FROMID {
+ @Override TimelineReaderContext decodeUID(String fromId) throws Exception {
+ if (fromId == null) {
+ return null;
+ }
+
+ List<String> appTupleList = TimelineReaderUtils.split(fromId);
+ if (appTupleList == null || appTupleList.size() != 5) {
+ throw new IllegalArgumentException(
+ "Invalid row key for application table.");
+ }
+
+ return new TimelineReaderContext(appTupleList.get(0), appTupleList.get(1),
+ appTupleList.get(2), Long.parseLong(appTupleList.get(3)),
+ appTupleList.get(4), null, null);
+ }
+ },
+
+ SUB_APPLICATION_ENTITY_FROMID {
+ @Override TimelineReaderContext decodeUID(String fromId) throws Exception {
+ if (fromId == null) {
+ return null;
+ }
+ List<String> split = TimelineReaderUtils.split(fromId);
+ if (split == null || split.size() != 6) {
+ throw new IllegalArgumentException(
+ "Invalid row key for sub app table.");
+ }
+
+ String subAppUserId = split.get(0);
+ String clusterId = split.get(1);
+ String entityType = split.get(2);
+ Long entityIdPrefix = Long.valueOf(split.get(3));
+ String entityId = split.get(4);
+ String userId = split.get(5);
+ return new TimelineReaderContext(clusterId, userId, null, null, null,
+ entityType, entityIdPrefix, entityId, subAppUserId);
+ }
+ },
+
+ GENERIC_ENTITY_FROMID {
+ @Override TimelineReaderContext decodeUID(String fromId) throws Exception {
+ if (fromId == null) {
+ return null;
+ }
+ List<String> split = TimelineReaderUtils.split(fromId);
+ if (split == null || split.size() != 8) {
+ throw new IllegalArgumentException("Invalid row key for entity table.");
+ }
+ Long flowRunId = Long.valueOf(split.get(3));
+ Long entityIdPrefix = Long.valueOf(split.get(6));
+ return new TimelineReaderContext(split.get(0), split.get(1), split.get(2),
+ flowRunId, split.get(4), split.get(5), entityIdPrefix, split.get(7));
+ }
+ };
+
+ /**
+ * Decodes FROM_ID depending on FROM_ID implementation.
+ *
+ * @param fromId FROM_ID to be decoded.
+ * @return a {@link TimelineReaderContext} object if FROM_ID passed can be
+ * decoded, null otherwise.
+ * @throws Exception if any problem occurs while decoding.
+ */
+ abstract TimelineReaderContext decodeUID(String fromId) throws Exception;
+}
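Each enum constant decodes a FROM_ID of fixed arity: 5 fields for applications, 6 for sub-application entities, 8 for generic entities. A rough standalone sketch of the generic-entity case; it assumes, for illustration only, that fields are joined with '!' (the real TimelineReaderUtils.split also handles escaping, omitted here), and all identifiers are made up:

    import java.util.Arrays;
    import java.util.List;

    public class FromIdDecodeSketch {
      public static void main(String[] args) {
        String fromId = "yarn-cluster!hive!flow1!1530000000000!"
            + "application_1_0001!YARN_CONTAINER!0!container_1_01_000001";

        List<String> split = Arrays.asList(fromId.split("!"));
        if (split.size() != 8) { // same arity check as GENERIC_ENTITY_FROMID
          throw new IllegalArgumentException("Invalid row key for entity table.");
        }
        long flowRunId = Long.parseLong(split.get(3));
        long entityIdPrefix = Long.parseLong(split.get(6));
        System.out.println("cluster=" + split.get(0) + " user=" + split.get(1)
            + " flow=" + split.get(2) + " run=" + flowRunId
            + " app=" + split.get(4) + " type=" + split.get(5)
            + " prefix=" + entityIdPrefix + " id=" + split.get(7));
      }
    }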
http://git-wip-us.apache.org/repos/asf/hadoop/blob/469b29c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 7bf66b0..7f96bfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import com.google.common.annotations.VisibleForTesting;
@@ -188,6 +189,8 @@ public class TimelineReaderWebServices {
"Filter Parsing failed." : e.getMessage());
} else if (e instanceof BadRequestException) {
throw (BadRequestException)e;
+ } else if (e instanceof ForbiddenException) {
+ throw (ForbiddenException) e;
} else {
LOG.error("Error while processing REST request", e);
throw new WebApplicationException(e,
@@ -339,6 +342,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForGenericEntities(entities, callerUGI, entityType);
} catch (Exception e) {
handleException(e, url, startTime,
"createdTime start/end or limit or flowrunid");
@@ -607,13 +611,15 @@ public class TimelineReaderWebServices {
.createTimelineReaderContext(clusterId, userId, flowName, flowRunId,
appId, entityType, null, null);
entities = timelineReaderManager.getEntities(context,
- TimelineReaderWebServicesUtils.createTimelineEntityFilters(
- limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
- infofilters, conffilters, metricfilters, eventfilters,
- fromId),
- TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
- confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
- metricsTimeStart, metricsTimeEnd));
+ TimelineReaderWebServicesUtils
+ .createTimelineEntityFilters(limit, createdTimeStart,
+ createdTimeEnd, relatesTo, isRelatedTo, infofilters,
+ conffilters, metricfilters, eventfilters, fromId),
+ TimelineReaderWebServicesUtils
+ .createTimelineDataToRetrieve(confsToRetrieve, metricsToRetrieve,
+ fields, metricsLimit, metricsTimeStart, metricsTimeEnd));
+
+ checkAccessForGenericEntities(entities, callerUGI, entityType);
} catch (Exception e) {
handleException(e, url, startTime,
"createdTime start/end or limit or flowrunid");
@@ -704,6 +710,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForGenericEntity(entity, callerUGI);
} catch (Exception e) {
handleException(e, url, startTime, "flowrunid");
}
@@ -893,6 +900,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForGenericEntity(entity, callerUGI);
} catch (Exception e) {
handleException(e, url, startTime, "flowrunid");
}
@@ -956,6 +964,8 @@ public class TimelineReaderWebServices {
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
+ // TODO to be removed or modified once ACL story is played
+ checkAccess(timelineReaderManager, callerUGI, context.getUserId());
context.setEntityType(TimelineEntityType.YARN_FLOW_RUN.toString());
entity = timelineReaderManager.getEntity(context,
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
@@ -1063,12 +1073,16 @@ public class TimelineReaderWebServices {
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
TimelineEntity entity = null;
try {
- entity = timelineReaderManager.getEntity(
- TimelineReaderWebServicesUtils.createTimelineReaderContext(
- clusterId, userId, flowName, flowRunId, null,
- TimelineEntityType.YARN_FLOW_RUN.toString(), null, null),
- TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
- null, metricsToRetrieve, null, null, null, null));
+ TimelineReaderContext context = TimelineReaderWebServicesUtils
+ .createTimelineReaderContext(clusterId, userId, flowName, flowRunId,
+ null, TimelineEntityType.YARN_FLOW_RUN.toString(), null, null);
+ // TODO to be removed or modified once ACL story is played
+ checkAccess(timelineReaderManager, callerUGI, context.getUserId());
+
+ entity = timelineReaderManager.getEntity(context,
+ TimelineReaderWebServicesUtils
+ .createTimelineDataToRetrieve(null, metricsToRetrieve, null, null,
+ null, null));
} catch (Exception e) {
handleException(e, url, startTime, "flowrunid");
}
@@ -1156,6 +1170,8 @@ public class TimelineReaderWebServices {
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
+ // TODO to be removed or modified once ACL story is played
+ checkAccess(timelineReaderManager, callerUGI, context.getUserId());
context.setEntityType(TimelineEntityType.YARN_FLOW_RUN.toString());
entities = timelineReaderManager.getEntities(context,
TimelineReaderWebServicesUtils.createTimelineEntityFilters(
@@ -1304,15 +1320,21 @@ public class TimelineReaderWebServices {
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
Set<TimelineEntity> entities = null;
try {
- entities = timelineReaderManager.getEntities(
- TimelineReaderWebServicesUtils.createTimelineReaderContext(
- clusterId, userId, flowName, null, null,
- TimelineEntityType.YARN_FLOW_RUN.toString(), null, null),
- TimelineReaderWebServicesUtils.createTimelineEntityFilters(
- limit, createdTimeStart, createdTimeEnd, null, null, null,
- null, null, null, fromId),
- TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
- null, metricsToRetrieve, fields, null, null, null));
+ TimelineReaderContext timelineReaderContext = TimelineReaderWebServicesUtils
+ .createTimelineReaderContext(clusterId, userId, flowName, null,
+ null, TimelineEntityType.YARN_FLOW_RUN.toString(), null,
+ null);
+ // TODO to be removed or modified once ACL story is played
+ checkAccess(timelineReaderManager, callerUGI,
+ timelineReaderContext.getUserId());
+
+ entities = timelineReaderManager.getEntities(timelineReaderContext,
+ TimelineReaderWebServicesUtils
+ .createTimelineEntityFilters(limit, createdTimeStart,
+ createdTimeEnd, null, null, null, null, null, null, fromId),
+ TimelineReaderWebServicesUtils
+ .createTimelineDataToRetrieve(null, metricsToRetrieve, fields,
+ null, null, null));
} catch (Exception e) {
handleException(e, url, startTime,
"createdTime start/end or limit or fromId");
@@ -1435,7 +1457,6 @@ public class TimelineReaderWebServices {
long startTime = Time.monotonicNow();
init(res);
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
- Configuration config = timelineReaderManager.getConfig();
Set<TimelineEntity> entities = null;
try {
DateRange range = parseDateRange(dateRange);
@@ -1455,19 +1476,9 @@ public class TimelineReaderWebServices {
long endTime = Time.monotonicNow();
if (entities == null) {
entities = Collections.emptySet();
- } else if (isDisplayEntityPerUserFilterEnabled(config)) {
- Set<TimelineEntity> userEntities = new LinkedHashSet<>();
- userEntities.addAll(entities);
- for (TimelineEntity entity : userEntities) {
- if (entity.getInfo() != null) {
- String userId =
- (String) entity.getInfo().get(FlowActivityEntity.USER_INFO_KEY);
- if (!validateAuthUserWithEntityUser(timelineReaderManager, callerUGI,
- userId)) {
- entities.remove(entity);
- }
- }
- }
+ } else {
+ checkAccess(timelineReaderManager, callerUGI, entities,
+ FlowActivityEntity.USER_INFO_KEY, true);
}
LOG.info("Processed URL " + url +
" (Took " + (endTime - startTime) + " ms.)");
@@ -1552,6 +1563,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForAppEntity(entity, callerUGI);
} catch (Exception e) {
handleException(e, url, startTime, "flowrunid");
}
@@ -1722,6 +1734,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForAppEntity(entity, callerUGI);
} catch (Exception e) {
handleException(e, url, startTime, "flowrunid");
}
@@ -1852,6 +1865,8 @@ public class TimelineReaderWebServices {
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
+ // TODO to be removed or modified once ACL story is played
+ checkAccess(timelineReaderManager, callerUGI, context.getUserId());
context.setEntityType(TimelineEntityType.YARN_APPLICATION.toString());
entities = timelineReaderManager.getEntities(context,
TimelineReaderWebServicesUtils.createTimelineEntityFilters(
@@ -3343,6 +3358,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForSubAppEntities(entities, callerUGI);
} catch (Exception e) {
handleException(e, url, startTime,
"createdTime start/end or limit");
@@ -3410,6 +3426,7 @@ public class TimelineReaderWebServices {
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
+ checkAccessForSubAppEntities(entities, callerUGI);
} catch (Exception e) {
handleException(e, url, startTime, "");
}
@@ -3422,7 +3439,7 @@ public class TimelineReaderWebServices {
return entities;
}
- private boolean isDisplayEntityPerUserFilterEnabled(Configuration config) {
+ static boolean isDisplayEntityPerUserFilterEnabled(Configuration config) {
return !config
.getBoolean(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED)
@@ -3430,8 +3447,76 @@ public class TimelineReaderWebServices {
.getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false);
}
+ // TODO to be removed or modified once ACL story is played
+ private void checkAccessForSubAppEntities(Set<TimelineEntity> entities,
+ UserGroupInformation callerUGI) throws Exception {
+ if (entities != null && entities.size() > 0
+ && isDisplayEntityPerUserFilterEnabled(
+ getTimelineReaderManager().getConfig())) {
+ TimelineReaderContext timelineReaderContext = null;
+ TimelineEntity entity = entities.iterator().next();
+ String fromId =
+ (String) entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+ timelineReaderContext =
+ TimelineFromIdConverter.SUB_APPLICATION_ENTITY_FROMID
+ .decodeUID(fromId);
+ checkAccess(getTimelineReaderManager(), callerUGI,
+ timelineReaderContext.getDoAsUser());
+ }
+ }
+
+ // TODO to be removed or modified once ACL story is played
+ private void checkAccessForAppEntity(TimelineEntity entity,
+ UserGroupInformation callerUGI) throws Exception {
+ if (entity != null && isDisplayEntityPerUserFilterEnabled(
+ getTimelineReaderManager().getConfig())) {
+ String fromId =
+ (String) entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+ TimelineReaderContext timelineReaderContext =
+ TimelineFromIdConverter.APPLICATION_FROMID.decodeUID(fromId);
+ checkAccess(getTimelineReaderManager(), callerUGI,
+ timelineReaderContext.getUserId());
+ }
+ }
+
+ // TODO to be removed or modified once ACL story is played
+ private void checkAccessForGenericEntity(TimelineEntity entity,
+ UserGroupInformation callerUGI) throws Exception {
+ if (entity != null && isDisplayEntityPerUserFilterEnabled(
+ getTimelineReaderManager().getConfig())) {
+ String fromId =
+ (String) entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+ TimelineReaderContext timelineReaderContext =
+ TimelineFromIdConverter.GENERIC_ENTITY_FROMID.decodeUID(fromId);
+ checkAccess(getTimelineReaderManager(), callerUGI,
+ timelineReaderContext.getUserId());
+ }
+ }
+
+ // TODO to be removed or modified once ACL story is played
+ private void checkAccessForGenericEntities(Set<TimelineEntity> entities,
+ UserGroupInformation callerUGI, String entityType) throws Exception {
+ if (entities != null && entities.size() > 0
+ && isDisplayEntityPerUserFilterEnabled(
+ getTimelineReaderManager().getConfig())) {
+ TimelineReaderContext timelineReaderContext = null;
+ TimelineEntity entity = entities.iterator().next();
+ String uid =
+ (String) entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+ if (TimelineEntityType.YARN_APPLICATION.matches(entityType)) {
+ timelineReaderContext =
+ TimelineFromIdConverter.APPLICATION_FROMID.decodeUID(uid);
+ } else {
+ timelineReaderContext =
+ TimelineFromIdConverter.GENERIC_ENTITY_FROMID.decodeUID(uid);
+ }
+ checkAccess(getTimelineReaderManager(), callerUGI,
+ timelineReaderContext.getUserId());
+ }
+ }
+
// TODO to be removed/modified once ACL story has played
- private boolean validateAuthUserWithEntityUser(
+ static boolean validateAuthUserWithEntityUser(
TimelineReaderManager readerManager, UserGroupInformation ugi,
String entityUser) {
String authUser = TimelineReaderWebServicesUtils.getUserName(ugi);
@@ -3442,4 +3527,41 @@ public class TimelineReaderWebServices {
}
return (readerManager.checkAccess(ugi) || authUser.equals(requestedUser));
}
+
+ // TODO to be removed/modified once ACL story has played
+ static boolean checkAccess(TimelineReaderManager readerManager,
+ UserGroupInformation ugi, String entityUser) {
+ if (isDisplayEntityPerUserFilterEnabled(readerManager.getConfig())) {
+ if (!validateAuthUserWithEntityUser(readerManager, ugi, entityUser)) {
+ String userName = ugi.getShortUserName();
+ String msg = "User " + userName
+ + " is not allowed to read TimelineService V2 data.";
+ throw new ForbiddenException(msg);
+ }
+ }
+ return true;
+ }
+
+ // TODO to be removed or modified once ACL story is played
+ static void checkAccess(TimelineReaderManager readerManager,
+ UserGroupInformation callerUGI, Set<TimelineEntity> entities,
+ String entityUserKey, boolean verifyForAllEntity) {
+ if (entities.size() > 0 && isDisplayEntityPerUserFilterEnabled(
+ readerManager.getConfig())) {
+ Set<TimelineEntity> userEntities = new LinkedHashSet<>();
+ userEntities.addAll(entities);
+ for (TimelineEntity entity : userEntities) {
+ if (entity.getInfo() != null) {
+ String userId = (String) entity.getInfo().get(entityUserKey);
+ if (!validateAuthUserWithEntityUser(readerManager, callerUGI,
+ userId)) {
+ entities.remove(entity);
+ if (!verifyForAllEntity) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
}
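For context, a minimal sketch (not part of the patch) of how the new static helper behaves; "manager" here stands for a started TimelineReaderManager and the filter settings are assumed to be enabled:

    UserGroupInformation caller = UserGroupInformation.createRemoteUser("user1");
    try {
      // Throws when the per-user filter is enabled and "user1" is neither
      // an admin nor the entity owner "user2".
      TimelineReaderWebServices.checkAccess(manager, caller, "user2");
    } catch (ForbiddenException e) {
      // Rethrown as-is by handleException; ForbiddenException maps to
      // HTTP 403 for the REST client.
    }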
http://git-wip-us.apache.org/repos/asf/hadoop/blob/469b29c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
new file mode 100644
index 0000000..4239bf0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+public class TestTimelineReaderWebServicesBasicAcl {
+
+ private TimelineReaderManager manager;
+ private static String adminUser = "admin";
+ private static UserGroupInformation adminUgi =
+ UserGroupInformation.createRemoteUser(adminUser);
+ private Configuration config;
+
+ @Before public void setUp() throws Exception {
+ config = new YarnConfiguration();
+ }
+
+ @After public void tearDown() throws Exception {
+ if (manager != null) {
+ manager.stop();
+ manager = null;
+ }
+ config = null;
+ }
+
+ @Test public void testTimelineReaderManagerAclsWhenDisabled()
+ throws Exception {
+ config.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
+ config.set(YarnConfiguration.YARN_ADMIN_ACL, adminUser);
+ manager = new TimelineReaderManager(null);
+ manager.init(config);
+ manager.start();
+
+ // when acls are disabled, always return true
+ Assert.assertTrue(manager.checkAccess(null));
+
+ // filter is disabled, so should return false
+ Assert.assertFalse(
+ TimelineReaderWebServices.isDisplayEntityPerUserFilterEnabled(config));
+ }
+
+ @Test public void testTimelineReaderManagerAclsWhenEnabled()
+ throws Exception {
+ Configuration config = new YarnConfiguration();
+ config.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
+ config.setBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, true);
+ config.set(YarnConfiguration.YARN_ADMIN_ACL, adminUser);
+ manager = new TimelineReaderManager(null);
+ manager.init(config);
+ manager.start();
+
+ String user1 = "user1";
+ String user2 = "user2";
+ UserGroupInformation user1Ugi =
+ UserGroupInformation.createRemoteUser(user1);
+ UserGroupInformation user2Ugi =
+ UserGroupInformation.createRemoteUser(user2);
+
+ // false because ugi is null
+ Assert.assertFalse(TimelineReaderWebServices
+ .validateAuthUserWithEntityUser(manager, null, user1));
+
+ // incoming ugi is admin asking for entity owner user1
+ Assert.assertTrue(
+ TimelineReaderWebServices.checkAccess(manager, adminUgi, user1));
+
+ // incoming ugi is admin asking for entity owner user2
+ Assert.assertTrue(
+ TimelineReaderWebServices.checkAccess(manager, adminUgi, user2));
+
+ // incoming ugi is non-admin i.e user1Ugi asking for entity owner user2
+ try {
+ TimelineReaderWebServices.checkAccess(manager, user1Ugi, user2);
+ Assert.fail("user1Ugi is not allowed to view user2");
+ } catch (ForbiddenException e) {
+ // expected
+ }
+
+ // incoming ugi is non-admin i.e user2Ugi asking for entity owner user1
+ try {
+ TimelineReaderWebServices.checkAccess(manager, user2Ugi, user1);
+ Assert.fail("user2Ugi is not allowed to view user1");
+ } catch (ForbiddenException e) {
+ // expected
+ }
+
+ String userKey = "user";
+ // incoming ugi is admin asking for entities
+ Set<TimelineEntity> entities = createEntities(10, userKey);
+ TimelineReaderWebServices
+ .checkAccess(manager, adminUgi, entities, userKey, true);
+ // admin is allowed to view other entities
+ Assert.assertTrue(entities.size() == 10);
+
+ // incoming ugi is user1Ugi asking for entities
+ // only user1 entities are allowed to view
+ entities = createEntities(5, userKey);
+ TimelineReaderWebServices
+ .checkAccess(manager, user1Ugi, entities, userKey, true);
+ Assert.assertTrue(entities.size() == 1);
+ Assert
+ .assertEquals(user1, entities.iterator().next().getInfo().get(userKey));
+
+ // incoming ugi is user2Ugi asking for entities
+ // only user2 entities are allowed to view
+ entities = createEntities(8, userKey);
+ TimelineReaderWebServices
+ .checkAccess(manager, user2Ugi, entities, userKey, true);
+ Assert.assertTrue(entities.size() == 1);
+ Assert
+ .assertEquals(user2, entities.iterator().next().getInfo().get(userKey));
+ }
+
+ Set<TimelineEntity> createEntities(int noOfUsers, String userKey) {
+ Set<TimelineEntity> entities = new LinkedHashSet<>();
+ for (int i = 0; i < noOfUsers; i++) {
+ TimelineEntity e = new TimelineEntity();
+ e.setType("user" + i);
+ e.setId("user" + i);
+ e.getInfo().put(userKey, "user" + i);
+ entities.add(e);
+ }
+ return entities;
+ }
+
+}
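For reference, the switches the test flips can be sketched as plain configuration; note that isDisplayEntityPerUserFilterEnabled also requires timeline read authentication to remain disabled (its default):

    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    conf.setBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, true);
    conf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");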
[22/45] hadoop git commit: YARN-8379. Improve balancing resources in
already satisfied queues by using Capacity Scheduler preemption. Contributed
by Zian Chen.
Posted by xy...@apache.org.
YARN-8379. Improve balancing resources in already satisfied queues by using Capacity Scheduler preemption. Contributed by Zian Chen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29119430
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29119430
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29119430
Branch: refs/heads/HDDS-4
Commit: 291194302cc1a875d6d94ea93cf1184a3f1fc2cc
Parents: 384764c
Author: Sunil G <su...@apache.org>
Authored: Thu Jun 28 10:23:31 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Thu Jun 28 10:23:31 2018 -0700
----------------------------------------------------------------------
.../AbstractPreemptableResourceCalculator.java | 21 +-
.../CapacitySchedulerPreemptionContext.java | 2 +
.../CapacitySchedulerPreemptionUtils.java | 23 +-
.../capacity/FifoCandidatesSelector.java | 45 ++--
.../capacity/IntraQueueCandidatesSelector.java | 9 +-
.../capacity/PreemptableResourceCalculator.java | 7 +-
.../capacity/PreemptionCandidatesSelector.java | 11 +
.../ProportionalCapacityPreemptionPolicy.java | 129 +++++++---
...QueuePriorityContainerCandidateSelector.java | 16 +-
.../ReservedContainerCandidatesSelector.java | 16 +-
.../monitor/capacity/TempQueuePerPartition.java | 8 +-
.../CapacitySchedulerConfiguration.java | 17 ++
.../TestPreemptionForQueueWithPriorities.java | 58 +++++
...apacityPreemptionPolicyPreemptToBalance.java | 254 +++++++++++++++++++
...TestCapacitySchedulerSurgicalPreemption.java | 111 ++++++++
15 files changed, 637 insertions(+), 90 deletions(-)
----------------------------------------------------------------------
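In short, this patch adds an opt-in second FifoCandidatesSelector pass that keeps balancing over-served queues after every queue has reached its guarantee, with its own wait-before-kill timeout. A sketch of enabling it, using the constants added to CapacitySchedulerConfiguration in this patch (the timeout value is an arbitrary example):

    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setBoolean(CapacitySchedulerConfiguration
        .PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED, true);
    csConf.setLong(CapacitySchedulerConfiguration
        .MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION, 300000L);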
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
index 64b3615..5b8360a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
@@ -42,6 +42,7 @@ public class AbstractPreemptableResourceCalculator {
protected final ResourceCalculator rc;
protected boolean isReservedPreemptionCandidatesSelector;
private Resource stepFactor;
+ private boolean allowQueuesBalanceAfterAllQueuesSatisfied;
static class TQComparator implements Comparator<TempQueuePerPartition> {
private ResourceCalculator rc;
@@ -83,15 +84,28 @@ public class AbstractPreemptableResourceCalculator {
* this will be set by different implementation of candidate
* selectors, please refer to TempQueuePerPartition#offer for
* details.
+ * @param allowQueuesBalanceAfterAllQueuesSatisfied
+ * Should resources be preempted from an over-served queue when the
+ * requesting queues are all at or over their guarantees?
+ * For example, 10 queues under root each have a guaranteed
+ * capacity of 10%, but only two are active: queueA uses 10% and
+ * queueB uses 90%. Every queue meets its guarantee, yet the
+ * split is unfair to queueA. This behavior is therefore made
+ * configurable; it is disabled by default.
+ *
*/
public AbstractPreemptableResourceCalculator(
CapacitySchedulerPreemptionContext preemptionContext,
- boolean isReservedPreemptionCandidatesSelector) {
+ boolean isReservedPreemptionCandidatesSelector,
+ boolean allowQueuesBalanceAfterAllQueuesSatisfied) {
context = preemptionContext;
rc = preemptionContext.getResourceCalculator();
this.isReservedPreemptionCandidatesSelector =
isReservedPreemptionCandidatesSelector;
-
+ this.allowQueuesBalanceAfterAllQueuesSatisfied =
+ allowQueuesBalanceAfterAllQueuesSatisfied;
stepFactor = Resource.newInstance(0, 0);
for (ResourceInformation ri : stepFactor.getResources()) {
ri.setValue(1);
@@ -193,7 +207,8 @@ public class AbstractPreemptableResourceCalculator {
wQavail = Resources.componentwiseMin(wQavail, unassigned);
Resource wQidle = sub.offer(wQavail, rc, totGuarant,
- isReservedPreemptionCandidatesSelector);
+ isReservedPreemptionCandidatesSelector,
+ allowQueuesBalanceAfterAllQueuesSatisfied);
Resource wQdone = Resources.subtract(wQavail, wQidle);
if (Resources.greaterThan(rc, totGuarant, wQdone, Resources.none())) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
index 098acdd..7985296 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
@@ -70,6 +70,8 @@ public interface CapacitySchedulerPreemptionContext {
float getMaxAllowableLimitForIntraQueuePreemption();
+ long getDefaultMaximumKillWaitTimeout();
+
@Unstable
IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
index 690eb02..ed50eff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java
@@ -151,6 +151,7 @@ public class CapacitySchedulerPreemptionUtils {
Map<String, Resource> resourceToObtainByPartitions,
RMContainer rmContainer, Resource clusterResource,
Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
Resource totalPreemptionAllowed, boolean conservativeDRF) {
ApplicationAttemptId attemptId = rmContainer.getApplicationAttemptId();
@@ -218,7 +219,7 @@ public class CapacitySchedulerPreemptionUtils {
}
// Add to preemptMap
- addToPreemptMap(preemptMap, attemptId, rmContainer);
+ addToPreemptMap(preemptMap, curCandidates, attemptId, rmContainer);
return true;
}
@@ -230,15 +231,23 @@ public class CapacitySchedulerPreemptionUtils {
return context.getScheduler().getSchedulerNode(nodeId).getPartition();
}
- private static void addToPreemptMap(
+ protected static void addToPreemptMap(
Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
ApplicationAttemptId appAttemptId, RMContainer containerToPreempt) {
- Set<RMContainer> set = preemptMap.get(appAttemptId);
- if (null == set) {
- set = new HashSet<>();
- preemptMap.put(appAttemptId, set);
+ Set<RMContainer> setForToPreempt = preemptMap.get(appAttemptId);
+ Set<RMContainer> setForCurCandidates = curCandidates.get(appAttemptId);
+ if (null == setForToPreempt) {
+ setForToPreempt = new HashSet<>();
+ preemptMap.put(appAttemptId, setForToPreempt);
}
- set.add(containerToPreempt);
+ setForToPreempt.add(containerToPreempt);
+
+ if (null == setForCurCandidates) {
+ setForCurCandidates = new HashSet<>();
+ curCandidates.put(appAttemptId, setForCurCandidates);
+ }
+ setForCurCandidates.add(containerToPreempt);
}
private static boolean preemptMapContains(
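The bookkeeping above (one cumulative preemptMap across selectors plus a per-round curCandidates map) is equivalent to this computeIfAbsent sketch; names are illustrative:

    preemptMap.computeIfAbsent(appAttemptId, k -> new HashSet<>())
        .add(containerToPreempt);
    curCandidates.computeIfAbsent(appAttemptId, k -> new HashSet<>())
        .add(containerToPreempt);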
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index 3b2fcbb..c2735f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
import org.apache.hadoop.yarn.util.resource.Resources;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -42,19 +43,25 @@ public class FifoCandidatesSelector
private static final Log LOG =
LogFactory.getLog(FifoCandidatesSelector.class);
private PreemptableResourceCalculator preemptableAmountCalculator;
+ private boolean allowQueuesBalanceAfterAllQueuesSatisfied;
FifoCandidatesSelector(CapacitySchedulerPreemptionContext preemptionContext,
- boolean includeReservedResource) {
+ boolean includeReservedResource,
+ boolean allowQueuesBalanceAfterAllQueuesSatisfied) {
super(preemptionContext);
+ this.allowQueuesBalanceAfterAllQueuesSatisfied =
+ allowQueuesBalanceAfterAllQueuesSatisfied;
preemptableAmountCalculator = new PreemptableResourceCalculator(
- preemptionContext, includeReservedResource);
+ preemptionContext, includeReservedResource,
+ allowQueuesBalanceAfterAllQueuesSatisfied);
}
@Override
public Map<ApplicationAttemptId, Set<RMContainer>> selectCandidates(
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource clusterResource, Resource totalPreemptionAllowed) {
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates = new HashMap<>();
// Calculate how much resources we need to preempt
preemptableAmountCalculator.computeIdealAllocation(clusterResource,
totalPreemptionAllowed);
@@ -110,7 +117,7 @@ public class FifoCandidatesSelector
boolean preempted = CapacitySchedulerPreemptionUtils
.tryPreemptContainerAndDeductResToObtain(rc,
preemptionContext, resToObtainByPartition, c,
- clusterResource, selectedCandidates,
+ clusterResource, selectedCandidates, curCandidates,
totalPreemptionAllowed, false);
if (!preempted) {
continue;
@@ -134,7 +141,7 @@ public class FifoCandidatesSelector
preemptFrom(fc, clusterResource, resToObtainByPartition,
skippedAMContainerlist, skippedAMSize, selectedCandidates,
- totalPreemptionAllowed);
+ curCandidates, totalPreemptionAllowed);
}
// Can try preempting AMContainers (still saving atmost
@@ -145,15 +152,15 @@ public class FifoCandidatesSelector
leafQueue.getEffectiveCapacity(RMNodeLabelsManager.NO_LABEL),
leafQueue.getMaxAMResourcePerQueuePercent());
- preemptAMContainers(clusterResource, selectedCandidates, skippedAMContainerlist,
- resToObtainByPartition, skippedAMSize, maxAMCapacityForThisQueue,
- totalPreemptionAllowed);
+ preemptAMContainers(clusterResource, selectedCandidates, curCandidates,
+ skippedAMContainerlist, resToObtainByPartition, skippedAMSize,
+ maxAMCapacityForThisQueue, totalPreemptionAllowed);
} finally {
leafQueue.getReadLock().unlock();
}
}
- return selectedCandidates;
+ return curCandidates;
}
/**
@@ -169,6 +176,7 @@ public class FifoCandidatesSelector
*/
private void preemptAMContainers(Resource clusterResource,
Map<ApplicationAttemptId, Set<RMContainer>> preemptMap,
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
List<RMContainer> skippedAMContainerlist,
Map<String, Resource> resToObtainByPartition, Resource skippedAMSize,
Resource maxAMCapacityForThisQueue, Resource totalPreemptionAllowed) {
@@ -187,7 +195,7 @@ public class FifoCandidatesSelector
boolean preempted = CapacitySchedulerPreemptionUtils
.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
resToObtainByPartition, c, clusterResource, preemptMap,
- totalPreemptionAllowed, false);
+ curCandidates, totalPreemptionAllowed, false);
if (preempted) {
Resources.subtractFrom(skippedAMSize, c.getAllocatedResource());
}
@@ -203,6 +211,7 @@ public class FifoCandidatesSelector
Resource clusterResource, Map<String, Resource> resToObtainByPartition,
List<RMContainer> skippedAMContainerlist, Resource skippedAMSize,
Map<ApplicationAttemptId, Set<RMContainer>> selectedContainers,
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
Resource totalPreemptionAllowed) {
ApplicationAttemptId appId = app.getApplicationAttemptId();
@@ -219,9 +228,10 @@ public class FifoCandidatesSelector
}
// Try to preempt this container
- CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(
- rc, preemptionContext, resToObtainByPartition, c, clusterResource,
- selectedContainers, totalPreemptionAllowed, false);
+ CapacitySchedulerPreemptionUtils
+ .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
+ resToObtainByPartition, c, clusterResource, selectedContainers,
+ curCandidates, totalPreemptionAllowed, false);
if (!preemptionContext.isObserveOnly()) {
preemptionContext.getRMContext().getDispatcher().getEventHandler()
@@ -262,9 +272,14 @@ public class FifoCandidatesSelector
}
// Try to preempt this container
- CapacitySchedulerPreemptionUtils.tryPreemptContainerAndDeductResToObtain(
- rc, preemptionContext, resToObtainByPartition, c, clusterResource,
- selectedContainers, totalPreemptionAllowed, false);
+ CapacitySchedulerPreemptionUtils
+ .tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
+ resToObtainByPartition, c, clusterResource, selectedContainers,
+ curCandidates, totalPreemptionAllowed, false);
}
}
+
+ public boolean getAllowQueuesBalanceAfterAllQueuesSatisfied() {
+ return allowQueuesBalanceAfterAllQueuesSatisfied;
+ }
}
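Note the contract change: selectCandidates still accumulates into the caller's selectedCandidates map but now returns only this round's picks. A hypothetical caller-side sketch:

    Map<ApplicationAttemptId, Set<RMContainer>> toPreempt = new HashMap<>();
    Map<ApplicationAttemptId, Set<RMContainer>> curCandidates =
        selector.selectCandidates(toPreempt, clusterResource,
            totalPreemptionAllowed);
    // toPreempt now includes curCandidates; the policy keeps curCandidates
    // per selector so each selector's kill timeout can be applied.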
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 8ab9507..c52fd95 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -122,7 +122,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
public Map<ApplicationAttemptId, Set<RMContainer>> selectCandidates(
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource clusterResource, Resource totalPreemptedResourceAllowed) {
-
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates = new HashMap<>();
// 1. Calculate the abnormality within each queue one by one.
computeIntraQueuePreemptionDemand(
clusterResource, totalPreemptedResourceAllowed, selectedCandidates);
@@ -182,7 +182,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
leafQueue.getReadLock().lock();
for (FiCaSchedulerApp app : apps) {
preemptFromLeastStarvedApp(leafQueue, app, selectedCandidates,
- clusterResource, totalPreemptedResourceAllowed,
+ curCandidates, clusterResource, totalPreemptedResourceAllowed,
resToObtainByPartition, rollingResourceUsagePerUser);
}
} finally {
@@ -191,7 +191,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
}
}
- return selectedCandidates;
+ return curCandidates;
}
private void initializeUsageAndUserLimitForCompute(Resource clusterResource,
@@ -211,6 +211,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
private void preemptFromLeastStarvedApp(LeafQueue leafQueue,
FiCaSchedulerApp app,
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates,
Resource clusterResource, Resource totalPreemptedResourceAllowed,
Map<String, Resource> resToObtainByPartition,
Map<String, Resource> rollingResourceUsagePerUser) {
@@ -270,7 +271,7 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
boolean ret = CapacitySchedulerPreemptionUtils
.tryPreemptContainerAndDeductResToObtain(rc, preemptionContext,
resToObtainByPartition, c, clusterResource, selectedCandidates,
- totalPreemptedResourceAllowed, true);
+ curCandidates, totalPreemptedResourceAllowed, true);
// Subtract from respective user's resource usage once a container is
// selected for preemption.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 08d834e..89a015e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -48,11 +48,14 @@ public class PreemptableResourceCalculator
* @param isReservedPreemptionCandidatesSelector this will be set by
* different implementation of candidate selectors, please refer to
* TempQueuePerPartition#offer for details.
+ * @param allowQueuesBalanceAfterAllQueuesSatisfied whether an over-served
+ * queue may still be preempted to balance queues even after every queue
+ * has reached its guarantee; see AbstractPreemptableResourceCalculator
*/
public PreemptableResourceCalculator(
CapacitySchedulerPreemptionContext preemptionContext,
- boolean isReservedPreemptionCandidatesSelector) {
- super(preemptionContext, isReservedPreemptionCandidatesSelector);
+ boolean isReservedPreemptionCandidatesSelector,
+ boolean allowQueuesBalanceAfterAllQueuesSatisfied) {
+ super(preemptionContext, isReservedPreemptionCandidatesSelector,
+ allowQueuesBalanceAfterAllQueuesSatisfied);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java
index 4d8afaf..3c97364 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptionCandidatesSelector.java
@@ -34,6 +34,7 @@ import java.util.Set;
public abstract class PreemptionCandidatesSelector {
protected CapacitySchedulerPreemptionContext preemptionContext;
protected ResourceCalculator rc;
+ private long maximumKillWaitTime = -1;
PreemptionCandidatesSelector(
CapacitySchedulerPreemptionContext preemptionContext) {
@@ -77,4 +78,14 @@ public abstract class PreemptionCandidatesSelector {
});
}
+ public long getMaximumKillWaitTimeMs() {
+ if (maximumKillWaitTime > 0) {
+ return maximumKillWaitTime;
+ }
+ return preemptionContext.getDefaultMaximumKillWaitTimeout();
+ }
+
+ public void setMaximumKillWaitTime(long maximumKillWaitTime) {
+ this.maximumKillWaitTime = maximumKillWaitTime;
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index cc69fba..036fd2f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -131,6 +131,8 @@ public class ProportionalCapacityPreemptionPolicy
private List<PreemptionCandidatesSelector> candidatesSelectionPolicies;
private Set<String> allPartitions;
private Set<String> leafQueueNames;
+ Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> pcsMap;
// Preemptable Entities, synced from scheduler at every run
private Map<String, PreemptableQueue> preemptableQueues;
@@ -249,7 +251,21 @@ public class ProportionalCapacityPreemptionPolicy
// initialize candidates preemption selection policies
candidatesSelectionPolicies.add(new FifoCandidatesSelector(this,
- additionalPreemptionBasedOnReservedResource));
+ additionalPreemptionBasedOnReservedResource, false));
+
+ // Do we need preemption to balance queues even after all of them are satisfied?
+ boolean isPreemptionToBalanceRequired = config.getBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED);
+ long maximumKillWaitTimeForPreemptionToQueueBalance = config.getLong(
+ CapacitySchedulerConfiguration.MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION,
+ CapacitySchedulerConfiguration.DEFAULT_MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION);
+ if (isPreemptionToBalanceRequired) {
+ PreemptionCandidatesSelector selector = new FifoCandidatesSelector(this,
+ false, true);
+ selector.setMaximumKillWaitTime(maximumKillWaitTimeForPreemptionToQueueBalance);
+ candidatesSelectionPolicies.add(selector);
+ }
// Do we need to specially consider intra queue
boolean isIntraQueuePreemptionEnabled = config.getBoolean(
@@ -282,7 +298,8 @@ public class ProportionalCapacityPreemptionPolicy
"select_based_on_reserved_containers = " +
selectCandidatesForResevedContainers + "\n" +
"additional_res_balance_based_on_reserved_containers = " +
- additionalPreemptionBasedOnReservedResource);
+ additionalPreemptionBasedOnReservedResource + "\n" +
+ "Preemption-to-balance-queue-enabled = " + isPreemptionToBalanceRequired);
csConfig = config;
}
@@ -308,44 +325,60 @@ public class ProportionalCapacityPreemptionPolicy
}
private void preemptOrkillSelectedContainerAfterWait(
- Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
- long currentTime) {
+ Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> toPreemptPerSelector, long currentTime) {
+ int toPreemptCount = 0;
+ for (Map<ApplicationAttemptId, Set<RMContainer>> containers :
+ toPreemptPerSelector.values()) {
+ toPreemptCount += containers.size();
+ }
if (LOG.isDebugEnabled()) {
LOG.debug(
"Starting to preempt containers for selectedCandidates and size:"
- + selectedCandidates.size());
+ + toPreemptCount);
}
// preempt (or kill) the selected containers
- for (Map.Entry<ApplicationAttemptId, Set<RMContainer>> e : selectedCandidates
+ // We need toPreemptPerSelector here to match list of containers to
+ // its selector so that we can get custom timeout per selector when
+ // checking if current container should be killed or not
+ for (Map.Entry<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> pc : toPreemptPerSelector
.entrySet()) {
- ApplicationAttemptId appAttemptId = e.getKey();
- if (LOG.isDebugEnabled()) {
- LOG.debug("Send to scheduler: in app=" + appAttemptId
- + " #containers-to-be-preemptionCandidates=" + e.getValue().size());
- }
- for (RMContainer container : e.getValue()) {
- // if we tried to preempt this for more than maxWaitTime
- if (preemptionCandidates.get(container) != null
- && preemptionCandidates.get(container)
- + maxWaitTime <= currentTime) {
- // kill it
- rmContext.getDispatcher().getEventHandler().handle(
- new ContainerPreemptEvent(appAttemptId, container,
- SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE));
- preemptionCandidates.remove(container);
- } else {
- if (preemptionCandidates.get(container) != null) {
- // We already updated the information to scheduler earlier, we need
- // not have to raise another event.
- continue;
+ Map<ApplicationAttemptId, Set<RMContainer>> cMap = pc.getValue();
+ if (cMap.size() > 0) {
+ for (Map.Entry<ApplicationAttemptId,
+ Set<RMContainer>> e : cMap.entrySet()) {
+ ApplicationAttemptId appAttemptId = e.getKey();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Send to scheduler: in app=" + appAttemptId
+ + " #containers-to-be-preemptionCandidates=" + e.getValue().size());
+ }
+ for (RMContainer container : e.getValue()) {
+ // if we have tried to preempt this container for longer than this
+ // selector's custom kill wait timeout
+ if (preemptionCandidates.get(container) != null
+ && preemptionCandidates.get(container)
+ + pc.getKey().getMaximumKillWaitTimeMs() <= currentTime) {
+ // kill it
+ rmContext.getDispatcher().getEventHandler().handle(
+ new ContainerPreemptEvent(appAttemptId, container,
+ SchedulerEventType.MARK_CONTAINER_FOR_KILLABLE));
+ preemptionCandidates.remove(container);
+ } else {
+ if (preemptionCandidates.get(container) != null) {
+ // We already sent this information to the scheduler earlier; no
+ // need to raise another event.
+ continue;
+ }
+
+ //otherwise just send preemption events
+ rmContext.getDispatcher().getEventHandler().handle(
+ new ContainerPreemptEvent(appAttemptId, container,
+ SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION));
+ preemptionCandidates.put(container, currentTime);
+ }
}
-
- //otherwise just send preemption events
- rmContext.getDispatcher().getEventHandler().handle(
- new ContainerPreemptEvent(appAttemptId, container,
- SchedulerEventType.MARK_CONTAINER_FOR_PREEMPTION));
- preemptionCandidates.put(container, currentTime);
}
}
}
@@ -438,6 +471,8 @@ public class ProportionalCapacityPreemptionPolicy
// queue and each application
Map<ApplicationAttemptId, Set<RMContainer>> toPreempt =
new HashMap<>();
+ Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> toPreemptPerSelector = new HashMap<>();
for (PreemptionCandidatesSelector selector :
candidatesSelectionPolicies) {
long startTime = 0;
@@ -447,20 +482,27 @@ public class ProportionalCapacityPreemptionPolicy
selector.getClass().getName()));
startTime = clock.getTime();
}
- toPreempt = selector.selectCandidates(toPreempt,
- clusterResources, totalPreemptionAllowed);
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates =
+ selector.selectCandidates(toPreempt, clusterResources,
+ totalPreemptionAllowed);
+ toPreemptPerSelector.putIfAbsent(selector, curCandidates);
if (LOG.isDebugEnabled()) {
LOG.debug(MessageFormat
.format("{0} uses {1} millisecond to run",
selector.getClass().getName(), clock.getTime() - startTime));
int totalSelected = 0;
+ int curSelected = 0;
for (Set<RMContainer> set : toPreempt.values()) {
totalSelected += set.size();
}
+ for (Set<RMContainer> set : curCandidates.values()) {
+ curSelected += set.size();
+ }
LOG.debug(MessageFormat
- .format("So far, total {0} containers selected to be preempted",
- totalSelected));
+ .format("So far, total {0} containers selected to be preempted, {1}"
+ + " containers selected this round\n",
+ totalSelected, curSelected));
}
}
@@ -483,8 +525,10 @@ public class ProportionalCapacityPreemptionPolicy
long currentTime = clock.getTime();
+ pcsMap = toPreemptPerSelector;
+
// preempt (or kill) the selected containers
- preemptOrkillSelectedContainerAfterWait(toPreempt, currentTime);
+ preemptOrkillSelectedContainerAfterWait(toPreemptPerSelector, currentTime);
// cleanup staled preemption candidates
cleanupStaledPreemptionCandidates(currentTime);
@@ -689,6 +733,12 @@ public class ProportionalCapacityPreemptionPolicy
return queueToPartitions;
}
+ @VisibleForTesting
+ Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> getToPreemptCandidatesPerSelector() {
+ return pcsMap;
+ }
+
@Override
public int getClusterMaxApplicationPriority() {
return scheduler.getMaxClusterLevelAppPriority().getPriority();
@@ -730,4 +780,9 @@ public class ProportionalCapacityPreemptionPolicy
public IntraQueuePreemptionOrderPolicy getIntraQueuePreemptionOrderPolicy() {
return intraQueuePreemptionOrderPolicy;
}
+
+ @Override
+ public long getDefaultMaximumKillWaitTimeout() {
+ return maxWaitTime;
+ }
}
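The per-selector timeout handling above boils down to this per-container decision (sketch; variable names are illustrative):

    Long markedAt = preemptionCandidates.get(container);
    long killWait = selector.getMaximumKillWaitTimeMs(); // falls back to maxWaitTime
    if (markedAt != null && markedAt + killWait <= currentTime) {
      // waited long enough: MARK_CONTAINER_FOR_KILLABLE
    } else if (markedAt == null) {
      // first selection: MARK_CONTAINER_FOR_PREEMPTION, record currentTime
    } // otherwise the scheduler was already notified; raise nothing new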
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
index 4a169af..78a9988 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java
@@ -380,6 +380,7 @@ public class QueuePriorityContainerCandidateSelector
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource clusterResource,
Resource totalPreemptedResourceAllowed) {
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates = new HashMap<>();
// Initialize digraph from queues
// TODO (wangda): only do this when queue refreshed.
priorityDigraph.clear();
@@ -388,7 +389,7 @@ public class QueuePriorityContainerCandidateSelector
// When all queues are set to same priority, or priority is not respected,
// direct return.
if (priorityDigraph.isEmpty()) {
- return selectedCandidates;
+ return curCandidates;
}
// Save parameters to be shared by other methods
@@ -478,13 +479,9 @@ public class QueuePriorityContainerCandidateSelector
.getReservedResource());
}
- Set<RMContainer> containers = selectedCandidates.get(
- c.getApplicationAttemptId());
- if (null == containers) {
- containers = new HashSet<>();
- selectedCandidates.put(c.getApplicationAttemptId(), containers);
- }
- containers.add(c);
+ // Add to preemptMap
+ CapacitySchedulerPreemptionUtils.addToPreemptMap(selectedCandidates,
+ curCandidates, c.getApplicationAttemptId(), c);
// Update totalPreemptionResourceAllowed
Resources.subtractFrom(totalPreemptedResourceAllowed,
@@ -504,7 +501,6 @@ public class QueuePriorityContainerCandidateSelector
}
}
}
-
- return selectedCandidates;
+ return curCandidates;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
index ff100d9..bdb7e8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ReservedContainerCandidatesSelector.java
@@ -31,7 +31,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -63,7 +62,7 @@ public class ReservedContainerCandidatesSelector
CapacitySchedulerPreemptionContext preemptionContext) {
super(preemptionContext);
preemptableAmountCalculator = new PreemptableResourceCalculator(
- preemptionContext, true);
+ preemptionContext, true, false);
}
@Override
@@ -71,6 +70,7 @@ public class ReservedContainerCandidatesSelector
Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
Resource clusterResource,
Resource totalPreemptedResourceAllowed) {
+ Map<ApplicationAttemptId, Set<RMContainer>> curCandidates = new HashMap<>();
// Calculate how much resources we need to preempt
preemptableAmountCalculator.computeIdealAllocation(clusterResource,
totalPreemptedResourceAllowed);
@@ -101,14 +101,10 @@ public class ReservedContainerCandidatesSelector
selectedCandidates, totalPreemptedResourceAllowed, false);
if (null != preemptionResult) {
for (RMContainer c : preemptionResult.selectedContainers) {
- ApplicationAttemptId appId = c.getApplicationAttemptId();
- Set<RMContainer> containers = selectedCandidates.get(appId);
- if (null == containers) {
- containers = new HashSet<>();
- selectedCandidates.put(appId, containers);
- }
+ // Add to preemptMap
+ CapacitySchedulerPreemptionUtils.addToPreemptMap(selectedCandidates,
+ curCandidates, c.getApplicationAttemptId(), c);
- containers.add(c);
if (LOG.isDebugEnabled()) {
LOG.debug(this.getClass().getName() + " Marked container=" + c
.getContainerId() + " from queue=" + c.getQueueName()
@@ -118,7 +114,7 @@ public class ReservedContainerCandidatesSelector
}
}
- return selectedCandidates;
+ return curCandidates;
}
private Resource getPreemptableResource(String queueName,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index 4214acc..4fb1862 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -138,7 +138,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
// This function "accepts" all the resources it can (pending) and returns
// the unused ones
Resource offer(Resource avail, ResourceCalculator rc,
- Resource clusterResource, boolean considersReservedResource) {
+ Resource clusterResource, boolean considersReservedResource,
+ boolean allowQueueBalanceAfterAllSafisfied) {
Resource absMaxCapIdealAssignedDelta = Resources.componentwiseMax(
Resources.subtract(getMax(), idealAssigned),
Resource.newInstance(0, 0));
@@ -179,7 +180,10 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
// leaf queues. Such an under-utilized leaf queue could preempt resources
// from an over-utilized leaf queue located in another hierarchy.
- accepted = filterByMaxDeductAssigned(rc, clusterResource, accepted);
+ // Allow queues to continue growing and balancing even if all queues are satisfied.
+ if (!allowQueueBalanceAfterAllSafisfied) {
+ accepted = filterByMaxDeductAssigned(rc, clusterResource, accepted);
+ }
// accepted so far contains the "quota acceptable" amount, we now filter by
// locality acceptable
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 76eaac0..f94654e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -1460,6 +1460,23 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
public static final String DEFAULT_INTRAQUEUE_PREEMPTION_ORDER_POLICY = "userlimit_first";
/**
+ * Should we allow queues to continue growing after all queues have reached
+ * their guaranteed capacity?
+ */
+ public static final String PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED =
+ PREEMPTION_CONFIG_PREFIX + "preemption-to-balance-queue-after-satisfied.enabled";
+ public static final boolean DEFAULT_PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED = false;
+
+ /**
+ * How long to wait before killing containers selected for queue balancing;
+ * by default 5 minutes (300000 ms).
+ */
+ public static final String MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION =
+ PREEMPTION_CONFIG_PREFIX + "preemption-to-balance-queue-after-satisfied.max-wait-before-kill";
+ public static final long
+ DEFAULT_MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION =
+ 300 * 1000;
+
+ /**
* Maximum applications for a queue, used when maximum applications per queue
* is not defined. To be consistent with previous versions, the default value
* is set to UNDEFINED.
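
For operators, both knobs are set in yarn-site.xml. Assuming PREEMPTION_CONFIG_PREFIX resolves to yarn.resourcemanager.monitor.capacity.preemption. (its value elsewhere in this class), the resulting properties would look like:

    <property>
      <name>yarn.resourcemanager.monitor.capacity.preemption.preemption-to-balance-queue-after-satisfied.enabled</name>
      <value>true</value>
    </property>
    <property>
      <!-- Milliseconds to wait before killing; the default is 300000 (5 minutes). -->
      <name>yarn.resourcemanager.monitor.capacity.preemption.preemption-to-balance-queue-after-satisfied.max-wait-before-kill</name>
      <value>60000</value>
    </property>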
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
index 6a953cf..38c2a2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -538,4 +539,61 @@ public class TestPreemptionForQueueWithPriorities
new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
getAppAttemptId(1))));
}
+
+ @Test
+ public void testPriorityPreemptionForBalanceBetweenSatisfiedQueues()
+ throws IOException {
+ /**
+ * All queues are beyond their guarantee, and c has higher priority than b.
+ * c asks for more resources while there is no idle capacity left, so c
+ * should preempt some resources from b without pushing b below its guarantee.
+ *
+ * Queue structure is:
+ *
+ * <pre>
+ * root
+ * / | \
+ * a b c
+ * </pre>
+ *
+ * For priorities
+ * - a=1
+ * - b=1
+ * - c=2
+ *
+ */
+ String labelsConfig = "=100,true"; // default partition
+ String nodesConfig = "n1="; // only one node
+ String queuesConfig =
+ // guaranteed,max,used,pending
+ "root(=[100 100 100 100]);" + //root
+ "-a(=[30 100 0 0]){priority=1};" + // a
+ "-b(=[30 100 40 50]){priority=1};" + // b
+ "-c(=[40 100 60 25]){priority=2}"; // c
+ String appsConfig =
+ //queueName\t(priority,resource,host,expression,#repeat,reserved)
+ "b\t(1,1,n1,,40,false);" + // app1 in b
+ "c\t(1,1,n1,,60,false)"; // app2 in c
+
+ buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+ CapacitySchedulerConfiguration newConf =
+ new CapacitySchedulerConfiguration(conf);
+ boolean isPreemptionToBalanceRequired = true;
+ newConf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ isPreemptionToBalanceRequired);
+ when(cs.getConfiguration()).thenReturn(newConf);
+ policy.editSchedule();
+
+ // IdealAssigned b: 30 c: 70. initIdealAssigned: b: 30 c: 40, even though
+ // b and c have the same relativeAssigned=1.0f (idealAssigned / guaranteed);
+ // since c has higher priority, c will be put in mostUnderServedQueue and
+ // get all of the remaining 30 capacity.
+ verify(mDisp, times(10)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(1))));
+ verify(mDisp, never()).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(2))));
+ }
}
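
As a reading aid for the mock-framework strings used here and in the next test class: "-b(=[30 100 40 50]){priority=1}" declares queue b in the default partition with guaranteed=30, max=100, used=40 and pending=50, while "b\t(1,1,n1,,40,false)" places 40 identical one-unit containers of priority 1 for an app in b on node n1, unreserved. This decoding follows the inline comments above; the authoritative grammar lives in ProportionalCapacityPreemptionPolicyMockFramework.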
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
new file mode 100644
index 0000000..22e8f63
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyPreemptToBalance.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class TestProportionalCapacityPreemptionPolicyPreemptToBalance
+ extends ProportionalCapacityPreemptionPolicyMockFramework {
+
+ @Test
+ public void testPreemptionToBalanceDisabled() throws IOException {
+ String labelsConfig = "=100,true"; // default partition
+ String nodesConfig = "n1="; // only one node
+ String queuesConfig =
+ // guaranteed,max,used,pending
+ "root(=[100 100 100 100]);" + //root
+ "-a(=[30 100 10 30]);" + // a
+ "-b(=[30 100 40 30]);" + // b
+ "-c(=[30 100 50 30]);" + // c
+ "-d(=[10 100 0 0])"; // d
+ String appsConfig =
+ //queueName\t(priority,resource,host,expression,#repeat,reserved)
+ "a\t(1,1,n1,,10,false);" + // app1 in a
+ "b\t(1,1,n1,,40,false);" + // app2 in b
+ "c\t(1,1,n1,,50,false)"; // app3 in c
+
+ buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+ policy.editSchedule();
+
+ // I_A: A:30 B:35 C:35, preempt 5 from B and 15 from C to A
+ verify(mDisp, times(5)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(2))));
+ verify(mDisp, times(15)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(3))));
+
+ assertEquals(30, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(35, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(35, policy.getQueuePartitions().get("c")
+ .get("").getIdealAssigned().getMemorySize());
+ }
+
+ @Test
+ public void testPreemptionToBalanceEnabled() throws IOException {
+ String labelsConfig = "=100,true"; // default partition
+ String nodesConfig = "n1="; // only one node
+ String queuesConfig =
+ // guaranteed,max,used,pending
+ "root(=[100 100 100 100]);" + //root
+ "-a(=[30 100 10 30]);" + // a
+ "-b(=[30 100 40 30]);" + // b
+ "-c(=[30 100 50 30]);" + // c
+ "-d(=[10 100 0 0])"; // d
+ String appsConfig =
+ //queueName\t(priority,resource,host,expression,#repeat,reserved)
+ "a\t(1,1,n1,,10,false);" + // app1 in a
+ "b\t(1,1,n1,,40,false);" + // app2 in b
+ "c\t(1,1,n1,,50,false)"; // app3 in c
+
+ // Enable preemption-to-balance; the ideal assignment will change.
+ boolean isPreemptionToBalanceEnabled = true;
+ conf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ isPreemptionToBalanceEnabled);
+
+ buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+ policy.editSchedule();
+
+ // I_A: A:33 B:33 C:33, preempt 7 from B and 17 from C to A
+ verify(mDisp, times(7)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(2))));
+ verify(mDisp, times(17)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(3))));
+
+ assertEquals(33, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(33, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(33, policy.getQueuePartitions().get("c")
+ .get("").getIdealAssigned().getMemorySize());
+ }
+
+
+ @Test
+ public void testPreemptionToBalanceUsedPlusPendingLessThanGuaranteed()
+ throws IOException{
+ String labelsConfig = "=100,true"; // default partition
+ String nodesConfig = "n1="; // only one node
+ String queuesConfig =
+ // guaranteed,max,used,pending
+ "root(=[100 100 100 100]);" + //root
+ "-a(=[30 100 10 6]);" + // a
+ "-b(=[30 100 40 30]);" + // b
+ "-c(=[30 100 50 30]);" + // c
+ "-d(=[10 100 0 0])"; // d
+ String appsConfig =
+ //queueName\t(priority,resource,host,expression,#repeat,reserved)
+ "a\t(1,1,n1,,10,false);" + // app1 in a
+ "b\t(1,1,n1,,40,false);" + // app2 in b
+ "c\t(1,1,n1,,50,false)"; // app3 in c
+
+ boolean isPreemptionToBalanceEnabled = true;
+ conf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ isPreemptionToBalanceEnabled);
+
+ buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
+ policy.editSchedule();
+
+ // I_A: A:16 B:42 C:42, preempt 8 from C to A (matches the assertions below)
+ verify(mDisp, times(8)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
+ getAppAttemptId(3))));
+
+ assertEquals(16, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(42, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(42, policy.getQueuePartitions().get("c")
+ .get("").getIdealAssigned().getMemorySize());
+ }
+
+ @Test
+ public void testPreemptionToBalanceWithVcoreResource() throws IOException {
+ Logger.getRootLogger().setLevel(Level.DEBUG);
+ String labelsConfig = "=100:100,true"; // default partition
+ String nodesConfig = "n1="; // only one node
+ String queuesConfig =
+ // guaranteed,max,used,pending
+ "root(=[100:100 100:100 100:100 120:140]);" + //root
+ "-a(=[60:60 100:100 40:40 70:40]);" + // a
+ "-b(=[40:40 100:100 60:60 50:100])"; // b
+
+ String appsConfig =
+ //queueName\t(priority,resource,host,expression,#repeat,reserved)
+ "a\t(1,1:1,n1,,40,false);" + // app1 in a
+ "b\t(1,1:1,n1,,60,false)"; // app2 in b
+
+ boolean isPreemptionToBalanceEnabled = true;
+ conf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ isPreemptionToBalanceEnabled);
+ buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, true);
+ policy.editSchedule();
+
+ // 21 containers will be preempted here
+ verify(mDisp, times(21)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.
+ IsPreemptionRequestFor(getAppAttemptId(2))));
+
+ assertEquals(60, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(60, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getVirtualCores());
+ assertEquals(40, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(40, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getVirtualCores());
+ }
+
+ @Test
+ public void testPreemptionToBalanceWithConfiguredTimeout() throws IOException {
+ Logger.getRootLogger().setLevel(Level.DEBUG);
+ String labelsConfig = "=100:100,true"; // default partition
+ String nodesConfig = "n1="; // only one node
+ String queuesConfig =
+ // guaranteed,max,used,pending
+ "root(=[100:100 100:100 100:100 120:140]);" + //root
+ "-a(=[60:60 100:100 40:40 70:40]);" + // a
+ "-b(=[40:40 100:100 60:60 50:100])"; // b
+
+ String appsConfig =
+ //queueName\t(priority,resource,host,expression,#repeat,reserved)
+ "a\t(1,1:1,n1,,40,false);" + // app1 in a
+ "b\t(1,1:1,n1,,60,false)"; // app2 in b
+
+ boolean isPreemptionToBalanceEnabled = true;
+ conf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ isPreemptionToBalanceEnabled);
+ final long FB_MAX_BEFORE_KILL = 60 * 1000;
+ conf.setLong(
+ CapacitySchedulerConfiguration.MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION,
+ FB_MAX_BEFORE_KILL);
+
+ buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, true);
+ policy.editSchedule();
+
+ Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> pcps= policy.getToPreemptCandidatesPerSelector();
+
+ String FIFO_CANDIDATE_SELECTOR = "FifoCandidatesSelector";
+ boolean hasFifoSelector = false;
+ for (Map.Entry<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
+ Set<RMContainer>>> pc : pcps.entrySet()) {
+ if (pc.getKey().getClass().getSimpleName().equals(FIFO_CANDIDATE_SELECTOR)) {
+ FifoCandidatesSelector pcs = (FifoCandidatesSelector) pc.getKey();
+ if (pcs.getAllowQueuesBalanceAfterAllQueuesSatisfied()) {
+ hasFifoSelector = true;
+ assertEquals(FB_MAX_BEFORE_KILL, pcs.getMaximumKillWaitTimeMs());
+ }
+ }
+ }
+
+ assertEquals(true, hasFifoSelector);
+
+ // 21 containers will be preempted here
+ verify(mDisp, times(21)).handle(argThat(
+ new TestProportionalCapacityPreemptionPolicy.
+ IsPreemptionRequestFor(getAppAttemptId(2))));
+
+ assertEquals(60, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(60, policy.getQueuePartitions().get("a")
+ .get("").getIdealAssigned().getVirtualCores());
+ assertEquals(40, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getMemorySize());
+ assertEquals(40, policy.getQueuePartitions().get("b")
+ .get("").getIdealAssigned().getVirtualCores());
+ }
+}
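
To run just this new suite locally, a standard surefire filter against the resourcemanager module (module path taken from the diff header above) should suffice:

    mvn test -Dtest=TestProportionalCapacityPreemptionPolicyPreemptToBalance \
        -pl hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager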
http://git-wip-us.apache.org/repos/asf/hadoop/blob/29119430/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
index 2aff82d..800789a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java
@@ -1111,5 +1111,116 @@ public class TestCapacitySchedulerSurgicalPreemption
rm1.close();
}
+ @Test(timeout = 600000)
+ public void testPreemptionToBalanceWithCustomTimeout() throws Exception {
+ /**
+ * Test case: Submit two applications (app1/app2) to different queues; queue
+ * structure:
+ *
+ * <pre>
+ * Root
+ * / | \
+ * a b c
+ * 10 20 70
+ * </pre>
+ *
+ * 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
+ *
+ * 2) app1 is submitted to queue-b and asks for 5 * 1G containers
+ *
+ * 3) app2 is submitted to queue-c and asks for one 4G container (for its AM)
+ *
+ * After preemption, we should expect:
+ * 1. 4 containers are preempted from app1
+ * 2. The selected containers are killed after the configured timeout.
+ * 3. The AM of app2 is successfully allocated.
+ */
+ conf.setBoolean(
+ CapacitySchedulerConfiguration.PREEMPTION_TO_BALANCE_QUEUES_BEYOND_GUARANTEED,
+ true);
+ conf.setLong(
+ CapacitySchedulerConfiguration.MAX_WAIT_BEFORE_KILL_FOR_QUEUE_BALANCE_PREEMPTION,
+ 20 * 1000);
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(
+ this.conf);
+
+ MockRM rm1 = new MockRM(conf);
+ rm1.getRMContext().setNodeLabelManager(mgr);
+ rm1.start();
+
+ MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
+ MockNM nm2 = rm1.registerNode("h2:1234", 20 * GB);
+ CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+ RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
+ RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
+
+ // Launch an app to queue a; the AM container should be launched on nm1
+ RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
+ MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+
+ am1.allocate("*", 1 * GB, 38, new ArrayList<ContainerId>());
+
+ // Do allocation for node1/node2
+ for (int i = 0; i < 38; i++) {
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
+ }
+
+ // App1 should have 39 containers now
+ FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
+ am1.getApplicationAttemptId());
+ Assert.assertEquals(39, schedulerApp1.getLiveContainers().size());
+ // 20 from n1 and 19 from n2
+ waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
+ am1.getApplicationAttemptId(), 20);
+ waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
+ am1.getApplicationAttemptId(), 19);
+
+
+ // Submit app2 to queue-c and asks for a 4G container for AM
+ RMApp app2 = rm1.submitApp(4 * GB, "app", "user", null, "c");
+ FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
+ ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
+
+ // Call editSchedule: containers are selected as preemption candidates
+ SchedulingMonitorManager smm = ((CapacityScheduler) rm1.
+ getResourceScheduler()).getSchedulingMonitorManager();
+ SchedulingMonitor smon = smm.getAvailableSchedulingMonitor();
+ ProportionalCapacityPreemptionPolicy editPolicy =
+ (ProportionalCapacityPreemptionPolicy) smon.getSchedulingEditPolicy();
+ editPolicy.editSchedule();
+ Assert.assertEquals(4, editPolicy.getToPreemptContainers().size());
+
+ // Check live containers immediately; nothing should have happened yet
+ Assert.assertEquals(39, schedulerApp1.getLiveContainers().size());
+
+ Thread.sleep(20 * 1000);
+ // Call editSchedule again: selected containers are killed
+ editPolicy.editSchedule();
+ waitNumberOfLiveContainersFromApp(schedulerApp1, 35);
+
+ // Call allocation, containers are reserved
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
+ waitNumberOfReservedContainersFromApp(schedulerApp2, 1);
+
+ // Call editSchedule twice, then allocate; the container should get allocated
+ editPolicy.editSchedule();
+ editPolicy.editSchedule();
+
+ int tick = 0;
+ while (schedulerApp2.getLiveContainers().size() != 1 && tick < 10) {
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+ cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
+ tick++;
+ Thread.sleep(100);
+ }
+ waitNumberOfReservedContainersFromApp(schedulerApp2, 0);
+
+ rm1.close();
+ }
+
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[41/45] hadoop git commit: HDDS-5. Enable OzoneManager kerberos auth.
Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/729465e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/729465e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/729465e2
Branch: refs/heads/HDDS-4
Commit: 729465e29ab3faf3ccc11254dbdae781e8147120
Parents: 40c6c19
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 14 09:36:57 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:02 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 4 +
.../common/src/main/resources/ozone-default.xml | 33 +++-
.../apache/hadoop/ozone/ksm/KSMConfigKeys.java | 5 +
.../ksm/protocol/KeySpaceManagerProtocol.java | 4 +
.../protocolPB/KeySpaceManagerProtocolPB.java | 5 +
.../hadoop/ozone/MiniOzoneClusterImpl.java | 3 +-
.../ozone/TestOzoneConfigurationFields.java | 3 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 169 +++++++++++++++----
.../hadoop/ozone/ksm/KeySpaceManager.java | 53 +++++-
.../ozone/ksm/KeySpaceManagerHttpServer.java | 5 +-
10 files changed, 238 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..a12d6ac 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,4 +20,8 @@ package org.apache.hadoop.hdds;
public final class HddsConfigKeys {
private HddsConfigKeys() {
}
+ public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
+ + "kerberos.keytab.file";
+ public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
+ + ".kerberos.principal";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 1b1d530..4ada591 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1071,7 +1071,23 @@
<name>ozone.scm.kerberos.principal</name>
<value></value>
<tag> OZONE, SECURITY</tag>
- <description>The SCM service principal. Ex scm/_HOST@REALM.TLD.</description>
+ <description>The SCM service principal, e.g. scm/_HOST@REALM.COM.</description>
+ </property>
+
+ <property>
+ <name>hdds.ksm.kerberos.keytab.file</name>
+ <value></value>
+ <tag> HDDS, SECURITY</tag>
+ <description>The keytab file used by the KSM daemon to log in as its
+ service principal. The principal name is configured with
+ hdds.ksm.kerberos.principal.
+ </description>
+ </property>
+ <property>
+ <name>hdds.ksm.kerberos.principal</name>
+ <value></value>
+ <tag> HDDS, SECURITY</tag>
+ <description>The KSM service principal, e.g. ksm/_HOST@REALM.COM.</description>
</property>
<property>
@@ -1083,4 +1099,19 @@
<value>/etc/security/keytabs/HTTP.keytab</value>
</property>
+ <property>
+ <name>hdds.ksm.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ <description>
+ KSM HTTP server Kerberos principal.
+ </description>
+ </property>
+ <property>
+ <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/HTTP.keytab</value>
+ <description>
+ KSM HTTP server Kerberos keytab.
+ </description>
+ </property>
+
</configuration>
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
index 75cf613..d911bcb 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -78,4 +78,9 @@ public final class KSMConfigKeys {
public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
"ozone.key.deleting.limit.per.task";
public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+ public static final String KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+ "hdds.ksm.web.authentication.kerberos.principal";
+ public static final String KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+ "hdds.ksm.web.authentication.kerberos.keytab";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
index 54862d3..de27108 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.ksm.protocol;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
@@ -29,10 +30,13 @@ import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.OzoneAclInfo;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol to talk to KSM.
*/
+@KerberosInfo(
+ serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
public interface KeySpaceManagerProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
index 8acca8a..71b9da0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
@@ -18,9 +18,12 @@
package org.apache.hadoop.ozone.ksm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.protocol.proto
.KeySpaceManagerProtocolProtos.KeySpaceManagerService;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used to communicate with KSM.
@@ -28,6 +31,8 @@ import org.apache.hadoop.ozone.protocol.proto
@ProtocolInfo(protocolName =
"org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface KeySpaceManagerProtocolPB
extends KeySpaceManagerService.BlockingInterface {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index fbd9565..0455e19 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -346,7 +346,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
*
* @throws IOException
*/
- private KeySpaceManager createKSM() throws IOException {
+ private KeySpaceManager createKSM()
+ throws IOException, AuthenticationException {
configureKSM();
KSMStorage ksmStore = new KSMStorage(conf);
ksmStore.setClusterId(clusterId);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 4898a1b..92e2ffc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone;
import org.apache.hadoop.conf.TestConfigurationFieldsBase;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -31,7 +32,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
xmlFilename = new String("ozone-default.xml");
configurationClasses =
new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
- KSMConfigKeys.class};
+ KSMConfigKeys.class, HddsConfigKeys.class};
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = true;
xmlPropsToSkipCompare.add("hadoop.tags.custom");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 9c430ad..b917dfe 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -26,24 +26,34 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Properties;
import java.util.UUID;
+import java.util.concurrent.Callable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.server.SCMStorage;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.ksm.KSMStorage;
+import org.apache.hadoop.ozone.ksm.KeySpaceManager;
import org.apache.hadoop.security.KerberosAuthException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -56,13 +66,23 @@ public final class TestSecureOzoneCluster {
private Logger LOGGER = LoggerFactory
.getLogger(TestSecureOzoneCluster.class);
+ @Rule
+ public Timeout timeout = new Timeout(80000);
+
private MiniKdc miniKdc;
private OzoneConfiguration conf;
private File workDir;
private static Properties securityProperties;
private File scmKeytab;
private File spnegoKeytab;
+ private File ksmKeyTab;
private String curUser;
+ private StorageContainerManager scm;
+ private KeySpaceManager ksm;
+
+ private static String clusterId;
+ private static String scmId;
+ private static String ksmId;
@Before
public void init() {
@@ -71,6 +91,10 @@ public final class TestSecureOzoneCluster {
startMiniKdc();
setSecureConfig(conf);
createCredentialsInKDC(conf, miniKdc);
+
+ clusterId = UUID.randomUUID().toString();
+ scmId = UUID.randomUUID().toString();
+ ksmId = UUID.randomUUID().toString();
} catch (IOException e) {
LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
} catch (Exception e) {
@@ -78,12 +102,30 @@ public final class TestSecureOzoneCluster {
}
}
+ @After
+ public void stop() {
+ try {
+ stopMiniKdc();
+ if (scm != null) {
+ scm.stop();
+ }
+ if (ksm != null) {
+ ksm.stop();
+ }
+ } catch (Exception e) {
+ LOGGER.error("Failed to stop TestSecureOzoneCluster", e);
+ }
+ }
+
private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
throws Exception {
createPrincipal(scmKeytab,
conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
createPrincipal(spnegoKeytab,
- conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+ conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+ conf.get(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
+ createPrincipal(ksmKeyTab,
+ conf.get(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY));
}
private void createPrincipal(File keytab, String... principal)
@@ -99,6 +141,10 @@ public final class TestSecureOzoneCluster {
miniKdc.start();
}
+ private void stopMiniKdc() throws Exception {
+ miniKdc.stop();
+ }
+
private void setSecureConfig(Configuration conf) throws IOException {
conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
String host = KerberosUtil.getLocalHostName();
@@ -114,59 +160,56 @@ public final class TestSecureOzoneCluster {
conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
"HTTP_SCM/" + host + "@" + realm);
+ conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+ "ksm/" + host + "@" + realm);
+ conf.set(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
+ "HTTP_KSM/" + host + "@" + realm);
+
scmKeytab = new File(workDir, "scm.keytab");
spnegoKeytab = new File(workDir, "http.keytab");
+ ksmKeyTab = new File(workDir, "ksm.keytab");
conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
scmKeytab.getAbsolutePath());
conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
spnegoKeytab.getAbsolutePath());
+ conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+ ksmKeyTab.getAbsolutePath());
}
@Test
public void testSecureScmStartupSuccess() throws Exception {
+
+ initSCM();
+ scm = StorageContainerManager.createSCM(null, conf);
+ //Reads the SCM Info from SCM instance
+ ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+ Assert.assertEquals(clusterId, scmInfo.getClusterId());
+ Assert.assertEquals(scmId, scmInfo.getScmId());
+ }
+
+ private void initSCM()
+ throws IOException, AuthenticationException {
final String path = GenericTestUtils
.getTempPath(UUID.randomUUID().toString());
Path scmPath = Paths.get(path, "scm-meta");
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
SCMStorage scmStore = new SCMStorage(conf);
- String clusterId = UUID.randomUUID().toString();
- String scmId = UUID.randomUUID().toString();
scmStore.setClusterId(clusterId);
scmStore.setScmId(scmId);
// writes the version file properties
scmStore.initialize();
- StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
- //Reads the SCM Info from SCM instance
- ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
- Assert.assertEquals(clusterId, scmInfo.getClusterId());
- Assert.assertEquals(scmId, scmInfo.getScmId());
}
@Test
public void testSecureScmStartupFailure() throws Exception {
- final String path = GenericTestUtils
- .getTempPath(UUID.randomUUID().toString());
- Path scmPath = Paths.get(path, "scm-meta");
-
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
- conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
- "scm@" + miniKdc.getRealm());
+ initSCM();
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
- SCMStorage scmStore = new SCMStorage(conf);
- String clusterId = UUID.randomUUID().toString();
- String scmId = UUID.randomUUID().toString();
- scmStore.setClusterId(clusterId);
- scmStore.setScmId(scmId);
- // writes the version file properties
- scmStore.initialize();
LambdaTestUtils.intercept(IOException.class,
"Running in secure mode, but config doesn't have a keytab",
() -> {
@@ -178,28 +221,82 @@ public final class TestSecureOzoneCluster {
conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
"/etc/security/keytabs/scm.keytab");
+ testCommonKerberosFailures(
+ () -> StorageContainerManager.createSCM(null, conf));
+
+ }
+
+ private void testCommonKerberosFailures(Callable callable) throws Exception {
LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
- + "to login: for principal:",
- () -> {
- StorageContainerManager.createSCM(null, conf);
- });
+ + "to login: for principal:", callable);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"OAuth2");
LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+ " attribute value for hadoop.security.authentication of OAuth2",
- () -> {
- StorageContainerManager.createSCM(null, conf);
- });
+ callable);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"KERBEROS_SSL");
LambdaTestUtils.intercept(AuthenticationException.class,
- "KERBEROS_SSL authentication method not support.",
- () -> {
- StorageContainerManager.createSCM(null, conf);
- });
+ "KERBEROS_SSL authentication method not",
+ callable);
+ }
+ /**
+ * Tests secure KSM initialization failure.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testSecureKsmInitializationFailure() throws Exception {
+ initSCM();
+ // Create a secure SCM instance as ksm client will connect to it
+ scm = StorageContainerManager.createSCM(null, conf);
+
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ KSMStorage ksmStore = new KSMStorage(conf);
+ ksmStore.setClusterId("testClusterId");
+ ksmStore.setScmId("testScmId");
+ // writes the version file properties
+ ksmStore.initialize();
+ conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+ "non-existent-user@EXAMPLE.com");
+ testCommonKerberosFailures(() -> KeySpaceManager.createKSM(null, conf));
+ }
+
+ /**
+ * Tests secure KSM initialization success.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testSecureKsmInitializationSuccess() throws Exception {
+ initSCM();
+ // Create a secure SCM instance as ksm client will connect to it
+ scm = StorageContainerManager.createSCM(null, conf);
+ LogCapturer logs = LogCapturer.captureLogs(KeySpaceManager.LOG);
+ GenericTestUtils
+ .setLogLevel(LoggerFactory.getLogger(KeySpaceManager.class.getName()),
+ org.slf4j.event.Level.INFO);
+
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ Path metaDirPath = Paths.get(path, "ksm-meta");
+
+ KSMStorage ksmStore = new KSMStorage(conf);
+ ksmStore.setClusterId("testClusterId");
+ ksmStore.setScmId("testScmId");
+ // writes the version file properties
+ ksmStore.initialize();
+ try {
+ ksm = KeySpaceManager.createKSM(null, conf);
+ } catch (Exception ex) {
+ // Expect a timeout failure from the SCM client in KSM, but the KSM user
+ // login via Kerberos should succeed
+ Assert.assertTrue(logs.getOutput().contains("KSM login successful"));
+ }
}
}
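
One caveat worth noting about testSecureKsmInitializationSuccess: createKSM is expected to fail on the SCM client timeout, and the assertion on the "KSM login successful" log line only runs inside the catch block, so what the test actually verifies is that the Kerberos login itself succeeded before the timeout.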
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index dc8fc91..be747d2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
@@ -60,7 +61,10 @@ import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB
.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
@@ -84,6 +88,8 @@ import java.util.List;
import java.util.Map;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
.OZONE_KSM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
@@ -103,7 +109,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
public final class KeySpaceManager extends ServiceRuntimeInfoImpl
implements KeySpaceManagerProtocol, KSMMXBean {
- private static final Logger LOG =
+ public static final Logger LOG =
LoggerFactory.getLogger(KeySpaceManager.class);
private static final String USAGE =
@@ -154,8 +160,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
private KeySpaceManager(OzoneConfiguration conf) throws IOException {
Preconditions.checkNotNull(conf);
configuration = conf;
+
ksmStorage = new KSMStorage(conf);
- scmBlockClient = getScmBlockClient(configuration);
scmContainerClient = getScmContainerClient(configuration);
if (ksmStorage.getState() != StorageState.INITIALIZED) {
throw new KSMException("KSM not initialized.",
@@ -163,6 +169,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
}
// verifies that the SCM info in the KSM Version file is correct.
+ scmBlockClient = getScmBlockClient(configuration);
ScmInfo scmInfo = scmBlockClient.getScmInfo();
if (!(scmInfo.getClusterId().equals(ksmStorage.getClusterID()) && scmInfo
.getScmId().equals(ksmStorage.getScmId()))) {
@@ -195,6 +202,34 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
}
/**
+ * Logs in the KSM service user if security and Kerberos are enabled.
+ *
+ * @param conf configuration carrying the KSM principal and keytab
+ * @throws IOException if the Kerberos login fails
+ * @throws AuthenticationException if the authentication method is not Kerberos
+ */
+ private static void loginKSMUser(OzoneConfiguration conf)
+ throws IOException, AuthenticationException {
+
+ if (SecurityUtil.getAuthenticationMethod(conf).equals
+ (AuthenticationMethod.KERBEROS)) {
+ LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
+ + "Principal: {},keytab: {}", conf.get(HDDS_KSM_KERBEROS_PRINCIPAL_KEY),
+ conf.get(HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY));
+
+ UserGroupInformation.setConfiguration(conf);
+
+ InetSocketAddress socAddr = getKsmAddress(conf);
+ SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+ HDDS_KSM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ } else {
+ throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
+ (conf) + " authentication method not supported. KSM user login "
+ + "failed.");
+ }
+ LOG.info("KSM login successful.");
+ }
+
+ /**
* Create a scm block client, used by putKey() and getKey().
*
* @return {@link ScmBlockLocationProtocol}
@@ -338,7 +373,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
*/
public static KeySpaceManager createKSM(String[] argv,
- OzoneConfiguration conf) throws IOException {
+ OzoneConfiguration conf) throws IOException, AuthenticationException {
if (!isHddsEnabled(conf)) {
System.err.println("KSM cannot be started in secure mode or when " +
OZONE_ENABLED + " is set to false");
@@ -350,6 +385,10 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
terminate(1);
return null;
}
+ // Authenticate KSM if security is enabled
+ if (conf.getBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true)) {
+ loginKSMUser(conf);
+ }
switch (startOpt) {
case CREATEOBJECTSTORE:
terminate(ksmInit(conf) ? 0 : 1);
@@ -444,7 +483,13 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
metadataManager.start();
keyManager.start();
ksmRpcServer.start();
- httpServer.start();
+ try {
+ httpServer.start();
+ } catch (Exception ex) {
+ // Allow KSM to start, as an HTTP server failure is not fatal.
+ LOG.error("KSM HttpServer failed to start.", ex);
+ }
+
registerMXBean();
setStartTime();
}
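
Putting the new keys together, a minimal secure-KSM configuration sketch for ozone-site.xml, built from the property names introduced in this patch (the literal for OZONE_SECURITY_ENABLED_KEY is assumed to be ozone.security.enabled, and all values are illustrative):

    <property><name>ozone.security.enabled</name><value>true</value></property>
    <property><name>hadoop.security.authentication</name><value>kerberos</value></property>
    <property><name>hdds.ksm.kerberos.principal</name><value>ksm/_HOST@EXAMPLE.COM</value></property>
    <property><name>hdds.ksm.kerberos.keytab.file</name><value>/etc/security/keytabs/ksm.keytab</value></property>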
http://git-wip-us.apache.org/repos/asf/hadoop/blob/729465e2/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
index 478804b..a0d15b3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.ksm;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.server.BaseHttpServer;
@@ -65,11 +64,11 @@ public class KeySpaceManagerHttpServer extends BaseHttpServer {
}
@Override protected String getKeytabFile() {
- return KSMConfigKeys.OZONE_KSM_KEYTAB_FILE;
+ return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
}
@Override protected String getSpnegoPrincipal() {
- return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+ return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
}
@Override protected String getEnabledKey() {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[24/45] hadoop git commit: HDDS-178: DN should update transactionId
on block delete. Contributed by Lokesh Jain.
Posted by xy...@apache.org.
HDDS-178: DN should update transactionId on block delete. Contributed by Lokesh Jain.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4d7227a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4d7227a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4d7227a
Branch: refs/heads/HDDS-4
Commit: e4d7227aad586f055b47bdc90c65361f9fb23146
Parents: 852ca6b
Author: Nanda kumar <na...@apache.org>
Authored: Fri Jun 29 04:37:16 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Fri Jun 29 04:37:16 2018 +0530
----------------------------------------------------------------------
.../common/impl/ContainerManagerImpl.java | 7 +-
.../common/interfaces/ContainerManager.java | 2 +
.../DeleteBlocksCommandHandler.java | 29 ++-
.../commandhandler/TestBlockDeletion.java | 211 +++++++++++++++++++
4 files changed, 243 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d7227a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 02572a8..e81f1c6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -1111,5 +1111,10 @@ public class ContainerManagerImpl implements ContainerManager {
return cData.getKeyCount();
}
-
+ @Override
+ public void updateDeleteTransactionId(long containerId,
+ long deleteTransactionId) {
+ containerMap.get(containerId)
+ .updateDeleteTransactionId(deleteTransactionId);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d7227a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
index 49b68dc..cf68b08 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
@@ -264,4 +264,6 @@ public interface ContainerManager extends RwLock {
*/
long getNumKeys(long containerId);
+ void updateDeleteTransactionId(long containerId, long deleteTransactionId);
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d7227a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index f954d98..d215da9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -158,6 +158,13 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
containerInfo.getDBPath());
}
+ if (delTX.getTxID() < containerInfo.getDeleteTransactionId()) {
+ LOG.debug(String.format("Ignoring delete blocks for containerId: %d."
+ + " Outdated delete transactionId %d < %d", containerId,
+ delTX.getTxID(), containerInfo.getDeleteTransactionId()));
+ return;
+ }
+
int newDeletionBlocks = 0;
MetadataStore containerDB = KeyUtils.getDB(containerInfo, config);
for (Long blk : delTX.getLocalIDList()) {
@@ -165,10 +172,20 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
byte[] blkBytes = Longs.toByteArray(blk);
byte[] blkInfo = containerDB.get(blkBytes);
if (blkInfo != null) {
+ byte[] deletingKeyBytes =
+ DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk);
+ byte[] deletedKeyBytes =
+ DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk);
+ if (containerDB.get(deletingKeyBytes) != null
+ || containerDB.get(deletedKeyBytes) != null) {
+ LOG.debug(String.format(
+ "Ignoring delete for block %d in container %d."
+ + " Entry already added.", blk, containerId));
+ continue;
+ }
// Found the block in container db,
// use an atomic update to change its state to deleting.
- batch.put(DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk),
- blkInfo);
+ batch.put(deletingKeyBytes, blkInfo);
batch.delete(blkBytes);
try {
containerDB.writeBatch(batch);
@@ -186,11 +203,13 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
LOG.debug("Block {} not found or already under deletion in"
+ " container {}, skip deleting it.", blk, containerId);
}
- containerDB.put(DFSUtil.string2Bytes(
- OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + delTX.getContainerID()),
- Longs.toByteArray(delTX.getTxID()));
}
+ containerDB.put(DFSUtil.string2Bytes(
+ OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + delTX.getContainerID()),
+ Longs.toByteArray(delTX.getTxID()));
+ containerManager
+ .updateDeleteTransactionId(delTX.getContainerID(), delTX.getTxID());
// update pending deletion blocks count in in-memory container status
containerManager.incrPendingDeletionBlocks(newDeletionBlocks, containerId);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d7227a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
new file mode 100644
index 0000000..43e3f50
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -0,0 +1,211 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+
+import com.google.common.primitives.Longs;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
+import org.apache.hadoop.ozone.ksm.KeySpaceManager;
+import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
+import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.utils.MetadataStore;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+
+public class TestBlockDeletion {
+ private static OzoneConfiguration conf = null;
+ private static ObjectStore store;
+ private static ContainerManagerImpl dnContainerManager = null;
+ private static StorageContainerManager scm = null;
+ private static KeySpaceManager ksm = null;
+ private static Set<Long> containerIdsWithDeletedBlocks;
+
+ @BeforeClass
+ public static void init() throws Exception {
+ conf = new OzoneConfiguration();
+
+ String path =
+ GenericTestUtils.getTempPath(TestOzoneShell.class.getSimpleName());
+ File baseDir = new File(path);
+ baseDir.mkdirs();
+
+ path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+ OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+
+ conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+ conf.setQuietMode(false);
+ conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
+ TimeUnit.MILLISECONDS);
+
+ MiniOzoneCluster cluster =
+ MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
+ cluster.waitForClusterToBeReady();
+ store = OzoneClientFactory.getRpcClient(conf).getObjectStore();
+ dnContainerManager =
+ (ContainerManagerImpl) cluster.getHddsDatanodes().get(0)
+ .getDatanodeStateMachine().getContainer().getContainerManager();
+ ksm = cluster.getKeySpaceManager();
+ scm = cluster.getStorageContainerManager();
+ containerIdsWithDeletedBlocks = new HashSet<>();
+ }
+
+ @Test(timeout = 60000)
+ public void testBlockDeletion()
+ throws IOException, InterruptedException {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+
+ String value = RandomStringUtils.random(1000000);
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ String keyName = UUID.randomUUID().toString();
+
+ OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length,
+ ReplicationType.STAND_ALONE, ReplicationFactor.ONE);
+ out.write(value.getBytes());
+ out.close();
+
+ KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName(volumeName)
+ .setBucketName(bucketName).setKeyName(keyName).setDataSize(0)
+ .setType(HddsProtos.ReplicationType.STAND_ALONE)
+ .setFactor(HddsProtos.ReplicationFactor.ONE).build();
+ List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroupList =
+ ksm.lookupKey(keyArgs).getKeyLocationVersions();
+
+ // verify key blocks were created in DN.
+ Assert.assertTrue(verifyBlocksCreated(ksmKeyLocationInfoGroupList));
+ // No containers with deleted blocks
+ Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
+ // Delete transactionIds for the containers should be 0
+ matchContainerTransactionIds();
+ ksm.deleteKey(keyArgs);
+ Thread.sleep(5000);
+ // The blocks should be deleted in the DN.
+ Assert.assertTrue(verifyBlocksDeleted(ksmKeyLocationInfoGroupList));
+
+ // Few containers with deleted blocks
+ Assert.assertTrue(!containerIdsWithDeletedBlocks.isEmpty());
+ // Containers in the DN and SCM should have same delete transactionIds
+ matchContainerTransactionIds();
+ }
+
+ private void matchContainerTransactionIds() throws IOException {
+ List<ContainerData> containerDataList = new ArrayList<>();
+ dnContainerManager.listContainer(0, 10000, containerDataList);
+ for (ContainerData containerData : containerDataList) {
+ long containerId = containerData.getContainerID();
+ if (containerIdsWithDeletedBlocks.contains(containerId)) {
+ Assert.assertTrue(
+ scm.getContainerInfo(containerId).getDeleteTransactionId() > 0);
+ } else {
+ Assert.assertEquals(
+ scm.getContainerInfo(containerId).getDeleteTransactionId(), 0);
+ }
+ Assert.assertEquals(dnContainerManager.readContainer(containerId)
+ .getDeleteTransactionId(),
+ scm.getContainerInfo(containerId).getDeleteTransactionId());
+ }
+ }
+
+ private boolean verifyBlocksCreated(
+ List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+ throws IOException {
+ return performOperationOnKeyContainers((blockID) -> {
+ try {
+ MetadataStore db = KeyUtils.getDB(
+ dnContainerManager.getContainerMap().get(blockID.getContainerID()),
+ conf);
+ Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID())));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }, ksmKeyLocationInfoGroups);
+ }
+
+ private boolean verifyBlocksDeleted(
+ List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+ throws IOException {
+ return performOperationOnKeyContainers((blockID) -> {
+ try {
+ MetadataStore db = KeyUtils.getDB(
+ dnContainerManager.getContainerMap().get(blockID.getContainerID()),
+ conf);
+ Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID())));
+ Assert.assertNull(db.get(DFSUtil.string2Bytes(
+ OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID())));
+ Assert.assertNotNull(db.get(DFSUtil.string2Bytes(
+ OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID())));
+ containerIdsWithDeletedBlocks.add(blockID.getContainerID());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }, ksmKeyLocationInfoGroups);
+ }
+
+ private boolean performOperationOnKeyContainers(Consumer<BlockID> consumer,
+ List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+ throws IOException {
+
+ try {
+ for (KsmKeyLocationInfoGroup ksmKeyLocationInfoGroup : ksmKeyLocationInfoGroups) {
+ List<KsmKeyLocationInfo> ksmKeyLocationInfos =
+ ksmKeyLocationInfoGroup.getLocationList();
+ for (KsmKeyLocationInfo ksmKeyLocationInfo : ksmKeyLocationInfos) {
+ BlockID blockID = ksmKeyLocationInfo.getBlockID();
+ consumer.accept(blockID);
+ }
+ }
+ } catch (Error e) {
+ e.printStackTrace();
+ return false;
+ }
+ return true;
+ }
+}
\ No newline at end of file
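The handler change above reduces to two guards: drop a whole transaction whose id is older than the container's last applied delete transaction, and skip individual blocks that already carry a deleting/deleted marker. A minimal, self-contained Java sketch of that idempotency pattern (the class, fields, and method below are illustrative stand-ins, not the actual Hadoop types):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Illustrative sketch of the idempotent delete handling above. */
public class IdempotentDeleteSketch {
  // Highest delete transaction id applied so far, keyed by container id.
  private final Map<Long, Long> lastTxId = new HashMap<>();
  // Blocks already marked for deletion, keyed by container id.
  private final Map<Long, Set<Long>> marked = new HashMap<>();

  /** Returns true if the transaction was applied, false if it was stale. */
  public synchronized boolean handle(long containerId, long txId,
      Iterable<Long> blocks) {
    if (txId < lastTxId.getOrDefault(containerId, 0L)) {
      return false;                  // outdated transaction, ignore it
    }
    Set<Long> seen = marked.computeIfAbsent(containerId, k -> new HashSet<>());
    for (long blk : blocks) {
      if (!seen.add(blk)) {
        continue;                    // entry already added, skip the block
      }
      // A real handler would batch a deleting-prefix key into the
      // container DB here, as the diff above does.
    }
    lastTxId.put(containerId, txId); // record the applied transaction id
    return true;
  }
}

Re-delivering the same transaction is therefore harmless, which is what TestBlockDeletion relies on when it matches delete transaction ids between the datanode and SCM.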
[07/45] hadoop git commit: HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 2bd43fb..edc0d7b 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.scm.cli;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
-import com.google.protobuf.ByteString;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
@@ -60,13 +59,11 @@ import java.sql.Statement;
import java.util.HashSet;
import java.util.Set;
-import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
/**
@@ -111,15 +108,6 @@ public class SQLCLI extends Configured implements Tool {
private static final String INSERT_CONTAINER_MEMBERS =
"INSERT INTO containerMembers (containerName, datanodeUUID) " +
"VALUES (\"%s\", \"%s\")";
- // for nodepool.db
- private static final String CREATE_NODE_POOL =
- "CREATE TABLE nodePool (" +
- "datanodeUUID TEXT NOT NULL," +
- "poolName TEXT NOT NULL," +
- "PRIMARY KEY(datanodeUUID, poolName))";
- private static final String INSERT_NODE_POOL =
- "INSERT INTO nodePool (datanodeUUID, poolName) " +
- "VALUES (\"%s\", \"%s\")";
// and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
// for openContainer.db
private static final String CREATE_OPEN_CONTAINER =
@@ -285,9 +273,6 @@ public class SQLCLI extends Configured implements Tool {
if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
LOG.info("Converting container DB");
convertContainerDB(dbPath, outPath);
- } else if (dbName.toString().equals(NODEPOOL_DB)) {
- LOG.info("Converting node pool DB");
- convertNodePoolDB(dbPath, outPath);
} else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
LOG.info("Converting open container DB");
convertOpenContainerDB(dbPath, outPath);
@@ -543,66 +528,7 @@ public class SQLCLI extends Configured implements Tool {
}
LOG.info("Insertion completed.");
}
- /**
- * Converts nodePool.db to sqlite. The schema of sql db:
- * two tables, nodePool and datanodeInfo (the same datanode Info as for
- * container.db).
- *
- * nodePool
- * ---------------------------------------------------------
- * datanodeUUID* | poolName*
- * ---------------------------------------------------------
- *
- * datanodeInfo:
- * ---------------------------------------------------------
- * hostname | datanodeUUid* | xferPort | ipcPort
- * ---------------------------------------------------------
- *
- * --------------------------------
- * |containerPort
- * --------------------------------
- *
- * @param dbPath path to container db.
- * @param outPath path to output sqlite
- * @throws IOException throws exception.
- */
- private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception {
- LOG.info("Create table for sql node pool db.");
- File dbFile = dbPath.toFile();
- try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
- .setConf(conf).setDbFile(dbFile).build();
- Connection conn = connectDB(outPath.toString())) {
- executeSQL(conn, CREATE_NODE_POOL);
- executeSQL(conn, CREATE_DATANODE_INFO);
- dbStore.iterate(null, (key, value) -> {
- DatanodeDetails nodeId = DatanodeDetails
- .getFromProtoBuf(HddsProtos.DatanodeDetailsProto
- .PARSER.parseFrom(key));
- String blockPool = DFSUtil.bytes2String(value);
- try {
- insertNodePoolDB(conn, blockPool, nodeId);
- return true;
- } catch (SQLException e) {
- throw new IOException(e);
- }
- });
- }
- }
-
- private void insertNodePoolDB(Connection conn, String blockPool,
- DatanodeDetails datanodeDetails) throws SQLException {
- String insertNodePool = String.format(INSERT_NODE_POOL,
- datanodeDetails.getUuidString(), blockPool);
- executeSQL(conn, insertNodePool);
-
- String insertDatanodeDetails = String
- .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(),
- datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(),
- datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE)
- .getValue());
- executeSQL(conn, insertDatanodeDetails);
- }
/**
* Convert openContainer.db to sqlite db file. This is rather simple db,
[45/45] hadoop git commit: Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"
Posted by xy...@apache.org.
Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"
This reverts commit 996a627b289947af3894bf83e7b63ec702a665cd.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a0db7fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a0db7fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a0db7fb
Branch: refs/heads/HDDS-4
Commit: 3a0db7fb838d3e599b3f686e87d707cad96530c3
Parents: 0a4ec99
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 15 16:56:24 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:02 2018 -0700
----------------------------------------------------------------------
hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ++++++++++++
1 file changed, 12 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a0db7fb/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index b469e93..4ada591 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -129,6 +129,18 @@
</description>
</property>
<property>
+ <name>dfs.ratis.client.request.timeout.duration</name>
+ <value>3s</value>
+ <tag>OZONE, RATIS, MANAGEMENT</tag>
+ <description>The timeout duration for ratis client request.</description>
+ </property>
+ <property>
+ <name>dfs.ratis.server.request.timeout.duration</name>
+ <value>3s</value>
+ <tag>OZONE, RATIS, MANAGEMENT</tag>
+ <description>The timeout duration for ratis server request.</description>
+ </property>
+ <property>
<name>ozone.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
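Both restored keys are duration-typed, so consumers read them through Configuration#getTimeDuration rather than parsing the "3s" strings by hand. A minimal sketch of that lookup (the class name and printed output are illustrative):

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class RatisTimeoutLookup {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // With this revert, ozone-default.xml supplies "3s" for both keys;
    // the second argument is only a fallback if the key is absent.
    long clientTimeoutMs = conf.getTimeDuration(
        "dfs.ratis.client.request.timeout.duration",
        3000, TimeUnit.MILLISECONDS);
    long serverTimeoutMs = conf.getTimeDuration(
        "dfs.ratis.server.request.timeout.duration",
        3000, TimeUnit.MILLISECONDS);
    System.out.println(clientTimeoutMs + " / " + serverTimeoutMs);
  }
}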
[38/45] hadoop git commit: HDDS-206. Ozone shell command doesn't respect KSM port set in ozone-site.xml. Contributed by Shashikant Banerjee.
Posted by xy...@apache.org.
HDDS-206. Ozone shell command doesn't respect KSM port set in ozone-site.xml. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab2f8343
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab2f8343
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab2f8343
Branch: refs/heads/HDDS-4
Commit: ab2f8343a9810c9ec8b0920215a0897e7f671aba
Parents: 1804a31
Author: Nanda kumar <na...@apache.org>
Authored: Tue Jul 3 00:51:16 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Tue Jul 3 00:51:16 2018 +0530
----------------------------------------------------------------------
.../hadoop/ozone/client/OzoneClientFactory.java | 13 ++++++-----
.../java/org/apache/hadoop/ozone/KsmUtils.java | 24 +++++++++++++-------
2 files changed, 23 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2f8343/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index dae94aa..3085b0d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.KsmUtils;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.client.rest.RestClient;
import org.apache.hadoop.ozone.client.rpc.RpcClient;
@@ -37,10 +38,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
.OZONE_CLIENT_PROTOCOL;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
.OZONE_KSM_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
- .OZONE_KSM_HTTP_BIND_PORT_DEFAULT;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT;
/**
* Factory class to create different types of OzoneClients.
@@ -108,8 +106,9 @@ public final class OzoneClientFactory {
*/
public static OzoneClient getRpcClient(String ksmHost)
throws IOException {
- return getRpcClient(ksmHost, OZONE_KSM_PORT_DEFAULT,
- new OzoneConfiguration());
+ Configuration config = new OzoneConfiguration();
+ int port = KsmUtils.getKsmRpcPort(config);
+ return getRpcClient(ksmHost, port, config);
}
/**
@@ -185,7 +184,9 @@ public final class OzoneClientFactory {
*/
public static OzoneClient getRestClient(String ksmHost)
throws IOException {
- return getRestClient(ksmHost, OZONE_KSM_HTTP_BIND_PORT_DEFAULT);
+ Configuration config = new OzoneConfiguration();
+ int port = KsmUtils.getKsmRestPort(config);
+ return getRestClient(ksmHost, port, config);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2f8343/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
index ebada1c..1025963 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
@@ -26,6 +26,8 @@ import com.google.common.base.Optional;
import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
.OZONE_KSM_BIND_HOST_DEFAULT;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT;
@@ -49,13 +51,9 @@ public final class KsmUtils {
final Optional<String> host = getHostNameFromConfigKeys(conf,
OZONE_KSM_ADDRESS_KEY);
- // If no port number is specified then we'll just try the defaultBindPort.
- final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
- OZONE_KSM_ADDRESS_KEY);
-
return NetUtils.createSocketAddr(
host.or(OZONE_KSM_BIND_HOST_DEFAULT) + ":" +
- port.or(OZONE_KSM_PORT_DEFAULT));
+ getKsmRpcPort(conf));
}
/**
@@ -76,12 +74,22 @@ public final class KsmUtils {
" details on configuring Ozone.");
}
+ return NetUtils.createSocketAddr(
+ host.get() + ":" + getKsmRpcPort(conf));
+ }
+
+ public static int getKsmRpcPort(Configuration conf) {
// If no port number is specified then we'll just try the defaultBindPort.
final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
OZONE_KSM_ADDRESS_KEY);
-
- return NetUtils.createSocketAddr(
- host.get() + ":" + port.or(OZONE_KSM_PORT_DEFAULT));
+ return port.or(OZONE_KSM_PORT_DEFAULT);
}
+ public static int getKsmRestPort(Configuration conf) {
+ // If no port number is specified then we'll just try the default
+ // HTTP BindPort.
+ final Optional<Integer> port =
+ getPortNumberFromConfigKeys(conf, OZONE_KSM_HTTP_ADDRESS_KEY);
+ return port.or(OZONE_KSM_HTTP_BIND_PORT_DEFAULT);
+ }
}
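The net effect: a client built from just a host name now resolves KSM ports from configuration instead of compile-time defaults. A small sketch, assuming OZONE_KSM_ADDRESS_KEY resolves to the key "ozone.ksm.address" (the host and port values are placeholders):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.KsmUtils;

public class KsmPortLookup {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // ozone-site.xml can carry a non-default port inside the address value.
    conf.set("ozone.ksm.address", "ksm.example.com:9870");
    // Resolves to 9870 here; falls back to OZONE_KSM_PORT_DEFAULT otherwise.
    int rpcPort = KsmUtils.getKsmRpcPort(conf);
    // Same pattern for REST, read from the KSM HTTP address key.
    int restPort = KsmUtils.getKsmRestPort(conf);
    System.out.println(rpcPort + " / " + restPort);
    // OzoneClientFactory.getRpcClient("ksm.example.com") now performs the
    // same lookup internally instead of assuming OZONE_KSM_PORT_DEFAULT.
  }
}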
[08/45] hadoop git commit: HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton
Posted by xy...@apache.org.
HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf03cc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf03cc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf03cc4
Branch: refs/heads/HDDS-4
Commit: aaf03cc459a34af284f9735453aefd4ddb430d67
Parents: fbaff36
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jun 27 12:39:15 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jun 27 12:39:15 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 26 --
.../org/apache/hadoop/ozone/OzoneConsts.java | 1 -
.../common/src/main/resources/ozone-default.xml | 47 ---
.../container/replication/ReplicationQueue.java | 78 +++++
.../replication/ReplicationReqMsg.java | 107 ++++++
.../container/replication/package-info.java | 23 ++
.../replication/TestReplicationQueue.java | 134 ++++++++
.../container/replication/package-info.java | 23 ++
.../hdds/scm/container/ContainerMapping.java | 10 +-
.../replication/ContainerSupervisor.java | 340 -------------------
.../container/replication/InProgressPool.java | 255 --------------
.../scm/container/replication/PeriodicPool.java | 119 -------
.../scm/container/replication/package-info.java | 23 --
.../hadoop/hdds/scm/node/NodeManager.java | 6 -
.../hadoop/hdds/scm/node/NodePoolManager.java | 71 ----
.../hadoop/hdds/scm/node/SCMNodeManager.java | 23 --
.../hdds/scm/node/SCMNodePoolManager.java | 269 ---------------
.../hdds/scm/container/MockNodeManager.java | 6 -
.../hdds/scm/node/TestSCMNodePoolManager.java | 160 ---------
.../testutils/ReplicationNodeManagerMock.java | 5 -
.../ReplicationNodePoolManagerMock.java | 133 --------
.../hadoop/ozone/scm/TestContainerSQLCli.java | 31 --
.../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 74 ----
23 files changed, 368 insertions(+), 1596 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 85407e6..df6fbf0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -243,32 +243,6 @@ public final class ScmConfigKeys {
public static final String
OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
- /**
- * Don't start processing a pool if we have not had a minimum number of
- * seconds from the last processing.
- */
- public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
- "ozone.scm.container.report.processing.interval";
- public static final String
- OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
-
- /**
- * This determines the total number of pools to be processed in parallel.
- */
- public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
- "ozone.scm.max.nodepool.processing.threads";
- public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
- /**
- * These 2 settings control the number of threads in executor pool and time
- * outs for thw container reports from all nodes.
- */
- public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
- "ozone.scm.max.container.report.threads";
- public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
- public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
- "ozone.scm.container.reports.wait.timeout";
- public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
- "5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index c40dc8e..08a5ffd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -91,7 +91,6 @@ public final class OzoneConsts {
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
public static final String BLOCK_DB = "block.db";
- public static final String NODEPOOL_DB = "nodepool.db";
public static final String OPEN_CONTAINERS_DB = "openContainers.db";
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
public static final String KSM_DB_NAME = "ksm.db";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 7a91610..25365c8 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -572,25 +572,6 @@
</description>
</property>
<property>
- <name>ozone.scm.container.report.processing.interval</name>
- <value>60s</value>
- <tag>OZONE, PERFORMANCE</tag>
- <description>Time interval for scm to process container reports
- for a node pool. Scm handles node pool reports in a cyclic clock
- manner, it fetches pools periodically with this time interval.
- </description>
- </property>
- <property>
- <name>ozone.scm.container.reports.wait.timeout</name>
- <value>300s</value>
- <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
- <description>Maximum time to wait in seconds for processing all container
- reports from
- a node pool. It determines the timeout for a
- node pool report.
- </description>
- </property>
- <property>
<name>ozone.scm.container.size.gb</name>
<value>5</value>
<tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
@@ -793,17 +774,6 @@
</description>
</property>
<property>
- <name>ozone.scm.max.container.report.threads</name>
- <value>100</value>
- <tag>OZONE, PERFORMANCE</tag>
- <description>
- Maximum number of threads to process container reports in scm.
- Each container report from a data node is processed by scm in a worker
- thread, fetched from a thread pool. This property is used to control the
- maximum size of the thread pool.
- </description>
- </property>
- <property>
<name>ozone.scm.max.hb.count.to.process</name>
<value>5000</value>
<tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
@@ -815,14 +785,6 @@
</description>
</property>
<property>
- <name>ozone.scm.max.nodepool.processing.threads</name>
- <value>1</value>
- <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
- <description>
- Number of node pools to process in parallel.
- </description>
- </property>
- <property>
<name>ozone.scm.names</name>
<value/>
<tag>OZONE</tag>
@@ -844,15 +806,6 @@
</description>
</property>
<property>
- <name>ozone.scm.max.nodepool.processing.threads</name>
- <value>1</value>
- <tag>OZONE, SCM</tag>
- <description>
- Controls the number of node pools that can be processed in parallel by
- Container Supervisor.
- </description>
- </property>
- <property>
<name>ozone.trace.enabled</name>
<value>false</value>
<tag>OZONE, DEBUG</tag>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
new file mode 100644
index 0000000..b83ecf1
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.Queue;
+
+/**
+ * Priority queue to handle under-replicated and over-replicated containers
+ * in Ozone. ReplicationManager will consume these messages and act
+ * accordingly.
+ */
+public class ReplicationQueue {
+
+ private final Queue<ReplicationReqMsg> queue;
+
+ ReplicationQueue() {
+ queue = new PriorityQueue<>();
+ }
+
+ public synchronized boolean add(ReplicationReqMsg repObj) {
+ if (this.queue.contains(repObj)) {
+ // Remove the earlier message and insert this one
+ this.queue.remove(repObj);
+ return this.queue.add(repObj);
+ } else {
+ return this.queue.add(repObj);
+ }
+ }
+
+ public synchronized boolean remove(ReplicationReqMsg repObj) {
+ return queue.remove(repObj);
+ }
+
+ /**
+ * Retrieves, but does not remove, the head of this queue,
+ * or returns {@code null} if this queue is empty.
+ *
+ * @return the head of this queue, or {@code null} if this queue is empty
+ */
+ public synchronized ReplicationReqMsg peek() {
+ return queue.peek();
+ }
+
+ /**
+ * Retrieves and removes the head of this queue,
+ * or returns {@code null} if this queue is empty.
+ *
+ * @return the head of this queue, or {@code null} if this queue is empty
+ */
+ public synchronized ReplicationReqMsg poll() {
+ return queue.poll();
+ }
+
+ public synchronized boolean removeAll(List<ReplicationReqMsg> repObjs) {
+ return queue.removeAll(repObjs);
+ }
+
+ public int size() {
+ return queue.size();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java
new file mode 100644
index 0000000..8d26fc3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.io.Serializable;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.commons.lang3.math.NumberUtils;
+
+/**
+ * Wrapper class for the HDDS replication queue. Implements natural
+ * ordering for use in a priority queue.
+ */
+public class ReplicationReqMsg implements Comparable<ReplicationReqMsg>,
+ Serializable {
+ private final long containerId;
+ private final short replicationCount;
+ private final short expecReplicationCount;
+ private final long timestamp;
+
+ public ReplicationReqMsg(long containerId, short replicationCount,
+ long timestamp, short expecReplicationCount) {
+ this.containerId = containerId;
+ this.replicationCount = replicationCount;
+ this.timestamp = timestamp;
+ this.expecReplicationCount = expecReplicationCount;
+ }
+
+ /**
+ * Compares this object with the specified object for order. Returns a
+ * negative integer, zero, or a positive integer as this object is less
+ * than, equal to, or greater than the specified object.
+ * @param o the object to be compared.
+ * @return a negative integer, zero, or a positive integer as this object
+ * is less than, equal to, or greater than the specified object.
+ * @throws NullPointerException if the specified object is null
+ * @throws ClassCastException if the specified object's type prevents it
+ * from being compared to this object.
+ */
+ @Override
+ public int compareTo(ReplicationReqMsg o) {
+ if (this == o) {
+ return 0;
+ }
+ if (o == null) {
+ return 1;
+ }
+ int retVal = NumberUtils
+ .compare(getReplicationCount() - getExpecReplicationCount(),
+ o.getReplicationCount() - o.getExpecReplicationCount());
+ if (retVal != 0) {
+ return retVal;
+ }
+ return NumberUtils.compare(getTimestamp(), o.getTimestamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return new HashCodeBuilder(91, 1011)
+ .append(getContainerId())
+ .toHashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ ReplicationReqMsg that = (ReplicationReqMsg) o;
+ return new EqualsBuilder().append(getContainerId(), that.getContainerId())
+ .isEquals();
+ }
+
+ public long getContainerId() {
+ return containerId;
+ }
+
+ public short getReplicationCount() {
+ return replicationCount;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ public short getExpecReplicationCount() {
+ return expecReplicationCount;
+ }
+}
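Together, the two classes above form a deduplicating priority queue ordered by replication deficit. A quick illustrative driver (it must sit in the same package because the ReplicationQueue constructor is package-private; container ids and timestamps are arbitrary):

package org.apache.hadoop.ozone.container.replication;

/** Demonstrates ordering and per-container deduplication. */
public class ReplicationQueueDemo {
  public static void main(String[] args) {
    ReplicationQueue queue = new ReplicationQueue();
    long now = System.currentTimeMillis();
    // Container 1: one replica where three are expected (deficit -2).
    queue.add(new ReplicationReqMsg(1L, (short) 1, now, (short) 3));
    // Container 2: fully replicated (deficit 0).
    queue.add(new ReplicationReqMsg(2L, (short) 3, now, (short) 3));
    // Container 1 again: equals() keys on container id, so this replaces
    // the earlier message instead of queueing a second one.
    queue.add(new ReplicationReqMsg(1L, (short) 0, now + 1, (short) 3));

    System.out.println(queue.size());                  // 2
    System.out.println(queue.poll().getContainerId()); // 1 (most deficient)
    System.out.println(queue.poll().getContainerId()); // 2
  }
}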
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000..7f335e3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.replication;
+
+/**
+ * Ozone Container replication related classes.
+ */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
new file mode 100644
index 0000000..39c61d3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.Random;
+import java.util.UUID;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for ReplicationQueue.
+ */
+public class TestReplicationQueue {
+
+ private ReplicationQueue replicationQueue;
+ private Random random;
+
+ @Before
+ public void setUp() {
+ replicationQueue = new ReplicationQueue();
+ random = new Random();
+ }
+
+ @Test
+ public void testDuplicateAddOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationReqMsg obj1, obj2, obj3;
+ long time = Time.monotonicNow();
+ obj1 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3);
+ obj2 = new ReplicationReqMsg(contId, (short) 2, time + 1, (short) 3);
+ obj3 = new ReplicationReqMsg(contId, (short) 1, time + 2, (short) 3);
+
+ replicationQueue.add(obj1);
+ replicationQueue.add(obj2);
+ replicationQueue.add(obj3);
+ Assert.assertEquals("Should add only 1 msg as second one is duplicate",
+ 1, replicationQueue.size());
+ ReplicationReqMsg temp = replicationQueue.poll();
+ Assert.assertEquals(temp, obj3);
+ }
+
+ @Test
+ public void testPollOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationReqMsg msg1, msg2, msg3, msg4, msg5;
+ msg1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(),
+ (short) 3);
+ long time = Time.monotonicNow();
+ msg2 = new ReplicationReqMsg(contId + 1, (short) 4, time, (short) 3);
+ msg3 = new ReplicationReqMsg(contId + 2, (short) 0, time, (short) 3);
+ msg4 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3);
+ // Replication message for the same container as msg2; the add replaces msg2
+ msg5 = new ReplicationReqMsg(contId + 1, (short) 2, time, (short) 3);
+
+ replicationQueue.add(msg1);
+ replicationQueue.add(msg2);
+ replicationQueue.add(msg3);
+ replicationQueue.add(msg4);
+ replicationQueue.add(msg5);
+ Assert.assertEquals("Should have 3 objects",
+ 3, replicationQueue.size());
+
+ // The priority queue orders messages by replication deficit
+ // (replicationCount - expecReplicationCount), lowest deficit first
+ ReplicationReqMsg temp;
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 2 objects",
+ 2, replicationQueue.size());
+ Assert.assertEquals(temp, msg3);
+
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 1 objects",
+ 1, replicationQueue.size());
+ Assert.assertEquals(temp, msg5);
+
+ // Only msg4 remains; msg2 was replaced by msg5 on add since both
+ // refer to the same container, so the final poll returns msg4.
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 0 objects",
+ replicationQueue.size(), 0);
+ Assert.assertEquals(temp, msg4);
+ }
+
+ @Test
+ public void testRemoveOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationReqMsg obj1, obj2, obj3;
+ obj1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(),
+ (short) 3);
+ obj2 = new ReplicationReqMsg(contId + 1, (short) 2, Time.monotonicNow(),
+ (short) 3);
+ obj3 = new ReplicationReqMsg(contId + 2, (short) 3, Time.monotonicNow(),
+ (short) 3);
+
+ replicationQueue.add(obj1);
+ replicationQueue.add(obj2);
+ replicationQueue.add(obj3);
+ Assert.assertEquals("Should have 3 objects",
+ 3, replicationQueue.size());
+
+ replicationQueue.remove(obj3);
+ Assert.assertEquals("Should have 2 objects",
+ 2, replicationQueue.size());
+
+ replicationQueue.remove(obj2);
+ Assert.assertEquals("Should have 1 objects",
+ 1, replicationQueue.size());
+
+ replicationQueue.remove(obj1);
+ Assert.assertEquals("Should have 0 objects",
+ 0, replicationQueue.size());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000..5b1fd0f
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for Ozone container replication.
+ */
+package org.apache.hadoop.ozone.container.replication;
+// Test classes for Replication functionality.
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index b563e90..9fd30f2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
@@ -80,7 +79,6 @@ public class ContainerMapping implements Mapping {
private final PipelineSelector pipelineSelector;
private final ContainerStateManager containerStateManager;
private final LeaseManager<ContainerInfo> containerLeaseManager;
- private final ContainerSupervisor containerSupervisor;
private final float containerCloseThreshold;
private final ContainerCloser closer;
private final long size;
@@ -127,9 +125,7 @@ public class ContainerMapping implements Mapping {
OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
this.containerStateManager =
new ContainerStateManager(conf, this);
- this.containerSupervisor =
- new ContainerSupervisor(conf, nodeManager,
- nodeManager.getNodePoolManager());
+
this.containerCloseThreshold = conf.getFloat(
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD,
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
@@ -407,8 +403,8 @@ public class ContainerMapping implements Mapping {
throws IOException {
List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
containerInfos = reports.getReportsList();
- containerSupervisor.handleContainerReport(datanodeDetails, reports);
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
+
+ for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
containerInfos) {
byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
lock.lock();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
deleted file mode 100644
index 5bd0574..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.PriorityQueue;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static com.google.common.util.concurrent.Uninterruptibles
- .sleepUninterruptibly;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT;
-
-/**
- * This class takes a set of container reports that belong to a pool and then
- * computes the replication levels for each container.
- */
-public class ContainerSupervisor implements Closeable {
- public static final Logger LOG =
- LoggerFactory.getLogger(ContainerSupervisor.class);
-
- private final NodePoolManager poolManager;
- private final HashSet<String> poolNames;
- private final PriorityQueue<PeriodicPool> poolQueue;
- private final NodeManager nodeManager;
- private final long containerProcessingLag;
- private final AtomicBoolean runnable;
- private final ExecutorService executorService;
- private final long maxPoolWait;
- private long poolProcessCount;
- private final List<InProgressPool> inProgressPoolList;
- private final AtomicInteger threadFaultCount;
- private final int inProgressPoolMaxCount;
-
- private final ReadWriteLock inProgressPoolListLock;
-
- /**
- * Returns the number of times we have processed pools.
- * @return long
- */
- public long getPoolProcessCount() {
- return poolProcessCount;
- }
-
-
- /**
- * Constructs a class that computes Replication Levels.
- *
- * @param conf - OzoneConfiguration
- * @param nodeManager - Node Manager
- * @param poolManager - Pool Manager
- */
- public ContainerSupervisor(Configuration conf, NodeManager nodeManager,
- NodePoolManager poolManager) {
- Preconditions.checkNotNull(poolManager);
- Preconditions.checkNotNull(nodeManager);
- this.containerProcessingLag =
- conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,
- OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,
- TimeUnit.SECONDS
- ) * 1000;
- int maxContainerReportThreads =
- conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS,
- OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT
- );
- this.maxPoolWait =
- conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT,
- OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT,
- TimeUnit.MILLISECONDS);
- this.inProgressPoolMaxCount = conf.getInt(
- OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS,
- OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT);
- this.poolManager = poolManager;
- this.nodeManager = nodeManager;
- this.poolNames = new HashSet<>();
- this.poolQueue = new PriorityQueue<>();
- this.runnable = new AtomicBoolean(true);
- this.threadFaultCount = new AtomicInteger(0);
- this.executorService = newCachedThreadPool(
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("Container Reports Processing Thread - %d")
- .build(), maxContainerReportThreads);
- this.inProgressPoolList = new LinkedList<>();
- this.inProgressPoolListLock = new ReentrantReadWriteLock();
-
- initPoolProcessThread();
- }
-
- private ExecutorService newCachedThreadPool(ThreadFactory threadFactory,
- int maxThreads) {
-    // Core size 0 plus an unbounded LinkedBlockingQueue never grows past
-    // one thread; use maxThreads as core size with idle timeout instead.
-    HadoopThreadPoolExecutor executor = new HadoopThreadPoolExecutor(
-        maxThreads, maxThreads, 60L, TimeUnit.SECONDS,
-        new LinkedBlockingQueue<>(), threadFactory);
-    executor.allowCoreThreadTimeOut(true);
-    return executor;
- }
-
- /**
- * Returns the number of pools that are under process right now.
- * @return int - Number of pools that are in process.
- */
- public int getInProgressPoolCount() {
- return inProgressPoolList.size();
- }
-
- /**
- * Exits the background thread.
- */
- public void setExit() {
- this.runnable.set(false);
- }
-
- /**
- * Adds or removes pools from names that we need to process.
- *
-   * There are two different cases that we need to process:
-   * pools being added, and, at times, pools being removed.
- */
- private void refreshPools() {
- List<String> pools = this.poolManager.getNodePools();
- if (pools != null) {
-
- HashSet<String> removedPools =
- computePoolDifference(this.poolNames, new HashSet<>(pools));
-
- HashSet<String> addedPools =
- computePoolDifference(new HashSet<>(pools), this.poolNames);
- // TODO: Support remove pool API in pool manager so that this code
- // path can be tested. This never happens in the current code base.
-      for (String poolName : removedPools) {
-        // Calling remove() while iterating a PriorityQueue risks a
-        // ConcurrentModificationException; removeIf is safe here.
-        poolQueue.removeIf(
-            periodicPool -> periodicPool.getPoolName().equals(poolName));
-      }
- // Remove the pool names that we have in the list.
- this.poolNames.removeAll(removedPools);
-
- for (String poolName : addedPools) {
- poolQueue.add(new PeriodicPool(poolName));
- }
-
- // Add to the pool names we are tracking.
- poolNames.addAll(addedPools);
- }
-
- }
-
- /**
-   * Computes the set difference: pool names present in newPools but
-   * absent from oldPool.
-   *
-   * @param newPools - new pool set.
-   * @param oldPool - old pool set.
-   * @return the names in newPools that are not in oldPool.
- */
- private HashSet<String> computePoolDifference(HashSet<String> newPools,
- Set<String> oldPool) {
- Preconditions.checkNotNull(newPools);
- Preconditions.checkNotNull(oldPool);
- HashSet<String> newSet = new HashSet<>(newPools);
- newSet.removeAll(oldPool);
- return newSet;
- }
-
- private void initPoolProcessThread() {
-
- /*
- * Task that runs to check if we need to start a pool processing job.
-     * If so, we create a pool reconciliation job and find out if all the
-     * expected containers are on the nodes.
- */
- Runnable processPools = () -> {
- while (runnable.get()) {
- // Make sure that we don't have any new pools.
- refreshPools();
- while (inProgressPoolList.size() < inProgressPoolMaxCount) {
- PeriodicPool pool = poolQueue.poll();
- if (pool != null) {
- if (pool.getLastProcessedTime() + this.containerProcessingLag >
- Time.monotonicNow()) {
- LOG.debug("Not within the time window for processing: {}",
- pool.getPoolName());
-            // We might oversleep here; not a big deal.
- sleepUninterruptibly(this.containerProcessingLag,
- TimeUnit.MILLISECONDS);
- }
- LOG.debug("Adding pool {} to container processing queue",
- pool.getPoolName());
- InProgressPool inProgressPool = new InProgressPool(maxPoolWait,
- pool, this.nodeManager, this.poolManager, this.executorService);
- inProgressPool.startReconciliation();
- inProgressPoolListLock.writeLock().lock();
- try {
- inProgressPoolList.add(inProgressPool);
- } finally {
- inProgressPoolListLock.writeLock().unlock();
- }
- poolProcessCount++;
- } else {
- break;
- }
- }
- sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS);
- inProgressPoolListLock.readLock().lock();
- try {
- for (InProgressPool inProgressPool : inProgressPoolList) {
- inProgressPool.finalizeReconciliation();
- poolQueue.add(inProgressPool.getPool());
- }
- } finally {
- inProgressPoolListLock.readLock().unlock();
- }
- inProgressPoolListLock.writeLock().lock();
- try {
- inProgressPoolList.clear();
- } finally {
- inProgressPoolListLock.writeLock().unlock();
- }
- }
- };
-
- // We will have only one thread for pool processing.
- Thread poolProcessThread = new Thread(processPools);
- poolProcessThread.setDaemon(true);
- poolProcessThread.setName("Pool replica thread");
-    poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> {
-      // Let us just restart this thread after logging a critical error.
-      // If this thread is not running we cannot handle commands from SCM.
-      LOG.error("Critical Error : Pool replica thread encountered an " +
-          "error. Thread: {} Error Count : {}", t.toString(),
-          threadFaultCount.incrementAndGet(), e);
-      // A terminated thread cannot be restarted with start(); create a
-      // fresh scheduling thread instead.
-      initPoolProcessThread();
-      // TODO : Add a config to restrict how many times we will restart this
-      // thread in a single session.
-    });
- poolProcessThread.start();
- }
-
- /**
-   * Routes a container report to the appropriate in-progress pool.
-   * @param datanodeDetails - datanode that sent the report.
-   * @param containerReport - container report for a specific container from
-   * a datanode.
- */
- public void handleContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto containerReport) {
- inProgressPoolListLock.readLock().lock();
- try {
- String poolName = poolManager.getNodePool(datanodeDetails);
- for (InProgressPool ppool : inProgressPoolList) {
- if (ppool.getPoolName().equalsIgnoreCase(poolName)) {
- ppool.handleContainerReport(datanodeDetails, containerReport);
- return;
- }
- }
- // TODO: Decide if we can do anything else with this report.
- LOG.debug("Discarding the container report for pool {}. " +
- "That pool is not currently in the pool reconciliation process." +
- " Container Name: {}", poolName, datanodeDetails);
- } catch (SCMException e) {
- LOG.warn("Skipping processing container report from datanode {}, "
- + "cause: failed to get the corresponding node pool",
- datanodeDetails.toString(), e);
- } finally {
- inProgressPoolListLock.readLock().unlock();
- }
- }
-
- /**
-   * Gets the in-progress pool list; used for testing.
- * @return List of InProgressPool
- */
- @VisibleForTesting
- public List<InProgressPool> getInProcessPoolList() {
- return inProgressPoolList;
- }
-
- /**
-   * Shuts down the container supervisor and its worker pool.
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
- setExit();
- HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS);
- }
-}
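For orientation, here is a minimal usage sketch of the supervisor deleted above. It is illustrative only: the configuration, node manager, pool manager, and report values are assumed to exist elsewhere and are not part of this change.

    // Sketch only; conf, nodeManager, poolManager, datanodeDetails and
    // containerReport are assumed to be constructed elsewhere.
    try (ContainerSupervisor supervisor =
             new ContainerSupervisor(conf, nodeManager, poolManager)) {
      // Reports for pools not currently under reconciliation are discarded.
      supervisor.handleContainerReport(datanodeDetails, containerReport);
      long processed = supervisor.getPoolProcessCount();
    } // close() stops the scheduling thread and shuts down the executor.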
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
deleted file mode 100644
index 4b54731..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-/**
- * A pool that is actively checking the replication status of its
- * containers.
- */
-public final class InProgressPool {
- public static final Logger LOG =
- LoggerFactory.getLogger(InProgressPool.class);
-
- private final PeriodicPool pool;
- private final NodeManager nodeManager;
- private final NodePoolManager poolManager;
- private final ExecutorService executorService;
- private final Map<Long, Integer> containerCountMap;
- private final Map<UUID, Boolean> processedNodeSet;
- private final long startTime;
- private ProgressStatus status;
- private AtomicInteger nodeCount;
- private AtomicInteger nodeProcessed;
- private AtomicInteger containerProcessedCount;
- private long maxWaitTime;
- /**
-   * Constructs a pool that is being processed.
- * @param maxWaitTime - Maximum wait time in milliseconds.
- * @param pool - Pool that we are working against
- * @param nodeManager - Nodemanager
- * @param poolManager - pool manager
- * @param executorService - Shared Executor service.
- */
- InProgressPool(long maxWaitTime, PeriodicPool pool,
- NodeManager nodeManager, NodePoolManager poolManager,
- ExecutorService executorService) {
- Preconditions.checkNotNull(pool);
- Preconditions.checkNotNull(nodeManager);
- Preconditions.checkNotNull(poolManager);
- Preconditions.checkNotNull(executorService);
- Preconditions.checkArgument(maxWaitTime > 0);
- this.pool = pool;
- this.nodeManager = nodeManager;
- this.poolManager = poolManager;
- this.executorService = executorService;
- this.containerCountMap = new ConcurrentHashMap<>();
- this.processedNodeSet = new ConcurrentHashMap<>();
- this.maxWaitTime = maxWaitTime;
- startTime = Time.monotonicNow();
- }
-
- /**
- * Returns periodic pool.
- *
- * @return PeriodicPool
- */
- public PeriodicPool getPool() {
- return pool;
- }
-
- /**
-   * We are done if we have received reports from all nodes or have
-   * finished waiting for the specified time.
- *
- * @return true if we are done, false otherwise.
- */
- public boolean isDone() {
-    return (nodeCount.get() == nodeProcessed.get()) ||
-        // Also done once the maximum wait window has elapsed.
-        (this.startTime + this.maxWaitTime) < Time.monotonicNow();
- }
-
- /**
- * Gets the number of containers processed.
- *
- * @return int
- */
- public int getContainerProcessedCount() {
- return containerProcessedCount.get();
- }
-
- /**
- * Returns the start time in milliseconds.
- *
- * @return - Start Time.
- */
- public long getStartTime() {
- return startTime;
- }
-
- /**
- * Get the number of nodes in this pool.
- *
- * @return - node count
- */
- public int getNodeCount() {
- return nodeCount.get();
- }
-
- /**
- * Get the number of nodes that we have already processed container reports
- * from.
- *
- * @return - Processed count.
- */
- public int getNodeProcessed() {
- return nodeProcessed.get();
- }
-
- /**
- * Returns the current status.
- *
- * @return Status
- */
- public ProgressStatus getStatus() {
- return status;
- }
-
- /**
- * Starts the reconciliation process for all the nodes in the pool.
- */
- public void startReconciliation() {
- List<DatanodeDetails> datanodeDetailsList =
- this.poolManager.getNodes(pool.getPoolName());
-    if (datanodeDetailsList.isEmpty()) {
-      LOG.error("Datanode list for {} is empty. Pool with no nodes?",
- pool.getPoolName());
- this.status = ProgressStatus.Error;
- return;
- }
-
- nodeProcessed = new AtomicInteger(0);
- containerProcessedCount = new AtomicInteger(0);
- nodeCount = new AtomicInteger(0);
- this.status = ProgressStatus.InProgress;
- this.getPool().setLastProcessedTime(Time.monotonicNow());
- }
-
- /**
-   * Queues a container report for handling. This is done in a worker thread
-   * since decoding a container report might be compute intensive. We don't
-   * want to block since we have asked for a bunch of container reports
-   * from a set of datanodes.
- *
- * @param containerReport - ContainerReport
- */
- public void handleContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto containerReport) {
- if (status == ProgressStatus.InProgress) {
- executorService.submit(processContainerReport(datanodeDetails,
- containerReport));
- } else {
- LOG.debug("Cannot handle container report when the pool is in {} status.",
- status);
- }
- }
-
- private Runnable processContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto reports) {
- return () -> {
-      // putIfAbsent returns null only for the first report from a given
-      // node, so each node is counted exactly once.
-      if (processedNodeSet.putIfAbsent(datanodeDetails.getUuid(),
-          Boolean.TRUE) == null) {
- nodeProcessed.incrementAndGet();
- LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed,
- datanodeDetails.getUuid());
- for (ContainerInfo info : reports.getReportsList()) {
- containerProcessedCount.incrementAndGet();
- LOG.debug("Total Containers processed: {} Container Name: {}",
- containerProcessedCount.get(), info.getContainerID());
-
- // Update the container map with count + 1 if the key exists or
- // update the map with 1. Since this is a concurrentMap the
- // computation and update is atomic.
- containerCountMap.merge(info.getContainerID(), 1, Integer::sum);
- }
- }
- };
- }
-
- /**
- * Filter the containers based on specific rules.
- *
- * @param predicate -- Predicate to filter by
- * @return A list of map entries.
- */
- public List<Map.Entry<Long, Integer>> filterContainer(
- Predicate<Map.Entry<Long, Integer>> predicate) {
- return containerCountMap.entrySet().stream()
- .filter(predicate).collect(Collectors.toList());
- }
-
- /**
-   * Used only for testing; calling this will abort container report
-   * processing. This is a very dangerous call and should not be made by
-   * any user.
- */
- @VisibleForTesting
- public void setDoneProcessing() {
- nodeProcessed.set(nodeCount.get());
- }
-
- /**
- * Returns the pool name.
- *
- * @return Name of the pool.
- */
- String getPoolName() {
- return pool.getPoolName();
- }
-
- public void finalizeReconciliation() {
- status = ProgressStatus.Done;
- //TODO: Add finalizing logic. This is where actual reconciliation happens.
- }
-
- /**
- * Current status of the computing replication status.
- */
- public enum ProgressStatus {
- InProgress, Done, Error
- }
-}
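The containerCountMap.merge(...) call above is the key concurrency idiom in this class: merge() performs the read-modify-write atomically on a ConcurrentHashMap, so concurrent report processors can bump replica counts without explicit locking. A self-contained illustration (all names here are invented for the example):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class ReplicaCountSketch {
      public static void main(String[] args) {
        Map<Long, Integer> replicaCount = new ConcurrentHashMap<>();
        long[] reportedContainers = {1L, 2L, 1L, 3L, 1L};
        for (long containerId : reportedContainers) {
          // Atomically insert 1, or add 1 to the existing count.
          replicaCount.merge(containerId, 1, Integer::sum);
        }
        System.out.println(replicaCount); // e.g. {1=3, 2=1, 3=1}
      }
    }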
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
deleted file mode 100644
index ef28aa7..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * A periodic pool is a pool with a timestamp; this allows us to process
- * pools based on a cyclic clock.
- */
-public class PeriodicPool implements Comparable<PeriodicPool> {
- private final String poolName;
- private long lastProcessedTime;
- private AtomicLong totalProcessedCount;
-
- /**
- * Constructs a periodic pool.
- *
- * @param poolName - Name of the pool
- */
- public PeriodicPool(String poolName) {
- this.poolName = poolName;
- lastProcessedTime = 0;
- totalProcessedCount = new AtomicLong(0);
- }
-
- /**
- * Get pool Name.
- * @return PoolName
- */
- public String getPoolName() {
- return poolName;
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less
- * than, equal to, or greater than the specified object.
- *
- * @param o the object to be compared.
- * @return a negative integer, zero, or a positive integer as this object is
- * less than, equal to, or greater than the specified object.
- * @throws NullPointerException if the specified object is null
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this object.
- */
- @Override
- public int compareTo(PeriodicPool o) {
- return Long.compare(this.lastProcessedTime, o.lastProcessedTime);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- PeriodicPool that = (PeriodicPool) o;
-
- return poolName.equals(that.poolName);
- }
-
- @Override
- public int hashCode() {
- return poolName.hashCode();
- }
-
- /**
-   * Returns the total number of times we have processed this pool.
- *
- * @return processed count.
- */
- public long getTotalProcessedCount() {
- return totalProcessedCount.get();
- }
-
- /**
- * Gets the last time we processed this pool.
- * @return time in milliseconds
- */
- public long getLastProcessedTime() {
- return this.lastProcessedTime;
- }
-
- /**
- * Sets the last processed time.
- *
- * @param lastProcessedTime - Long in milliseconds.
- */
- public void setLastProcessedTime(long lastProcessedTime) {
- this.lastProcessedTime = lastProcessedTime;
- }
-
-  /**
- * Increments the total processed count.
- */
- public void incTotalProcessedCount() {
- this.totalProcessedCount.incrementAndGet();
- }
-}
\ No newline at end of file
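The "cyclic clock" in the class comment works because a priority queue ordered by last-processed time always hands back the pool that has waited longest, and re-adding a pool after processing keeps the rotation fair. A standalone illustration of that rotation (the Pool class here is invented, not the PeriodicPool above):

    import java.util.PriorityQueue;

    public class CyclicClockSketch {
      static final class Pool implements Comparable<Pool> {
        final String name;
        long lastProcessed;
        Pool(String name) { this.name = name; }
        @Override public int compareTo(Pool o) {
          return Long.compare(lastProcessed, o.lastProcessed);
        }
      }

      public static void main(String[] args) {
        PriorityQueue<Pool> queue = new PriorityQueue<>();
        queue.add(new Pool("a"));
        queue.add(new Pool("b"));
        for (int i = 0; i < 4; i++) {
          Pool next = queue.poll();        // the pool idle the longest
          System.out.println("processing " + next.name);
          // Mutate the ordering key only while the pool is out of the queue.
          next.lastProcessed = System.nanoTime();
          queue.add(next);                 // back into the rotation
        }
      }
    }

Note that PeriodicPool orders by lastProcessedTime while equals() compares pool names; that inconsistency is tolerable for a PriorityQueue, which consults only compareTo for ordering.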
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
deleted file mode 100644
index 7bbe2ef..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-/*
- This package contains routines that manage replication of a container. It
- relies on container reports to understand the replication level of a
- container (UnderReplicated, Replicated, OverReplicated) and manages the
- replication level based on that.
- */
\ No newline at end of file
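The three replication levels named in this comment reduce to a count comparison between observed replicas and the expected replication factor. A minimal sketch of that classification (the enum and method are invented for illustration):

    enum ReplicationLevel { UNDER_REPLICATED, REPLICATED, OVER_REPLICATED }

    final class ReplicationCheck {
      // Classify a container by comparing observed replicas to the factor.
      static ReplicationLevel classify(int observed, int expected) {
        if (observed < expected) {
          return ReplicationLevel.UNDER_REPLICATED;
        }
        return observed == expected
            ? ReplicationLevel.REPLICATED
            : ReplicationLevel.OVER_REPLICATED;
      }

      public static void main(String[] args) {
        System.out.println(classify(2, 3)); // UNDER_REPLICATED
        System.out.println(classify(3, 3)); // REPLICATED
        System.out.println(classify(4, 3)); // OVER_REPLICATED
      }
    }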
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 4392633..72d7e94 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -124,12 +124,6 @@ public interface NodeManager extends StorageContainerNodeProtocol,
SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails);
/**
- * Returns the NodePoolManager associated with the NodeManager.
- * @return NodePoolManager
- */
- NodePoolManager getNodePoolManager();
-
- /**
* Wait for the heartbeat is processed by NodeManager.
* @return true if heartbeat has been processed.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
deleted file mode 100644
index 46faf9ca..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface that defines SCM NodePoolManager.
- */
-public interface NodePoolManager extends Closeable {
-
- /**
- * Add a node to a node pool.
- * @param pool - name of the node pool.
- * @param node - data node.
- */
- void addNode(String pool, DatanodeDetails node) throws IOException;
-
- /**
- * Remove a node from a node pool.
- * @param pool - name of the node pool.
- * @param node - data node.
- * @throws SCMException
- */
- void removeNode(String pool, DatanodeDetails node)
- throws SCMException;
-
- /**
- * Get a list of known node pools.
-   * @return a list of known node pool names, or an empty list if no node
-   * pool is defined.
- */
- List<String> getNodePools();
-
- /**
- * Get all nodes of a node pool given the name of the node pool.
- * @param pool - name of the node pool.
- * @return a list of datanode ids or an empty list if the node pool was not
- * found.
- */
- List<DatanodeDetails> getNodes(String pool);
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- * @param datanodeDetails - datanode ID.
- * @return node pool name if it has been assigned.
- * null if the node has not been assigned to any node pool yet.
- */
- String getNodePool(DatanodeDetails datanodeDetails) throws SCMException;
-}
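A short usage sketch of the interface above; the NodePoolManager implementation and the DatanodeDetails instance are assumptions, not code from this repository:

    // Sketch only; poolManager and dn are assumed to exist. SCMException
    // extends IOException, so a single throws clause covers both.
    static void demo(NodePoolManager poolManager, DatanodeDetails dn)
        throws IOException {
      poolManager.addNode("pool-a", dn);                // register the node
      List<String> pools = poolManager.getNodePools();  // ["pool-a"]
      List<DatanodeDetails> members = poolManager.getNodes("pool-a");
      String assigned = poolManager.getNodePool(dn);    // "pool-a"
      poolManager.removeNode("pool-a", dn);             // and back out
    }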
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index fc8b013..adca8ea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import com.google.protobuf.GeneratedMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -159,7 +158,6 @@ public class SCMNodeManager
private ObjectName nmInfoBean;
// Node pool manager.
- private final SCMNodePoolManager nodePoolManager;
private final StorageContainerManager scmManager;
public static final Event<CommandForDatanode> DATANODE_COMMAND =
@@ -210,7 +208,6 @@ public class SCMNodeManager
registerMXBean();
- this.nodePoolManager = new SCMNodePoolManager(conf);
this.scmManager = scmManager;
}
@@ -682,7 +679,6 @@ public class SCMNodeManager
@Override
public void close() throws IOException {
unregisterMXBean();
- nodePoolManager.close();
executorService.shutdown();
try {
if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
@@ -760,20 +756,6 @@ public class SCMNodeManager
LOG.info("Leaving startup chill mode.");
}
- // TODO: define node pool policy for non-default node pool.
- // For now, all nodes are added to the "DefaultNodePool" upon registration
- // if it has not been added to any node pool yet.
- try {
- if (nodePoolManager.getNodePool(datanodeDetails) == null) {
- nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL,
- datanodeDetails);
- }
- } catch (IOException e) {
- // TODO: make sure registration failure is handled correctly.
- return RegisteredCommand.newBuilder()
- .setErrorCode(ErrorCode.errorNodeNotPermitted)
- .build();
- }
// Updating Node Report, as registration is successful
updateNodeStat(datanodeDetails.getUuid(), nodeReport);
LOG.info("Data node with ID: {} Registered.",
@@ -860,11 +842,6 @@ public class SCMNodeManager
}
@Override
- public NodePoolManager getNodePoolManager() {
- return nodePoolManager;
- }
-
- @Override
public Map<String, Integer> getNodeCount() {
Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
for(NodeState state : NodeState.values()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
deleted file mode 100644
index faf330e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .FAILED_TO_FIND_NODE_IN_POOL;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .FAILED_TO_LOAD_NODEPOOL;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
-
-/**
- * SCM node pool manager that manages node pools.
- */
-public final class SCMNodePoolManager implements NodePoolManager {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(SCMNodePoolManager.class);
- private static final List<DatanodeDetails> EMPTY_NODE_LIST =
- new ArrayList<>();
- private static final List<String> EMPTY_NODEPOOL_LIST = new ArrayList<>();
- public static final String DEFAULT_NODEPOOL = "DefaultNodePool";
-
- // DB that saves the node to node pool mapping.
- private MetadataStore nodePoolStore;
-
- // In-memory node pool to nodes mapping
- private HashMap<String, Set<DatanodeDetails>> nodePools;
-
- // Read-write lock for nodepool operations
- private ReadWriteLock lock;
-
- /**
- * Construct SCMNodePoolManager class that manages node to node pool mapping.
- * @param conf - configuration.
- * @throws IOException
- */
- public SCMNodePoolManager(final OzoneConfiguration conf)
- throws IOException {
- final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
- OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
- File metaDir = getOzoneMetaDirPath(conf);
- String scmMetaDataDir = metaDir.getPath();
- File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB);
- nodePoolStore = MetadataStoreBuilder.newBuilder()
- .setConf(conf)
- .setDbFile(nodePoolDBPath)
- .setCacheSize(cacheSize * OzoneConsts.MB)
- .build();
- nodePools = new HashMap<>();
- lock = new ReentrantReadWriteLock();
- init();
- }
-
- /**
-   * Initializes the in-memory store from the persistent LevelDB store.
- * No lock is needed as init() is only invoked by constructor.
- * @throws SCMException
- */
- private void init() throws SCMException {
- try {
- nodePoolStore.iterate(null, (key, value) -> {
- try {
- DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf(
- HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key));
- String poolName = DFSUtil.bytes2String(value);
-
- Set<DatanodeDetails> nodePool = null;
- if (nodePools.containsKey(poolName)) {
- nodePool = nodePools.get(poolName);
- } else {
- nodePool = new HashSet<>();
- nodePools.put(poolName, nodePool);
- }
- nodePool.add(nodeId);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding node: {} to node pool: {}",
- nodeId, poolName);
- }
- } catch (IOException e) {
- LOG.warn("Can't add a datanode to node pool, continue next...");
- }
- return true;
- });
- } catch (IOException e) {
- LOG.error("Loading node pool error " + e);
- throw new SCMException("Failed to load node pool",
- FAILED_TO_LOAD_NODEPOOL);
- }
- }
-
- /**
- * Add a datanode to a node pool.
- * @param pool - name of the node pool.
-   * @param node - the datanode to add.
- */
- @Override
- public void addNode(final String pool, final DatanodeDetails node)
- throws IOException {
- Preconditions.checkNotNull(pool, "pool name is null");
- Preconditions.checkNotNull(node, "node is null");
- lock.writeLock().lock();
- try {
- // add to the persistent store
- nodePoolStore.put(node.getProtoBufMessage().toByteArray(),
- DFSUtil.string2Bytes(pool));
-
- // add to the in-memory store
- Set<DatanodeDetails> nodePool = null;
- if (nodePools.containsKey(pool)) {
- nodePool = nodePools.get(pool);
- } else {
- nodePool = new HashSet<DatanodeDetails>();
- nodePools.put(pool, nodePool);
- }
- nodePool.add(node);
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- /**
- * Remove a datanode from a node pool.
- * @param pool - name of the node pool.
- * @param node - datanode id.
- * @throws SCMException
- */
- @Override
- public void removeNode(final String pool, final DatanodeDetails node)
- throws SCMException {
- Preconditions.checkNotNull(pool, "pool name is null");
- Preconditions.checkNotNull(node, "node is null");
- lock.writeLock().lock();
- try {
- // Remove from the persistent store
- byte[] kName = node.getProtoBufMessage().toByteArray();
- byte[] kData = nodePoolStore.get(kName);
- if (kData == null) {
- throw new SCMException(String.format("Unable to find node %s from" +
- " pool %s in DB.", DFSUtil.bytes2String(kName), pool),
- FAILED_TO_FIND_NODE_IN_POOL);
- }
- nodePoolStore.delete(kName);
-
- // Remove from the in-memory store
- if (nodePools.containsKey(pool)) {
- Set<DatanodeDetails> nodePool = nodePools.get(pool);
- nodePool.remove(node);
- } else {
- throw new SCMException(String.format("Unable to find node %s from" +
- " pool %s in MAP.", DFSUtil.bytes2String(kName), pool),
- FAILED_TO_FIND_NODE_IN_POOL);
- }
- } catch (IOException e) {
- throw new SCMException("Failed to remove node " + node.toString()
- + " from node pool " + pool, e,
- SCMException.ResultCodes.IO_EXCEPTION);
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- /**
- * Get all the node pools.
- * @return all the node pools.
- */
- @Override
- public List<String> getNodePools() {
- lock.readLock().lock();
- try {
- if (!nodePools.isEmpty()) {
- return nodePools.keySet().stream().collect(Collectors.toList());
- } else {
- return EMPTY_NODEPOOL_LIST;
- }
- } finally {
- lock.readLock().unlock();
- }
- }
-
- /**
- * Get all datanodes of a specific node pool.
- * @param pool - name of the node pool.
- * @return all datanodes of the specified node pool.
- */
- @Override
- public List<DatanodeDetails> getNodes(final String pool) {
-    Preconditions.checkNotNull(pool, "pool name is null");
-    // Guard the in-memory map with the read lock, as the other accessors do.
-    lock.readLock().lock();
-    try {
-      if (nodePools.containsKey(pool)) {
-        return nodePools.get(pool).stream().collect(Collectors.toList());
-      } else {
-        return EMPTY_NODE_LIST;
-      }
-    } finally {
-      lock.readLock().unlock();
-    }
- }
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- * @param datanodeDetails - datanode ID.
- * @return node pool name if it has been assigned.
- * null if the node has not been assigned to any node pool yet.
-   * TODO: Put this in an in-memory map if performance is an issue.
- */
- @Override
- public String getNodePool(final DatanodeDetails datanodeDetails)
- throws SCMException {
- Preconditions.checkNotNull(datanodeDetails, "node is null");
- try {
- byte[] result = nodePoolStore.get(
- datanodeDetails.getProtoBufMessage().toByteArray());
- return result == null ? null : DFSUtil.bytes2String(result);
- } catch (IOException e) {
- throw new SCMException("Failed to get node pool for node "
- + datanodeDetails.toString(), e,
- SCMException.ResultCodes.IO_EXCEPTION);
- }
- }
-
- /**
- * Close node pool level db store.
- * @throws IOException
- */
- @Override
- public void close() throws IOException {
- nodePoolStore.close();
- }
-}
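The persistence scheme above stores one key/value pair per datanode: the protobuf-serialized DatanodeDetails as the key and the pool name as the value. A standalone sketch of that round trip, with a plain map and UTF-8 strings standing in for the MetadataStore and Hadoop's DFSUtil helpers:

    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import java.util.Map;

    public class NodePoolEncodingSketch {
      public static void main(String[] args) {
        // String keys stand in for the serialized DatanodeDetails bytes.
        Map<String, byte[]> store = new HashMap<>();
        byte[] value = "DefaultNodePool".getBytes(StandardCharsets.UTF_8);
        store.put("datanode-uuid-1", value);            // put(node, pool)
        byte[] loaded = store.get("datanode-uuid-1");   // get(node)
        String pool = loaded == null
            ? null                                      // node not in a pool
            : new String(loaded, StandardCharsets.UTF_8);
        System.out.println(pool);                       // DefaultNodePool
      }
    }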
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 8c59462..80b5d6e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.scm.container;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -273,11 +272,6 @@ public class MockNodeManager implements NodeManager {
return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid()));
}
- @Override
- public NodePoolManager getNodePoolManager() {
- return Mockito.mock(NodePoolManager.class);
- }
-
/**
* Used for testing.
*
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
deleted file mode 100644
index 8f412de..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.commons.collections.ListUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
- .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
- .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.test.PathUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for SCM node pool manager.
- */
-public class TestSCMNodePoolManager {
- private static final Logger LOG =
- LoggerFactory.getLogger(TestSCMNodePoolManager.class);
-
- @Rule
- public ExpectedException thrown = ExpectedException.none();
-
- private final File testDir = PathUtils.getTestDir(
- TestSCMNodePoolManager.class);
-
- SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf)
- throws IOException {
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
- testDir.getAbsolutePath());
- conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
- SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
- return new SCMNodePoolManager(conf);
- }
-
- /**
- * Test default node pool.
- *
- * @throws IOException
- */
- @Test
- public void testDefaultNodePool() throws IOException {
- OzoneConfiguration conf = new OzoneConfiguration();
- try {
- final String defaultPool = "DefaultPool";
- NodePoolManager npMgr = createNodePoolManager(conf);
-
- final int nodeCount = 4;
- final List<DatanodeDetails> nodes = TestUtils
- .getListOfDatanodeDetails(nodeCount);
- assertEquals(0, npMgr.getNodePools().size());
- for (DatanodeDetails node: nodes) {
- npMgr.addNode(defaultPool, node);
- }
- List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
- assertEquals(nodeCount, nodesRetrieved.size());
- assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
-
- DatanodeDetails nodeRemoved = nodes.remove(2);
- npMgr.removeNode(defaultPool, nodeRemoved);
- List<DatanodeDetails> nodesAfterRemove = npMgr.getNodes(defaultPool);
- assertTwoDatanodeListsEqual(nodes, nodesAfterRemove);
-
- List<DatanodeDetails> nonExistSet = npMgr.getNodes("NonExistSet");
- assertEquals(0, nonExistSet.size());
- } finally {
- FileUtil.fullyDelete(testDir);
- }
- }
-
- /**
- * Test default node pool reload.
- *
- * @throws IOException
- */
- @Test
- public void testDefaultNodePoolReload() throws IOException {
- OzoneConfiguration conf = new OzoneConfiguration();
- final String defaultPool = "DefaultPool";
- final int nodeCount = 4;
- final List<DatanodeDetails> nodes = TestUtils
- .getListOfDatanodeDetails(nodeCount);
-
- try {
- try {
- SCMNodePoolManager npMgr = createNodePoolManager(conf);
- assertEquals(0, npMgr.getNodePools().size());
- for (DatanodeDetails node : nodes) {
- npMgr.addNode(defaultPool, node);
- }
- List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
- assertEquals(nodeCount, nodesRetrieved.size());
- assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
- npMgr.close();
- } finally {
- LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" +
- " and close.");
- }
-
- // try reload with a new NodePoolManager instance
- try {
- SCMNodePoolManager npMgr = createNodePoolManager(conf);
- List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
- assertEquals(nodeCount, nodesRetrieved.size());
- assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
- } finally {
- LOG.info("testDefaultNodePoolReload: Finish reloading node pool.");
- }
- } finally {
- FileUtil.fullyDelete(testDir);
- }
- }
-
- /**
- * Compare and verify that two datanode lists are equal.
- * @param list1 - datanode list 1.
- * @param list2 - datanode list 2.
- */
- private void assertTwoDatanodeListsEqual(List<DatanodeDetails> list1,
- List<DatanodeDetails> list2) {
- assertEquals(list1.size(), list2.size());
- Collections.sort(list1);
- Collections.sort(list2);
- assertTrue(ListUtils.isEqualList(list1, list2));
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 072d821..1a4dcd7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.CommandQueue;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -201,10 +200,6 @@ public class ReplicationNodeManagerMock implements NodeManager {
return null;
}
- @Override
- public NodePoolManager getNodePoolManager() {
- return Mockito.mock(NodePoolManager.class);
- }
/**
* Wait for the heartbeat is processed by NodeManager.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
deleted file mode 100644
index ffcd752..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * A NodePoolManager mock used in replication tests.
- */
-public class ReplicationNodePoolManagerMock implements NodePoolManager {
-
- private final Map<DatanodeDetails, String> nodeMemberShip;
-
- /**
- * A node pool manager for testing.
- */
- public ReplicationNodePoolManagerMock() {
- nodeMemberShip = new HashMap<>();
- }
-
- /**
- * Add a node to a node pool.
- *
- * @param pool - name of the node pool.
- * @param node - data node.
- */
- @Override
- public void addNode(String pool, DatanodeDetails node) {
- nodeMemberShip.put(node, pool);
- }
-
- /**
- * Remove a node from a node pool.
- *
- * @param pool - name of the node pool.
- * @param node - data node.
- * @throws SCMException
- */
- @Override
- public void removeNode(String pool, DatanodeDetails node)
- throws SCMException {
-    nodeMemberShip.remove(node);
-  }
-
- /**
- * Get a list of known node pools.
- *
- * @return a list of known node pool names or an empty list if not node pool
- * is defined.
- */
- @Override
- public List<String> getNodePools() {
- Set<String> poolSet = new HashSet<>();
- for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
- poolSet.add(entry.getValue());
- }
- return new ArrayList<>(poolSet);
-
- }
-
- /**
- * Get all nodes of a node pool given the name of the node pool.
- *
- * @param pool - name of the node pool.
- * @return a list of datanode ids or an empty list if the node pool was not
- * found.
- */
- @Override
- public List<DatanodeDetails> getNodes(String pool) {
- Set<DatanodeDetails> datanodeSet = new HashSet<>();
- for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
- if (entry.getValue().equals(pool)) {
- datanodeSet.add(entry.getKey());
- }
- }
- return new ArrayList<>(datanodeSet);
- }
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- *
- * @param datanodeDetails DatanodeDetails.
- * @return node pool name if it has been assigned. null if the node has not
- * been assigned to any node pool yet.
- */
- @Override
- public String getNodePool(DatanodeDetails datanodeDetails) {
- return nodeMemberShip.get(datanodeDetails);
- }
-
- /**
- * Closes this stream and releases any system resources associated
- * with it. If the stream is already closed then invoking this
- * method has no effect.
- * <p>
- * <p> As noted in {@link AutoCloseable#close()}, cases where the
- * close may fail require careful attention. It is strongly advised
- * to relinquish the underlying resources and to internally
- * <em>mark</em> the {@code Closeable} as closed, prior to throwing
- * the {@code IOException}.
- *
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
-
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf03cc4/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 4d70af8..b4ed2b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -51,12 +51,9 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.UUID;
-import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* This class tests the CLI that transforms container into SQLite DB files.
@@ -177,34 +174,6 @@ public class TestContainerSQLCli {
}
@Test
- public void testConvertNodepoolDB() throws Exception {
- String dbOutPath = GenericTestUtils.getTempPath(
- UUID.randomUUID() + "/out_sql.db");
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
- String dbPath = dbRootPath + "/" + NODEPOOL_DB;
- String[] args = {"-p", dbPath, "-o", dbOutPath};
-
- cli.run(args);
-
- // verify the sqlite db
- HashMap<String, String> expectedPool = new HashMap<>();
- for (DatanodeDetails dnid : nodeManager.getAllNodes()) {
- expectedPool.put(dnid.getUuidString(), "DefaultNodePool");
- }
- Connection conn = connectDB(dbOutPath);
- String sql = "SELECT * FROM nodePool";
- ResultSet rs = executeQuery(conn, sql);
- while(rs.next()) {
- String datanodeUUID = rs.getString("datanodeUUID");
- String poolName = rs.getString("poolName");
- assertTrue(expectedPool.remove(datanodeUUID).equals(poolName));
- }
- assertEquals(0, expectedPool.size());
-
- Files.delete(Paths.get(dbOutPath));
- }
-
- @Test
public void testConvertContainerDB() throws Exception {
String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
[23/45] hadoop git commit: HDDS-185:
TestCloseContainerByPipeline#testCloseContainerViaRatis fail intermittently.
Contributed by Shashikant Banerjee.
Posted by xy...@apache.org.
HDDS-185: TestCloseContainerByPipeline#testCloseContainerViaRatis fail intermittently. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/852ca6b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/852ca6b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/852ca6b4
Branch: refs/heads/HDDS-4
Commit: 852ca6b432a801df51f957a9b169483008e3c744
Parents: 2911943
Author: Nanda kumar <na...@apache.org>
Authored: Fri Jun 29 04:11:39 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Fri Jun 29 04:11:39 2018 +0530
----------------------------------------------------------------------
.../TestCloseContainerByPipeline.java | 65 +++++++++++++++++---
1 file changed, 58 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/852ca6b4/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 9e8cb46..265c82b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -33,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
@@ -45,6 +48,7 @@ import org.junit.Test;
import java.io.IOException;
import java.util.List;
+import java.util.Random;
import java.util.concurrent.TimeoutException;
public class TestCloseContainerByPipeline {
@@ -89,6 +93,59 @@ public class TestCloseContainerByPipeline {
}
@Test
+ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception {
+ OzoneOutputStream key = objectStore.getVolume("test").getBucket("test")
+ .createKey("testCloseContainer", 1024, ReplicationType.STAND_ALONE,
+ ReplicationFactor.ONE);
+ key.write("standalone".getBytes());
+ key.close();
+
+ //get the name of a valid container
+ KsmKeyArgs keyArgs =
+ new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+ .setType(HddsProtos.ReplicationType.STAND_ALONE)
+ .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
+ .setKeyName("testCloseContainer").build();
+
+ KsmKeyLocationInfo ksmKeyLocationInfo =
+ cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+ .get(0).getBlocksLatestVersionOnly().get(0);
+
+ long containerID = ksmKeyLocationInfo.getContainerID();
+ List<DatanodeDetails> datanodes =
+ cluster.getStorageContainerManager().getContainerInfo(containerID)
+ .getPipeline().getMachines();
+ Assert.assertTrue(datanodes.size() == 1);
+
+ DatanodeDetails datanodeDetails = datanodes.get(0);
+ HddsDatanodeService datanodeService = null;
+ Assert
+ .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails));
+ for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
+ if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
+ datanodeService = datanodeServiceItr;
+ break;
+ }
+ }
+ CommandHandler closeContainerHandler =
+ datanodeService.getDatanodeStateMachine().getCommandDispatcher()
+ .getCloseContainerHandler();
+ int lastInvocationCount = closeContainerHandler.getInvocationCount();
+ //send the order to close the container
+ cluster.getStorageContainerManager().getScmNodeManager()
+ .addDatanodeCommand(datanodeDetails.getUuid(),
+ new CloseContainerCommand(containerID,
+ HddsProtos.ReplicationType.STAND_ALONE));
+ GenericTestUtils
+ .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails),
+ 500, 5 * 1000);
+ // Make sure the closeContainerCommandHandler is Invoked
+ Assert.assertTrue(
+ closeContainerHandler.getInvocationCount() > lastInvocationCount);
+
+ }
+
+ @Test
public void testCloseContainerViaStandaAlone()
throws IOException, TimeoutException, InterruptedException {
@@ -205,13 +262,7 @@ public class TestCloseContainerByPipeline {
containerData =
datanodeService.getDatanodeStateMachine().getContainer()
.getContainerManager().readContainer(containerID);
- if (!containerData.isOpen()) {
- // make sure the closeContainerHandler on the Datanode is invoked
- Assert.assertTrue(
- datanodeService.getDatanodeStateMachine().getCommandDispatcher()
- .getCloseContainerHandler().getInvocationCount() > 0);
- return true;
- }
+ return !containerData.isOpen();
}
} catch (StorageContainerException e) {
throw new AssertionError(e);
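The substance of the fix is visible in the two hunks above: the invocation-count assertion moves out of the polling helper isContainerClosed and into a dedicated test, which records the count before dispatching the close command and then uses GenericTestUtils.waitFor to let the asynchronous close land before asserting. A minimal self-contained sketch of that waitFor pattern, assuming only the hadoop-common test jar; the AtomicBoolean and background thread are stand-ins for the container state and the datanode, not part of the patch:

import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args) throws Exception {
    AtomicBoolean closed = new AtomicBoolean(false);
    // Simulate an asynchronous state change, like a container closing
    // on a datanode some time after the command is dispatched.
    new Thread(() -> {
      try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
      closed.set(true);
    }).start();
    // Re-check every 500 ms, give up after 5 s -- the same parameters the
    // test passes, so the assertion no longer races the close.
    GenericTestUtils.waitFor(closed::get, 500, 5 * 1000);
    System.out.println("condition reached; safe to assert side effects");
  }
}

Moving the handler-count assertion out of the shared poll helper (which now just returns !containerData.isOpen()) also keeps that helper reusable by the other close-container tests.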
[44/45] hadoop git commit: HDDS-6. Enable SCM Kerberos auth.
Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-6. Enable SCM Kerberos auth. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a4ec99d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a4ec99d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a4ec99d
Branch: refs/heads/HDDS-4
Commit: 0a4ec99d97d68a78fa6915ba5ae0939135322d8b
Parents: 729465e
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:02 2018 -0700
----------------------------------------------------------------------
hadoop-hdds/common/src/main/resources/ozone-default.xml | 12 ------------
1 file changed, 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a4ec99d/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 4ada591..b469e93 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -129,18 +129,6 @@
</description>
</property>
<property>
- <name>dfs.ratis.client.request.timeout.duration</name>
- <value>3s</value>
- <tag>OZONE, RATIS, MANAGEMENT</tag>
- <description>The timeout duration for ratis client request.</description>
- </property>
- <property>
- <name>dfs.ratis.server.request.timeout.duration</name>
- <value>3s</value>
- <tag>OZONE, RATIS, MANAGEMENT</tag>
- <description>The timeout duration for ratis server request.</description>
- </property>
- <property>
<name>ozone.container.report.interval</name>
<value>60000ms</value>
<tag>OZONE, CONTAINER, MANAGEMENT</tag>
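As context for the hunk above, duration-valued properties in ozone-default.xml are read through Hadoop's Configuration.getTimeDuration, which parses unit-suffixed strings such as "3s" or "60000ms"; dropping a property from the defaults file simply shifts the effective default to the fallback the code supplies. A minimal sketch, assuming hadoop-common on the classpath and reusing the ozone.container.report.interval key shown above:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TimeDurationSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Unit-suffixed values like "60000ms" are converted to the requested
    // TimeUnit; the long argument is the fallback when the key is unset.
    long intervalMs = conf.getTimeDuration(
        "ozone.container.report.interval", 60000L, TimeUnit.MILLISECONDS);
    System.out.println("container report interval = " + intervalMs + " ms");
  }
}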
[30/45] hadoop git commit: YARN-8481. AMRMProxyPolicies should accept
heartbeat response from new/unknown subclusters. Contributed by Botong Huang.
Posted by xy...@apache.org.
YARN-8481. AMRMProxyPolicies should accept heartbeat response from new/unknown subclusters. Contributed by Botong Huang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdb08442
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdb08442
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdb08442
Branch: refs/heads/HDDS-4
Commit: cdb084426bc27a9f902da0b6927a3354a307dd82
Parents: d36f6b9
Author: Giovanni Matteo Fumarola <gi...@apache.com>
Authored: Fri Jun 29 11:47:30 2018 -0700
Committer: Giovanni Matteo Fumarola <gi...@apache.com>
Committed: Fri Jun 29 11:47:30 2018 -0700
----------------------------------------------------------------------
.../policies/amrmproxy/BroadcastAMRMProxyPolicy.java | 11 -----------
.../policies/amrmproxy/RejectAMRMProxyPolicy.java | 4 ----
.../TestBroadcastAMRMProxyFederationPolicy.java | 11 +++--------
3 files changed, 3 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdb08442/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
index 679f4d5..7fddb8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/BroadcastAMRMProxyPolicy.java
@@ -19,10 +19,8 @@
package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -30,7 +28,6 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
import org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
import org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
-import org.apache.hadoop.yarn.server.federation.policies.exceptions.UnknownSubclusterException;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
@@ -40,8 +37,6 @@ import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
*/
public class BroadcastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
- private Set<SubClusterId> knownClusterIds = new HashSet<>();
-
@Override
public void reinitialize(
FederationPolicyInitializationContext policyContext)
@@ -65,7 +60,6 @@ public class BroadcastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
// simply broadcast the resource request to all sub-clusters
for (SubClusterId subClusterId : activeSubclusters.keySet()) {
answer.put(subClusterId, resourceRequests);
- knownClusterIds.add(subClusterId);
}
return answer;
@@ -74,11 +68,6 @@ public class BroadcastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
@Override
public void notifyOfResponse(SubClusterId subClusterId,
AllocateResponse response) throws YarnException {
- if (!knownClusterIds.contains(subClusterId)) {
- throw new UnknownSubclusterException(
- "The response is received from a subcluster that is unknown to this "
- + "policy.");
- }
// stateless policy does not care about responses
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdb08442/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
index 3783df6..4500606 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
@@ -18,10 +18,8 @@
package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -38,8 +36,6 @@ import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
*/
public class RejectAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
- private Set<SubClusterId> knownClusterIds = new HashSet<>();
-
@Override
public void reinitialize(FederationPolicyInitializationContext policyContext)
throws FederationPolicyInitializationException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdb08442/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java
index a21f53d..df5da85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestBroadcastAMRMProxyFederationPolicy.java
@@ -89,7 +89,7 @@ public class TestBroadcastAMRMProxyFederationPolicy
}
@Test
- public void testNotifyOfResponse() throws Exception {
+ public void testNotifyOfResponseFromUnknownSubCluster() throws Exception {
String[] hosts = new String[] {"host1", "host2" };
List<ResourceRequest> resourceRequests = FederationPoliciesTestUtil
.createResourceRequests(hosts, 2 * 1024, 2, 1, 3, null, false);
@@ -97,13 +97,8 @@ public class TestBroadcastAMRMProxyFederationPolicy
((FederationAMRMProxyPolicy) getPolicy())
.splitResourceRequests(resourceRequests);
- try {
- ((FederationAMRMProxyPolicy) getPolicy()).notifyOfResponse(
- SubClusterId.newInstance("sc3"), mock(AllocateResponse.class));
- Assert.fail();
- } catch (FederationPolicyException f) {
- System.out.println("Expected: " + f.getMessage());
- }
+ ((FederationAMRMProxyPolicy) getPolicy()).notifyOfResponse(
+ SubClusterId.newInstance("sc3"), mock(AllocateResponse.class));
((FederationAMRMProxyPolicy) getPolicy()).notifyOfResponse(
SubClusterId.newInstance("sc1"), mock(AllocateResponse.class));
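The behavioral change reads most clearly at the call site: a heartbeat response from a subcluster the broadcast never reached is now accepted rather than rejected, since the stateless policy no longer tracks known subcluster ids. An illustrative fragment under the same fixtures as the test above (the class and helper names are hypothetical; the policy is assumed to be initialized as in TestBroadcastAMRMProxyFederationPolicy, and mock comes from Mockito):

import static org.mockito.Mockito.mock;

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;

public class UnknownSubClusterSketch {
  // Hypothetical helper: exercises a policy initialized elsewhere.
  static void checkUnknownSubClusterAccepted(FederationAMRMProxyPolicy policy)
      throws Exception {
    // Before YARN-8481 this threw UnknownSubclusterException; afterwards
    // the broadcast policy simply acknowledges responses, known or not.
    policy.notifyOfResponse(
        SubClusterId.newInstance("sc3"), mock(AllocateResponse.class));
  }
}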
[33/45] hadoop git commit: HDFS-13702. Remove HTrace hooks from
DFSClient to reduce CPU usage. Contributed by Todd Lipcon.
Posted by xy...@apache.org.
HDFS-13702. Remove HTrace hooks from DFSClient to reduce CPU usage. Contributed by Todd Lipcon.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d748bd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d748bd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d748bd0
Branch: refs/heads/HDDS-4
Commit: 5d748bd056a32f2c6922514cd0c5b31d866a9919
Parents: 6ba9974
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 2 12:11:06 2018 +0200
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 2 12:11:06 2018 +0200
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSClient.java | 19 ----
.../org/apache/hadoop/hdfs/DFSInputStream.java | 44 ++-------
.../hdfs/client/impl/BlockReaderFactory.java | 16 +---
.../hdfs/client/impl/BlockReaderLocal.java | 93 ++++++++------------
.../client/impl/BlockReaderLocalLegacy.java | 44 ++++-----
.../hdfs/client/impl/BlockReaderRemote.java | 19 +---
.../erasurecode/StripedBlockReader.java | 2 +-
.../hdfs/server/namenode/NamenodeFsck.java | 1 -
.../hdfs/client/impl/BlockReaderTestUtil.java | 2 -
.../hdfs/client/impl/TestBlockReaderLocal.java | 2 -
.../blockmanagement/TestBlockTokenWithDFS.java | 2 -
.../datanode/TestDataNodeVolumeFailure.java | 2 -
12 files changed, 68 insertions(+), 178 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 96c4505..85d6512 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3071,25 +3071,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
/**
- * Full detailed tracing for read requests: path, position in the file,
- * and length.
- *
- * @param reqLen requested length
- */
- TraceScope newReaderTraceScope(String description, String path, long pos,
- int reqLen) {
- TraceScope scope = newPathTraceScope(description, path);
- scope.addKVAnnotation("pos", Long.toString(pos));
- scope.addKVAnnotation("reqLen", Integer.toString(reqLen));
- return scope;
- }
-
- /** Add the returned length info to the scope. */
- void addRetLenToReaderScope(TraceScope scope, int retLen) {
- scope.addKVAnnotation("retLen", Integer.toString(retLen));
- }
-
- /**
* Get the erasure coding policy information for the specified path
*
* @param src path to get the information for
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 1bdc50a..e5640d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -85,8 +85,6 @@ import org.apache.hadoop.util.IdentityHashStore;
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.core.SpanId;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
import com.google.common.annotations.VisibleForTesting;
@@ -641,7 +639,6 @@ public class DFSInputStream extends FSInputStream
setClientCacheContext(dfsClient.getClientContext()).
setUserGroupInformation(dfsClient.ugi).
setConfiguration(dfsClient.getConfiguration()).
- setTracer(dfsClient.getTracer()).
build();
}
@@ -821,31 +818,14 @@ public class DFSInputStream extends FSInputStream
}
ReaderStrategy byteArrayReader =
new ByteArrayStrategy(buf, off, len, readStatistics, dfsClient);
- try (TraceScope scope =
- dfsClient.newReaderTraceScope("DFSInputStream#byteArrayRead",
- src, getPos(), len)) {
- int retLen = readWithStrategy(byteArrayReader);
- if (retLen < len) {
- dfsClient.addRetLenToReaderScope(scope, retLen);
- }
- return retLen;
- }
+ return readWithStrategy(byteArrayReader);
}
@Override
public synchronized int read(final ByteBuffer buf) throws IOException {
ReaderStrategy byteBufferReader =
new ByteBufferStrategy(buf, readStatistics, dfsClient);
- int reqLen = buf.remaining();
- try (TraceScope scope =
- dfsClient.newReaderTraceScope("DFSInputStream#byteBufferRead",
- src, getPos(), reqLen)){
- int retLen = readWithStrategy(byteBufferReader);
- if (retLen < reqLen) {
- dfsClient.addRetLenToReaderScope(scope, retLen);
- }
- return retLen;
- }
+ return readWithStrategy(byteBufferReader);
}
private DNAddrPair chooseDataNode(LocatedBlock block,
@@ -1026,16 +1006,12 @@ public class DFSInputStream extends FSInputStream
final ByteBuffer bb,
final CorruptedBlocks corruptedBlocks,
final int hedgedReadId) {
- final SpanId parentSpanId = Tracer.getCurrentSpanId();
return new Callable<ByteBuffer>() {
@Override
public ByteBuffer call() throws Exception {
DFSClientFaultInjector.get().sleepBeforeHedgedGet();
- try (TraceScope ignored = dfsClient.getTracer().
- newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
- actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
- return bb;
- }
+ actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
+ return bb;
}
};
}
@@ -1336,16 +1312,8 @@ public class DFSInputStream extends FSInputStream
if (length == 0) {
return 0;
}
- try (TraceScope scope = dfsClient.
- newReaderTraceScope("DFSInputStream#byteArrayPread",
- src, position, length)) {
- ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
- int retLen = pread(position, bb);
- if (retLen < length) {
- dfsClient.addRetLenToReaderScope(scope, retLen);
- }
- return retLen;
- }
+ ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
+ return pread(position, bb);
}
private int pread(long position, ByteBuffer buffer)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index a8c73a4..1003b95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -75,7 +75,6 @@ import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -190,11 +189,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
private Configuration configuration;
/**
- * The HTrace tracer to use.
- */
- private Tracer tracer;
-
- /**
* Information about the domain socket path we should use to connect to the
* local peer-- or null if we haven't examined the local domain socket.
*/
@@ -298,11 +292,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return this;
}
- public BlockReaderFactory setTracer(Tracer tracer) {
- this.tracer = tracer;
- return this;
- }
-
@VisibleForTesting
public static void setFailureInjectorForTesting(FailureInjector injector) {
failureInjector = injector;
@@ -451,7 +440,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
try {
return BlockReaderLocalLegacy.newBlockReader(conf,
userGroupInformation, configuration, fileName, block, token,
- datanode, startOffset, length, storageType, tracer);
+ datanode, startOffset, length, storageType);
} catch (RemoteException remoteException) {
ioe = remoteException.unwrapRemoteException(
InvalidToken.class, AccessControlException.class);
@@ -509,7 +498,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
setVerifyChecksum(verifyChecksum).
setCachingStrategy(cachingStrategy).
setStorageType(storageType).
- setTracer(tracer).
build();
}
@@ -860,7 +848,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return BlockReaderRemote.newBlockReader(
fileName, block, token, startOffset, length,
verifyChecksum, clientName, peer, datanode,
- clientContext.getPeerCache(), cachingStrategy, tracer,
+ clientContext.getPeerCache(), cachingStrategy,
networkDistance);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
index df0f65f..9c1ef46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.util.Timer;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -83,7 +81,6 @@ class BlockReaderLocal implements BlockReader {
private long dataPos;
private ExtendedBlock block;
private StorageType storageType;
- private Tracer tracer;
private ShortCircuitConf shortCircuitConf;
public Builder(ShortCircuitConf conf) {
@@ -131,11 +128,6 @@ class BlockReaderLocal implements BlockReader {
return this;
}
- public Builder setTracer(Tracer tracer) {
- this.tracer = tracer;
- return this;
- }
-
public BlockReaderLocal build() {
Preconditions.checkNotNull(replica);
return new BlockReaderLocal(this);
@@ -244,11 +236,6 @@ class BlockReaderLocal implements BlockReader {
*/
private StorageType storageType;
- /**
- * The Tracer to use.
- */
- private final Tracer tracer;
-
private BlockReaderLocal(Builder builder) {
this.replica = builder.replica;
this.dataIn = replica.getDataStream().getChannel();
@@ -278,7 +265,6 @@ class BlockReaderLocal implements BlockReader {
}
this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
this.storageType = builder.storageType;
- this.tracer = builder.tracer;
if (builder.shortCircuitConf.isScrMetricsEnabled()) {
metricsInitializationLock.lock();
@@ -360,52 +346,49 @@ class BlockReaderLocal implements BlockReader {
*/
private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
throws IOException {
- try (TraceScope ignored = tracer.newScope(
- "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")")) {
- int total = 0;
- long startDataPos = dataPos;
- int startBufPos = buf.position();
- while (buf.hasRemaining()) {
- int nRead = blockReaderIoProvider.read(dataIn, buf, dataPos);
- if (nRead < 0) {
- break;
- }
- dataPos += nRead;
- total += nRead;
- }
- if (canSkipChecksum) {
- freeChecksumBufIfExists();
- return total;
+ int total = 0;
+ long startDataPos = dataPos;
+ int startBufPos = buf.position();
+ while (buf.hasRemaining()) {
+ int nRead = blockReaderIoProvider.read(dataIn, buf, dataPos);
+ if (nRead < 0) {
+ break;
}
- if (total > 0) {
- try {
- buf.limit(buf.position());
- buf.position(startBufPos);
- createChecksumBufIfNeeded();
- int checksumsNeeded = (total + bytesPerChecksum - 1) /
- bytesPerChecksum;
- checksumBuf.clear();
- checksumBuf.limit(checksumsNeeded * checksumSize);
- long checksumPos = BlockMetadataHeader.getHeaderSize()
- + ((startDataPos / bytesPerChecksum) * checksumSize);
- while (checksumBuf.hasRemaining()) {
- int nRead = checksumIn.read(checksumBuf, checksumPos);
- if (nRead < 0) {
- throw new IOException("Got unexpected checksum file EOF at " +
- checksumPos + ", block file position " + startDataPos +
- " for block " + block + " of file " + filename);
- }
- checksumPos += nRead;
+ dataPos += nRead;
+ total += nRead;
+ }
+ if (canSkipChecksum) {
+ freeChecksumBufIfExists();
+ return total;
+ }
+ if (total > 0) {
+ try {
+ buf.limit(buf.position());
+ buf.position(startBufPos);
+ createChecksumBufIfNeeded();
+ int checksumsNeeded = (total + bytesPerChecksum - 1) /
+ bytesPerChecksum;
+ checksumBuf.clear();
+ checksumBuf.limit(checksumsNeeded * checksumSize);
+ long checksumPos = BlockMetadataHeader.getHeaderSize()
+ + ((startDataPos / bytesPerChecksum) * checksumSize);
+ while (checksumBuf.hasRemaining()) {
+ int nRead = checksumIn.read(checksumBuf, checksumPos);
+ if (nRead < 0) {
+ throw new IOException("Got unexpected checksum file EOF at " +
+ checksumPos + ", block file position " + startDataPos +
+ " for block " + block + " of file " + filename);
}
- checksumBuf.flip();
-
- checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
- } finally {
- buf.position(buf.limit());
+ checksumPos += nRead;
}
+ checksumBuf.flip();
+
+ checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
+ } finally {
+ buf.position(buf.limit());
}
- return total;
}
+ return total;
}
private boolean createNoChecksumContext() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java
index 7d20a83..e1e38c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocalLegacy.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -184,7 +182,6 @@ class BlockReaderLocalLegacy implements BlockReader {
private long startOffset;
private final String filename;
private long blockId;
- private final Tracer tracer;
/**
* The only way this object can be instantiated.
@@ -193,8 +190,8 @@ class BlockReaderLocalLegacy implements BlockReader {
UserGroupInformation userGroupInformation,
Configuration configuration, String file, ExtendedBlock blk,
Token<BlockTokenIdentifier> token, DatanodeInfo node,
- long startOffset, long length, StorageType storageType,
- Tracer tracer) throws IOException {
+ long startOffset, long length, StorageType storageType)
+ throws IOException {
final ShortCircuitConf scConf = conf.getShortCircuitConf();
LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
.getIpcPort());
@@ -239,11 +236,10 @@ class BlockReaderLocalLegacy implements BlockReader {
long firstChunkOffset = startOffset
- (startOffset % checksum.getBytesPerChecksum());
localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
- startOffset, checksum, true, dataIn, firstChunkOffset, checksumIn,
- tracer);
+ startOffset, checksum, true, dataIn, firstChunkOffset, checksumIn);
} else {
localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
- startOffset, dataIn, tracer);
+ startOffset, dataIn);
}
} catch (IOException e) {
// remove from cache
@@ -320,17 +316,17 @@ class BlockReaderLocalLegacy implements BlockReader {
}
private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
- ExtendedBlock block, long startOffset, FileInputStream dataIn,
- Tracer tracer) throws IOException {
+ ExtendedBlock block, long startOffset, FileInputStream dataIn)
+ throws IOException {
this(conf, hdfsfile, block, startOffset,
DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
- dataIn, startOffset, null, tracer);
+ dataIn, startOffset, null);
}
private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
ExtendedBlock block, long startOffset, DataChecksum checksum,
boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
- FileInputStream checksumIn, Tracer tracer) throws IOException {
+ FileInputStream checksumIn) throws IOException {
this.filename = hdfsfile;
this.checksum = checksum;
this.verifyChecksum = verifyChecksum;
@@ -369,7 +365,6 @@ class BlockReaderLocalLegacy implements BlockReader {
bufferPool.returnBuffer(checksumBuff);
}
}
- this.tracer = tracer;
}
/**
@@ -377,23 +372,20 @@ class BlockReaderLocalLegacy implements BlockReader {
*/
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
throws IOException {
- try (TraceScope ignored = tracer.
- newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")")) {
- int bytesRead = stream.getChannel().read(buf);
- if (bytesRead < 0) {
+ int bytesRead = stream.getChannel().read(buf);
+ if (bytesRead < 0) {
+ //EOF
+ return bytesRead;
+ }
+ while (buf.remaining() > 0) {
+ int n = stream.getChannel().read(buf);
+ if (n < 0) {
//EOF
return bytesRead;
}
- while (buf.remaining() > 0) {
- int n = stream.getChannel().read(buf);
- if (n < 0) {
- //EOF
- return bytesRead;
- }
- bytesRead += n;
- }
- return bytesRead;
+ bytesRead += n;
}
+ return bytesRead;
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
index 5a2ce40..caf15e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
@@ -49,11 +49,9 @@ import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.core.TraceScope;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -121,8 +119,6 @@ public class BlockReaderRemote implements BlockReader {
private boolean sentStatusCode = false;
- private final Tracer tracer;
-
private final int networkDistance;
@VisibleForTesting
@@ -139,10 +135,7 @@ public class BlockReaderRemote implements BlockReader {
if (curDataSlice == null ||
curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
- try (TraceScope ignored = tracer.newScope(
- "BlockReaderRemote2#readNextPacket(" + blockId + ")")) {
- readNextPacket();
- }
+ readNextPacket();
}
LOG.trace("Finishing read #{}", randomId);
@@ -163,10 +156,7 @@ public class BlockReaderRemote implements BlockReader {
public synchronized int read(ByteBuffer buf) throws IOException {
if (curDataSlice == null ||
(curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
- try (TraceScope ignored = tracer.newScope(
- "BlockReaderRemote2#readNextPacket(" + blockId + ")")) {
- readNextPacket();
- }
+ readNextPacket();
}
if (curDataSlice.remaining() == 0) {
// we're at EOF now
@@ -280,7 +270,6 @@ public class BlockReaderRemote implements BlockReader {
long startOffset, long firstChunkOffset,
long bytesToRead, Peer peer,
DatanodeID datanodeID, PeerCache peerCache,
- Tracer tracer,
int networkDistance) {
// Path is used only for printing block and file information in debug
this.peer = peer;
@@ -300,7 +289,6 @@ public class BlockReaderRemote implements BlockReader {
this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
bytesPerChecksum = this.checksum.getBytesPerChecksum();
checksumSize = this.checksum.getChecksumSize();
- this.tracer = tracer;
this.networkDistance = networkDistance;
}
@@ -397,7 +385,6 @@ public class BlockReaderRemote implements BlockReader {
Peer peer, DatanodeID datanodeID,
PeerCache peerCache,
CachingStrategy cachingStrategy,
- Tracer tracer,
int networkDistance) throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
@@ -431,7 +418,7 @@ public class BlockReaderRemote implements BlockReader {
return new BlockReaderRemote(file, block.getBlockId(), checksum,
verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
- peerCache, tracer, networkDistance);
+ peerCache, networkDistance);
}
static void checkSuccess(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index 5e77de5..cbef318 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -128,7 +128,7 @@ class StripedBlockReader {
return BlockReaderRemote.newBlockReader(
"dummy", block, blockToken, offsetInBlock,
block.getNumBytes() - offsetInBlock, true, "", peer, source,
- null, stripedReader.getCachingStrategy(), datanode.getTracer(), -1);
+ null, stripedReader.getCachingStrategy(), -1);
} catch (IOException e) {
LOG.info("Exception while creating remote block reader, datanode {}",
source, e);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index a82c9fe..5e7bab5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -1032,7 +1032,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
setCachingStrategy(CachingStrategy.newDropBehind()).
setClientCacheContext(dfs.getClientContext()).
setConfiguration(namenode.getConf()).
- setTracer(tracer).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
index 57f5cf8..e2f55e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
@@ -30,7 +30,6 @@ import java.util.Random;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.ClientContext;
@@ -206,7 +205,6 @@ public class BlockReaderTestUtil {
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setConfiguration(fs.getConf()).
setAllowShortCircuitLocalReads(true).
- setTracer(FsTracer.get(fs.getConf())).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
index b9bb495..ace21c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
-import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
@@ -208,7 +207,6 @@ public class TestBlockReaderLocal {
setShortCircuitReplica(replica).
setCachingStrategy(new CachingStrategy(false, readahead)).
setVerifyChecksum(checksum).
- setTracer(FsTracer.get(conf)).
build();
dataIn = null;
metaIn = null;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 3fbcd26..b57c4f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
@@ -167,7 +166,6 @@ public class TestBlockTokenWithDFS {
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
- setTracer(FsTracer.get(conf)).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d748bd0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index c116ce0..16c0cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -40,7 +40,6 @@ import org.apache.commons.io.filefilter.TrueFileFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.ClientContext;
@@ -655,7 +654,6 @@ public class TestDataNodeVolumeFailure {
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
- setTracer(FsTracer.get(conf)).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
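Every hunk above deletes the same pattern: a try-with-resources TraceScope created per read, with string-valued annotations built whether or not a span is sampled. A schematic before/after using the htrace-core4 calls as they appear in the removed code; TraceOverheadSketch and doRead are hypothetical stand-ins, not HDFS code:

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class TraceOverheadSketch {
  // Hypothetical stand-in for the underlying block read.
  private int doRead(long pos, int len) { return len; }

  // Before HDFS-13702: each call allocates a scope and converts pos/len
  // to strings, even when tracing is disabled.
  int tracedRead(Tracer tracer, long pos, int len) {
    try (TraceScope scope = tracer.newScope("DFSInputStream#byteArrayRead")) {
      scope.addKVAnnotation("pos", Long.toString(pos));
      scope.addKVAnnotation("reqLen", Integer.toString(len));
      return doRead(pos, len);
    }
  }

  // After HDFS-13702: the read proceeds with no per-call tracing work.
  int untracedRead(long pos, int len) {
    return doRead(pos, len);
  }
}

On a hot path invoked for every client read, that per-call object churn and the Long.toString conversions are the CPU cost the commit title refers to.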
[11/45] hadoop git commit: HDDS-194. Remove NodePoolManager and node
pool handling from SCM. Contributed by Elek Marton.
Posted by xy...@apache.org.
HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56a4cdb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56a4cdb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56a4cdb9
Branch: refs/heads/HDDS-4
Commit: 56a4cdb9804daea7164155a5b1b4eba44a11b705
Parents: 0d6fe5f
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jun 27 13:28:00 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jun 27 13:28:00 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 26 --
.../org/apache/hadoop/ozone/OzoneConsts.java | 1 -
.../common/src/main/resources/ozone-default.xml | 47 ---
.../hdds/scm/container/ContainerMapping.java | 10 +-
.../replication/ContainerSupervisor.java | 340 -------------------
.../container/replication/InProgressPool.java | 255 --------------
.../scm/container/replication/PeriodicPool.java | 119 -------
.../scm/container/replication/package-info.java | 23 --
.../hadoop/hdds/scm/node/NodeManager.java | 6 -
.../hadoop/hdds/scm/node/NodePoolManager.java | 71 ----
.../hadoop/hdds/scm/node/SCMNodeManager.java | 23 --
.../hdds/scm/node/SCMNodePoolManager.java | 269 ---------------
.../hdds/scm/container/MockNodeManager.java | 6 -
.../hdds/scm/node/TestSCMNodePoolManager.java | 160 ---------
.../testutils/ReplicationNodeManagerMock.java | 5 -
.../ReplicationNodePoolManagerMock.java | 133 --------
.../hadoop/ozone/scm/TestContainerSQLCli.java | 31 --
.../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 74 ----
18 files changed, 3 insertions(+), 1596 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 85407e6..df6fbf0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -243,32 +243,6 @@ public final class ScmConfigKeys {
public static final String
OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
- /**
- * Don't start processing a pool if we have not had a minimum number of
- * seconds from the last processing.
- */
- public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
- "ozone.scm.container.report.processing.interval";
- public static final String
- OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
-
- /**
- * This determines the total number of pools to be processed in parallel.
- */
- public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
- "ozone.scm.max.nodepool.processing.threads";
- public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
- /**
- * These 2 settings control the number of threads in executor pool and time
- * outs for thw container reports from all nodes.
- */
- public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
- "ozone.scm.max.container.report.threads";
- public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
- public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
- "ozone.scm.container.reports.wait.timeout";
- public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
- "5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index c40dc8e..08a5ffd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -91,7 +91,6 @@ public final class OzoneConsts {
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
public static final String BLOCK_DB = "block.db";
- public static final String NODEPOOL_DB = "nodepool.db";
public static final String OPEN_CONTAINERS_DB = "openContainers.db";
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
public static final String KSM_DB_NAME = "ksm.db";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 7a91610..25365c8 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -572,25 +572,6 @@
</description>
</property>
<property>
- <name>ozone.scm.container.report.processing.interval</name>
- <value>60s</value>
- <tag>OZONE, PERFORMANCE</tag>
- <description>Time interval for scm to process container reports
- for a node pool. Scm handles node pool reports in a cyclic clock
- manner, it fetches pools periodically with this time interval.
- </description>
- </property>
- <property>
- <name>ozone.scm.container.reports.wait.timeout</name>
- <value>300s</value>
- <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
- <description>Maximum time to wait in seconds for processing all container
- reports from
- a node pool. It determines the timeout for a
- node pool report.
- </description>
- </property>
- <property>
<name>ozone.scm.container.size.gb</name>
<value>5</value>
<tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
@@ -793,17 +774,6 @@
</description>
</property>
<property>
- <name>ozone.scm.max.container.report.threads</name>
- <value>100</value>
- <tag>OZONE, PERFORMANCE</tag>
- <description>
- Maximum number of threads to process container reports in scm.
- Each container report from a data node is processed by scm in a worker
- thread, fetched from a thread pool. This property is used to control the
- maximum size of the thread pool.
- </description>
- </property>
- <property>
<name>ozone.scm.max.hb.count.to.process</name>
<value>5000</value>
<tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
@@ -815,14 +785,6 @@
</description>
</property>
<property>
- <name>ozone.scm.max.nodepool.processing.threads</name>
- <value>1</value>
- <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
- <description>
- Number of node pools to process in parallel.
- </description>
- </property>
- <property>
<name>ozone.scm.names</name>
<value/>
<tag>OZONE</tag>
@@ -844,15 +806,6 @@
</description>
</property>
<property>
- <name>ozone.scm.max.nodepool.processing.threads</name>
- <value>1</value>
- <tag>OZONE, SCM</tag>
- <description>
- Controls the number of node pools that can be processed in parallel by
- Container Supervisor.
- </description>
- </property>
- <property>
<name>ozone.trace.enabled</name>
<value>false</value>
<tag>OZONE, DEBUG</tag>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index b563e90..9fd30f2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
@@ -80,7 +79,6 @@ public class ContainerMapping implements Mapping {
private final PipelineSelector pipelineSelector;
private final ContainerStateManager containerStateManager;
private final LeaseManager<ContainerInfo> containerLeaseManager;
- private final ContainerSupervisor containerSupervisor;
private final float containerCloseThreshold;
private final ContainerCloser closer;
private final long size;
@@ -127,9 +125,7 @@ public class ContainerMapping implements Mapping {
OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
this.containerStateManager =
new ContainerStateManager(conf, this);
- this.containerSupervisor =
- new ContainerSupervisor(conf, nodeManager,
- nodeManager.getNodePoolManager());
+
this.containerCloseThreshold = conf.getFloat(
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD,
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
@@ -407,8 +403,8 @@ public class ContainerMapping implements Mapping {
throws IOException {
List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
containerInfos = reports.getReportsList();
- containerSupervisor.handleContainerReport(datanodeDetails, reports);
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
+
+ for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
containerInfos) {
byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
lock.lock();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
deleted file mode 100644
index 5bd0574..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.PriorityQueue;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static com.google.common.util.concurrent.Uninterruptibles
- .sleepUninterruptibly;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT;
-
-/**
- * This class takes a set of container reports that belong to a pool and then
- * computes the replication levels for each container.
- */
-public class ContainerSupervisor implements Closeable {
- public static final Logger LOG =
- LoggerFactory.getLogger(ContainerSupervisor.class);
-
- private final NodePoolManager poolManager;
- private final HashSet<String> poolNames;
- private final PriorityQueue<PeriodicPool> poolQueue;
- private final NodeManager nodeManager;
- private final long containerProcessingLag;
- private final AtomicBoolean runnable;
- private final ExecutorService executorService;
- private final long maxPoolWait;
- private long poolProcessCount;
- private final List<InProgressPool> inProgressPoolList;
- private final AtomicInteger threadFaultCount;
- private final int inProgressPoolMaxCount;
-
- private final ReadWriteLock inProgressPoolListLock;
-
- /**
- * Returns the number of times we have processed pools.
- * @return long
- */
- public long getPoolProcessCount() {
- return poolProcessCount;
- }
-
-
- /**
- * Constructs a class that computes Replication Levels.
- *
- * @param conf - OzoneConfiguration
- * @param nodeManager - Node Manager
- * @param poolManager - Pool Manager
- */
- public ContainerSupervisor(Configuration conf, NodeManager nodeManager,
- NodePoolManager poolManager) {
- Preconditions.checkNotNull(poolManager);
- Preconditions.checkNotNull(nodeManager);
- this.containerProcessingLag =
- conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,
- OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,
- TimeUnit.SECONDS
- ) * 1000;
- int maxContainerReportThreads =
- conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS,
- OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT
- );
- this.maxPoolWait =
- conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT,
- OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT,
- TimeUnit.MILLISECONDS);
- this.inProgressPoolMaxCount = conf.getInt(
- OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS,
- OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT);
- this.poolManager = poolManager;
- this.nodeManager = nodeManager;
- this.poolNames = new HashSet<>();
- this.poolQueue = new PriorityQueue<>();
- this.runnable = new AtomicBoolean(true);
- this.threadFaultCount = new AtomicInteger(0);
- this.executorService = newCachedThreadPool(
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("Container Reports Processing Thread - %d")
- .build(), maxContainerReportThreads);
- this.inProgressPoolList = new LinkedList<>();
- this.inProgressPoolListLock = new ReentrantReadWriteLock();
-
- initPoolProcessThread();
- }
-
- private ExecutorService newCachedThreadPool(ThreadFactory threadFactory,
- int maxThreads) {
- return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
- new LinkedBlockingQueue<>(), threadFactory);
- }
-
- /**
- * Returns the number of pools currently being processed.
- * @return int - Number of pools in process.
- */
- public int getInProgressPoolCount() {
- return inProgressPoolList.size();
- }
-
- /**
- * Exits the background thread.
- */
- public void setExit() {
- this.runnable.set(false);
- }
-
- /**
- * Adds or removes pools from the set of pool names we need to process.
- *
- * There are two cases to handle: pools that have been added since the
- * last refresh and pools that have been removed.
- */
- private void refreshPools() {
- List<String> pools = this.poolManager.getNodePools();
- if (pools != null) {
-
- HashSet<String> removedPools =
- computePoolDifference(this.poolNames, new HashSet<>(pools));
-
- HashSet<String> addedPools =
- computePoolDifference(new HashSet<>(pools), this.poolNames);
- // TODO: Support remove pool API in pool manager so that this code
- // path can be tested. This never happens in the current code base.
- for (String poolName : removedPools) {
- for (PeriodicPool periodicPool : poolQueue) {
- if (periodicPool.getPoolName().compareTo(poolName) == 0) {
- poolQueue.remove(periodicPool);
- }
- }
- }
- // Remove the pool names that we have in the list.
- this.poolNames.removeAll(removedPools);
-
- for (String poolName : addedPools) {
- poolQueue.add(new PeriodicPool(poolName));
- }
-
- // Add to the pool names we are tracking.
- poolNames.addAll(addedPools);
- }
-
- }
-
- /**
- * Computes the set difference: pools present in newPools but not in
- * oldPool.
- *
- * @param newPools - new pool list.
- * @param oldPool - old pool list.
- */
- private HashSet<String> computePoolDifference(HashSet<String> newPools,
- Set<String> oldPool) {
- Preconditions.checkNotNull(newPools);
- Preconditions.checkNotNull(oldPool);
- HashSet<String> newSet = new HashSet<>(newPools);
- newSet.removeAll(oldPool);
- return newSet;
- }
-
- private void initPoolProcessThread() {
-
- /*
- * Task that runs to check if we need to start a pool processing job.
- * If so, we create a pool reconciliation job and find out whether all
- * the expected containers are on the nodes.
- */
- Runnable processPools = () -> {
- while (runnable.get()) {
- // Make sure that we don't have any new pools.
- refreshPools();
- while (inProgressPoolList.size() < inProgressPoolMaxCount) {
- PeriodicPool pool = poolQueue.poll();
- if (pool != null) {
- if (pool.getLastProcessedTime() + this.containerProcessingLag >
- Time.monotonicNow()) {
- LOG.debug("Not within the time window for processing: {}",
- pool.getPoolName());
- // we might oversleep here; not a big deal.
- sleepUninterruptibly(this.containerProcessingLag,
- TimeUnit.MILLISECONDS);
- }
- LOG.debug("Adding pool {} to container processing queue",
- pool.getPoolName());
- InProgressPool inProgressPool = new InProgressPool(maxPoolWait,
- pool, this.nodeManager, this.poolManager, this.executorService);
- inProgressPool.startReconciliation();
- inProgressPoolListLock.writeLock().lock();
- try {
- inProgressPoolList.add(inProgressPool);
- } finally {
- inProgressPoolListLock.writeLock().unlock();
- }
- poolProcessCount++;
- } else {
- break;
- }
- }
- sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS);
- inProgressPoolListLock.readLock().lock();
- try {
- for (InProgressPool inProgressPool : inProgressPoolList) {
- inProgressPool.finalizeReconciliation();
- poolQueue.add(inProgressPool.getPool());
- }
- } finally {
- inProgressPoolListLock.readLock().unlock();
- }
- inProgressPoolListLock.writeLock().lock();
- try {
- inProgressPoolList.clear();
- } finally {
- inProgressPoolListLock.writeLock().unlock();
- }
- }
- };
-
- // We will have only one thread for pool processing.
- Thread poolProcessThread = new Thread(processPools);
- poolProcessThread.setDaemon(true);
- poolProcessThread.setName("Pool replica thread");
- poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> {
- // Let us just restart this thread after logging a critical error.
- // if this thread is not running we cannot handle commands from SCM.
- LOG.error("Critical Error : Pool replica thread encountered an " +
- "error. Thread: {} Error Count : {}", t.toString(), e,
- threadFaultCount.incrementAndGet());
- poolProcessThread.start();
- // TODO : Add a config to restrict how many times we will restart this
- // thread in a single session.
- });
- poolProcessThread.start();
- }
-
- /**
- * Adds a container report to the appropriate in-progress pool.
- * @param datanodeDetails -- Datanode that sent the report.
- * @param containerReport -- Container report for a specific container from
- * a datanode.
- */
- public void handleContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto containerReport) {
- inProgressPoolListLock.readLock().lock();
- try {
- String poolName = poolManager.getNodePool(datanodeDetails);
- for (InProgressPool ppool : inProgressPoolList) {
- if (ppool.getPoolName().equalsIgnoreCase(poolName)) {
- ppool.handleContainerReport(datanodeDetails, containerReport);
- return;
- }
- }
- // TODO: Decide if we can do anything else with this report.
- LOG.debug("Discarding the container report for pool {}. " +
- "That pool is not currently in the pool reconciliation process." +
- " Container Name: {}", poolName, datanodeDetails);
- } catch (SCMException e) {
- LOG.warn("Skipping processing container report from datanode {}, "
- + "cause: failed to get the corresponding node pool",
- datanodeDetails.toString(), e);
- } finally {
- inProgressPoolListLock.readLock().unlock();
- }
- }
-
- /**
- * Gets the in-process pool list; used for testing.
- * @return List of InProgressPool
- */
- @VisibleForTesting
- public List<InProgressPool> getInProcessPoolList() {
- return inProgressPoolList;
- }
-
- /**
- * Shutdown the Container Replication Manager.
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
- setExit();
- HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS);
- }
-}
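
Aside: the scheduling at the heart of the removed supervisor is a bounded
work-queue pattern: pools ordered by last-processed time, with at most a
fixed number in flight per cycle. A minimal, self-contained sketch of that
pattern follows; all names are illustrative and nothing here is part of the
remaining codebase.

    import java.util.PriorityQueue;
    import java.util.concurrent.TimeUnit;

    // Illustrative stand-in for PeriodicPool: least recently processed first.
    class Pool implements Comparable<Pool> {
      final String name;
      long lastProcessed;
      Pool(String name) { this.name = name; }
      @Override public int compareTo(Pool o) {
        return Long.compare(lastProcessed, o.lastProcessed);
      }
    }

    public class PoolCycleSketch {
      public static void main(String[] args) throws InterruptedException {
        PriorityQueue<Pool> queue = new PriorityQueue<>();
        queue.add(new Pool("pool-a"));
        queue.add(new Pool("pool-b"));
        final int maxInProgress = 1;           // bounded in-flight set
        for (int cycle = 0; cycle < 3; cycle++) {
          int inProgress = 0;
          while (inProgress < maxInProgress && !queue.isEmpty()) {
            Pool p = queue.poll();             // oldest pool first
            System.out.println("processing " + p.name);
            p.lastProcessed = System.nanoTime();
            inProgress++;
            queue.add(p);                      // requeue for a later cycle
          }
          TimeUnit.MILLISECONDS.sleep(10);     // stand-in for maxPoolWait
        }
      }
    }
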
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
deleted file mode 100644
index 4b54731..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-/**
- * These are pools that are actively checking for replication status of the
- * containers.
- */
-public final class InProgressPool {
- public static final Logger LOG =
- LoggerFactory.getLogger(InProgressPool.class);
-
- private final PeriodicPool pool;
- private final NodeManager nodeManager;
- private final NodePoolManager poolManager;
- private final ExecutorService executorService;
- private final Map<Long, Integer> containerCountMap;
- private final Map<UUID, Boolean> processedNodeSet;
- private final long startTime;
- private ProgressStatus status;
- private AtomicInteger nodeCount;
- private AtomicInteger nodeProcessed;
- private AtomicInteger containerProcessedCount;
- private long maxWaitTime;
- /**
- * Constructs a pool that is being processed.
- * @param maxWaitTime - Maximum wait time in milliseconds.
- * @param pool - Pool that we are working against
- * @param nodeManager - Nodemanager
- * @param poolManager - pool manager
- * @param executorService - Shared Executor service.
- */
- InProgressPool(long maxWaitTime, PeriodicPool pool,
- NodeManager nodeManager, NodePoolManager poolManager,
- ExecutorService executorService) {
- Preconditions.checkNotNull(pool);
- Preconditions.checkNotNull(nodeManager);
- Preconditions.checkNotNull(poolManager);
- Preconditions.checkNotNull(executorService);
- Preconditions.checkArgument(maxWaitTime > 0);
- this.pool = pool;
- this.nodeManager = nodeManager;
- this.poolManager = poolManager;
- this.executorService = executorService;
- this.containerCountMap = new ConcurrentHashMap<>();
- this.processedNodeSet = new ConcurrentHashMap<>();
- this.maxWaitTime = maxWaitTime;
- startTime = Time.monotonicNow();
- }
-
- /**
- * Returns periodic pool.
- *
- * @return PeriodicPool
- */
- public PeriodicPool getPool() {
- return pool;
- }
-
- /**
- * We are done if we have received reports from all nodes or we have
- * finished waiting for the specified time.
- *
- * @return true if we are done, false otherwise.
- */
- public boolean isDone() {
- return (nodeCount.get() == nodeProcessed.get()) ||
- // done once the wait deadline has passed
- (this.startTime + this.maxWaitTime) < Time.monotonicNow();
- }
-
- /**
- * Gets the number of containers processed.
- *
- * @return int
- */
- public int getContainerProcessedCount() {
- return containerProcessedCount.get();
- }
-
- /**
- * Returns the start time in milliseconds.
- *
- * @return - Start Time.
- */
- public long getStartTime() {
- return startTime;
- }
-
- /**
- * Get the number of nodes in this pool.
- *
- * @return - node count
- */
- public int getNodeCount() {
- return nodeCount.get();
- }
-
- /**
- * Get the number of nodes that we have already processed container reports
- * from.
- *
- * @return - Processed count.
- */
- public int getNodeProcessed() {
- return nodeProcessed.get();
- }
-
- /**
- * Returns the current status.
- *
- * @return Status
- */
- public ProgressStatus getStatus() {
- return status;
- }
-
- /**
- * Starts the reconciliation process for all the nodes in the pool.
- */
- public void startReconciliation() {
- List<DatanodeDetails> datanodeDetailsList =
- this.poolManager.getNodes(pool.getPoolName());
- if (datanodeDetailsList.isEmpty()) {
- LOG.error("Datanode list for {} is empty. Pool with no nodes?",
- pool.getPoolName());
- this.status = ProgressStatus.Error;
- return;
- }
-
- nodeProcessed = new AtomicInteger(0);
- containerProcessedCount = new AtomicInteger(0);
- nodeCount = new AtomicInteger(0);
- this.status = ProgressStatus.InProgress;
- this.getPool().setLastProcessedTime(Time.monotonicNow());
- }
-
- /**
- * Queues a container report for handling. This is done in a worker thread
- * since decoding a container report might be compute intensive. We don't
- * want to block since we have asked for a bunch of container reports
- * from a set of datanodes.
- *
- * @param containerReport - ContainerReport
- */
- public void handleContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto containerReport) {
- if (status == ProgressStatus.InProgress) {
- executorService.submit(processContainerReport(datanodeDetails,
- containerReport));
- } else {
- LOG.debug("Cannot handle container report when the pool is in {} status.",
- status);
- }
- }
-
- private Runnable processContainerReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto reports) {
- return () -> {
- // Process each datanode's report only once: putIfAbsent returns null
- // on the first insertion, so duplicate reports are skipped.
- if (processedNodeSet.putIfAbsent(datanodeDetails.getUuid(),
- true) == null) {
- nodeProcessed.incrementAndGet();
- LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed,
- datanodeDetails.getUuid());
- for (ContainerInfo info : reports.getReportsList()) {
- containerProcessedCount.incrementAndGet();
- LOG.debug("Total Containers processed: {} Container Name: {}",
- containerProcessedCount.get(), info.getContainerID());
-
- // Update the container map with count + 1 if the key exists or
- // update the map with 1. Since this is a concurrentMap the
- // computation and update is atomic.
- containerCountMap.merge(info.getContainerID(), 1, Integer::sum);
- }
- }
- };
- }
-
- /**
- * Filter the containers based on specific rules.
- *
- * @param predicate -- Predicate to filter by
- * @return A list of map entries.
- */
- public List<Map.Entry<Long, Integer>> filterContainer(
- Predicate<Map.Entry<Long, Integer>> predicate) {
- return containerCountMap.entrySet().stream()
- .filter(predicate).collect(Collectors.toList());
- }
-
- /**
- * Used only for testing; calling this will abort container report
- * processing. This is a very dangerous call and should not be made by
- * any user.
- */
- @VisibleForTesting
- public void setDoneProcessing() {
- nodeProcessed.set(nodeCount.get());
- }
-
- /**
- * Returns the pool name.
- *
- * @return Name of the pool.
- */
- String getPoolName() {
- return pool.getPoolName();
- }
-
- public void finalizeReconciliation() {
- status = ProgressStatus.Done;
- //TODO: Add finalizing logic. This is where actual reconciliation happens.
- }
-
- /**
- * Current status of the computing replication status.
- */
- public enum ProgressStatus {
- InProgress, Done, Error
- }
-}
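
Stripped of SCM types, the tallying half of InProgressPool is
ConcurrentHashMap.merge plus a predicate filter over the resulting counts.
A small self-contained sketch; the report data and the expected replica
count below are made up for illustration:

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    public class ReplicaCountSketch {
      public static void main(String[] args) {
        Map<Long, Integer> counts = new ConcurrentHashMap<>();
        // Hypothetical container IDs reported by three datanodes.
        long[][] reports = {{1L, 2L}, {1L, 2L, 3L}, {1L, 3L}};
        for (long[] datanodeReport : reports) {
          for (long containerId : datanodeReport) {
            // Atomic increment-or-insert, the same idiom the removed
            // code used per container report.
            counts.merge(containerId, 1, Integer::sum);
          }
        }
        final int expectedReplicas = 3;
        Predicate<Map.Entry<Long, Integer>> underReplicated =
            e -> e.getValue() < expectedReplicas;
        List<Map.Entry<Long, Integer>> flagged = counts.entrySet().stream()
            .filter(underReplicated).collect(Collectors.toList());
        System.out.println("under-replicated: " + flagged); // containers 2, 3
      }
    }
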
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
deleted file mode 100644
index ef28aa7..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * A periodic pool is a pool with a timestamp, which allows us to process
- * pools based on a cyclic clock.
- */
-public class PeriodicPool implements Comparable<PeriodicPool> {
- private final String poolName;
- private long lastProcessedTime;
- private AtomicLong totalProcessedCount;
-
- /**
- * Constructs a periodic pool.
- *
- * @param poolName - Name of the pool
- */
- public PeriodicPool(String poolName) {
- this.poolName = poolName;
- lastProcessedTime = 0;
- totalProcessedCount = new AtomicLong(0);
- }
-
- /**
- * Get pool Name.
- * @return PoolName
- */
- public String getPoolName() {
- return poolName;
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less
- * than, equal to, or greater than the specified object.
- *
- * @param o the object to be compared.
- * @return a negative integer, zero, or a positive integer as this object is
- * less than, equal to, or greater than the specified object.
- * @throws NullPointerException if the specified object is null
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this object.
- */
- @Override
- public int compareTo(PeriodicPool o) {
- return Long.compare(this.lastProcessedTime, o.lastProcessedTime);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
-
- PeriodicPool that = (PeriodicPool) o;
-
- return poolName.equals(that.poolName);
- }
-
- @Override
- public int hashCode() {
- return poolName.hashCode();
- }
-
- /**
- * Returns the total number of times we have processed this pool.
- *
- * @return processed count.
- */
- public long getTotalProcessedCount() {
- return totalProcessedCount.get();
- }
-
- /**
- * Gets the last time we processed this pool.
- * @return time in milliseconds
- */
- public long getLastProcessedTime() {
- return this.lastProcessedTime;
- }
-
-
- /**
- * Sets the last processed time.
- *
- * @param lastProcessedTime - Long in milliseconds.
- */
- public void setLastProcessedTime(long lastProcessedTime) {
- this.lastProcessedTime = lastProcessedTime;
- }
-
- /**
- * Increments the total processed count.
- */
- public void incTotalProcessedCount() {
- this.totalProcessedCount.incrementAndGet();
- }
-}
\ No newline at end of file
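
A design note worth recording as this class leaves the tree: compareTo
orders by lastProcessedTime while equals/hashCode use poolName, so the
ordering is inconsistent with equals. That is fine for PriorityQueue, which
never consults equals, but a TreeSet would treat two distinct pools with
equal timestamps as duplicates. A hypothetical stand-in class makes the
hazard concrete:

    import java.util.PriorityQueue;
    import java.util.TreeSet;

    class P implements Comparable<P> {
      final String name; final long t;
      P(String name, long t) { this.name = name; this.t = t; }
      @Override public int compareTo(P o) { return Long.compare(t, o.t); }
      @Override public boolean equals(Object o) {
        return o instanceof P && name.equals(((P) o).name);
      }
      @Override public int hashCode() { return name.hashCode(); }
    }

    public class OrderingSketch {
      public static void main(String[] args) {
        P a = new P("a", 0), b = new P("b", 0); // distinct pools, same time
        PriorityQueue<P> pq = new PriorityQueue<>();
        pq.add(a); pq.add(b);
        TreeSet<P> ts = new TreeSet<>();
        ts.add(a); ts.add(b);                   // second add is rejected
        System.out.println(pq.size() + " vs " + ts.size()); // prints "2 vs 1"
      }
    }
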
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
deleted file mode 100644
index 7bbe2ef..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container.replication;
-/*
- This package contains routines that manage replication of a container. This
- relies on container reports to understand the replication level of a
- container - UnderReplicated, Replicated, OverReplicated -- and manages the
- replication level based on that.
- */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 4392633..72d7e94 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -124,12 +124,6 @@ public interface NodeManager extends StorageContainerNodeProtocol,
SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails);
/**
- * Returns the NodePoolManager associated with the NodeManager.
- * @return NodePoolManager
- */
- NodePoolManager getNodePoolManager();
-
- /**
* Wait for the heartbeat is processed by NodeManager.
* @return true if heartbeat has been processed.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
deleted file mode 100644
index 46faf9ca..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Interface that defines SCM NodePoolManager.
- */
-public interface NodePoolManager extends Closeable {
-
- /**
- * Add a node to a node pool.
- * @param pool - name of the node pool.
- * @param node - data node.
- */
- void addNode(String pool, DatanodeDetails node) throws IOException;
-
- /**
- * Remove a node from a node pool.
- * @param pool - name of the node pool.
- * @param node - data node.
- * @throws SCMException
- */
- void removeNode(String pool, DatanodeDetails node)
- throws SCMException;
-
- /**
- * Get a list of known node pools.
- * @return a list of known node pool names or an empty list if no node pool
- * is defined.
- */
- List<String> getNodePools();
-
- /**
- * Get all nodes of a node pool given the name of the node pool.
- * @param pool - name of the node pool.
- * @return a list of datanode ids or an empty list if the node pool was not
- * found.
- */
- List<DatanodeDetails> getNodes(String pool);
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- * @param datanodeDetails - datanode ID.
- * @return node pool name if it has been assigned.
- * null if the node has not been assigned to any node pool yet.
- */
- String getNodePool(DatanodeDetails datanodeDetails) throws SCMException;
-}
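
The one non-trivial call pattern against this interface lived in the
registration path of SCMNodeManager (removed below): put a node into the
default pool if it is not pooled yet. A hedged sketch of that idiom,
assuming some NodePoolManager implementation is still available:

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;

    import java.io.IOException;

    final class PoolAssignmentSketch {
      private PoolAssignmentSketch() { }

      // "DefaultNodePool" mirrors the default pool name that
      // SCMNodePoolManager declared; the manager instance is assumed.
      static void assignIfUnpooled(NodePoolManager mgr, DatanodeDetails node)
          throws IOException {
        if (mgr.getNodePool(node) == null) {  // node is not in any pool yet
          mgr.addNode("DefaultNodePool", node);
        }
      }
    }
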
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index fc8b013..adca8ea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import com.google.protobuf.GeneratedMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -159,7 +158,6 @@ public class SCMNodeManager
private ObjectName nmInfoBean;
// Node pool manager.
- private final SCMNodePoolManager nodePoolManager;
private final StorageContainerManager scmManager;
public static final Event<CommandForDatanode> DATANODE_COMMAND =
@@ -210,7 +208,6 @@ public class SCMNodeManager
registerMXBean();
- this.nodePoolManager = new SCMNodePoolManager(conf);
this.scmManager = scmManager;
}
@@ -682,7 +679,6 @@ public class SCMNodeManager
@Override
public void close() throws IOException {
unregisterMXBean();
- nodePoolManager.close();
executorService.shutdown();
try {
if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
@@ -760,20 +756,6 @@ public class SCMNodeManager
LOG.info("Leaving startup chill mode.");
}
- // TODO: define node pool policy for non-default node pool.
- // For now, all nodes are added to the "DefaultNodePool" upon registration
- // if it has not been added to any node pool yet.
- try {
- if (nodePoolManager.getNodePool(datanodeDetails) == null) {
- nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL,
- datanodeDetails);
- }
- } catch (IOException e) {
- // TODO: make sure registration failure is handled correctly.
- return RegisteredCommand.newBuilder()
- .setErrorCode(ErrorCode.errorNodeNotPermitted)
- .build();
- }
// Updating Node Report, as registration is successful
updateNodeStat(datanodeDetails.getUuid(), nodeReport);
LOG.info("Data node with ID: {} Registered.",
@@ -860,11 +842,6 @@ public class SCMNodeManager
}
@Override
- public NodePoolManager getNodePoolManager() {
- return nodePoolManager;
- }
-
- @Override
public Map<String, Integer> getNodeCount() {
Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
for(NodeState state : NodeState.values()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
deleted file mode 100644
index faf330e..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
- .OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .FAILED_TO_FIND_NODE_IN_POOL;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .FAILED_TO_LOAD_NODEPOOL;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
-
-/**
- * SCM node pool manager that manages node pools.
- */
-public final class SCMNodePoolManager implements NodePoolManager {
-
- private static final Logger LOG =
- LoggerFactory.getLogger(SCMNodePoolManager.class);
- private static final List<DatanodeDetails> EMPTY_NODE_LIST =
- new ArrayList<>();
- private static final List<String> EMPTY_NODEPOOL_LIST = new ArrayList<>();
- public static final String DEFAULT_NODEPOOL = "DefaultNodePool";
-
- // DB that saves the node to node pool mapping.
- private MetadataStore nodePoolStore;
-
- // In-memory node pool to nodes mapping
- private HashMap<String, Set<DatanodeDetails>> nodePools;
-
- // Read-write lock for nodepool operations
- private ReadWriteLock lock;
-
- /**
- * Construct SCMNodePoolManager class that manages node to node pool mapping.
- * @param conf - configuration.
- * @throws IOException
- */
- public SCMNodePoolManager(final OzoneConfiguration conf)
- throws IOException {
- final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
- OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
- File metaDir = getOzoneMetaDirPath(conf);
- String scmMetaDataDir = metaDir.getPath();
- File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB);
- nodePoolStore = MetadataStoreBuilder.newBuilder()
- .setConf(conf)
- .setDbFile(nodePoolDBPath)
- .setCacheSize(cacheSize * OzoneConsts.MB)
- .build();
- nodePools = new HashMap<>();
- lock = new ReentrantReadWriteLock();
- init();
- }
-
- /**
- * Initializes the in-memory store from the persisted LevelDB store.
- * No lock is needed as init() is only invoked by the constructor.
- * @throws SCMException
- */
- private void init() throws SCMException {
- try {
- nodePoolStore.iterate(null, (key, value) -> {
- try {
- DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf(
- HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key));
- String poolName = DFSUtil.bytes2String(value);
-
- Set<DatanodeDetails> nodePool = null;
- if (nodePools.containsKey(poolName)) {
- nodePool = nodePools.get(poolName);
- } else {
- nodePool = new HashSet<>();
- nodePools.put(poolName, nodePool);
- }
- nodePool.add(nodeId);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Adding node: {} to node pool: {}",
- nodeId, poolName);
- }
- } catch (IOException e) {
- LOG.warn("Can't add a datanode to node pool, continue next...");
- }
- return true;
- });
- } catch (IOException e) {
- LOG.error("Loading node pool error " + e);
- throw new SCMException("Failed to load node pool",
- FAILED_TO_LOAD_NODEPOOL);
- }
- }
-
- /**
- * Add a datanode to a node pool.
- * @param pool - name of the node pool.
- * @param node - the datanode to add.
- */
- @Override
- public void addNode(final String pool, final DatanodeDetails node)
- throws IOException {
- Preconditions.checkNotNull(pool, "pool name is null");
- Preconditions.checkNotNull(node, "node is null");
- lock.writeLock().lock();
- try {
- // add to the persistent store
- nodePoolStore.put(node.getProtoBufMessage().toByteArray(),
- DFSUtil.string2Bytes(pool));
-
- // add to the in-memory store
- Set<DatanodeDetails> nodePool = null;
- if (nodePools.containsKey(pool)) {
- nodePool = nodePools.get(pool);
- } else {
- nodePool = new HashSet<DatanodeDetails>();
- nodePools.put(pool, nodePool);
- }
- nodePool.add(node);
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- /**
- * Remove a datanode from a node pool.
- * @param pool - name of the node pool.
- * @param node - datanode id.
- * @throws SCMException
- */
- @Override
- public void removeNode(final String pool, final DatanodeDetails node)
- throws SCMException {
- Preconditions.checkNotNull(pool, "pool name is null");
- Preconditions.checkNotNull(node, "node is null");
- lock.writeLock().lock();
- try {
- // Remove from the persistent store
- byte[] kName = node.getProtoBufMessage().toByteArray();
- byte[] kData = nodePoolStore.get(kName);
- if (kData == null) {
- throw new SCMException(String.format("Unable to find node %s from" +
- " pool %s in DB.", DFSUtil.bytes2String(kName), pool),
- FAILED_TO_FIND_NODE_IN_POOL);
- }
- nodePoolStore.delete(kName);
-
- // Remove from the in-memory store
- if (nodePools.containsKey(pool)) {
- Set<DatanodeDetails> nodePool = nodePools.get(pool);
- nodePool.remove(node);
- } else {
- throw new SCMException(String.format("Unable to find node %s from" +
- " pool %s in MAP.", DFSUtil.bytes2String(kName), pool),
- FAILED_TO_FIND_NODE_IN_POOL);
- }
- } catch (IOException e) {
- throw new SCMException("Failed to remove node " + node.toString()
- + " from node pool " + pool, e,
- SCMException.ResultCodes.IO_EXCEPTION);
- } finally {
- lock.writeLock().unlock();
- }
- }
-
- /**
- * Get all the node pools.
- * @return all the node pools.
- */
- @Override
- public List<String> getNodePools() {
- lock.readLock().lock();
- try {
- if (!nodePools.isEmpty()) {
- return nodePools.keySet().stream().collect(Collectors.toList());
- } else {
- return EMPTY_NODEPOOL_LIST;
- }
- } finally {
- lock.readLock().unlock();
- }
- }
-
- /**
- * Get all datanodes of a specific node pool.
- * @param pool - name of the node pool.
- * @return all datanodes of the specified node pool.
- */
- @Override
- public List<DatanodeDetails> getNodes(final String pool) {
- Preconditions.checkNotNull(pool, "pool name is null");
- if (nodePools.containsKey(pool)) {
- return nodePools.get(pool).stream().collect(Collectors.toList());
- } else {
- return EMPTY_NODE_LIST;
- }
- }
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- * @param datanodeDetails - datanode ID.
- * @return node pool name if it has been assigned.
- * null if the node has not been assigned to any node pool yet.
- * TODO: Put this in an in-memory map if performance is an issue.
- */
- @Override
- public String getNodePool(final DatanodeDetails datanodeDetails)
- throws SCMException {
- Preconditions.checkNotNull(datanodeDetails, "node is null");
- try {
- byte[] result = nodePoolStore.get(
- datanodeDetails.getProtoBufMessage().toByteArray());
- return result == null ? null : DFSUtil.bytes2String(result);
- } catch (IOException e) {
- throw new SCMException("Failed to get node pool for node "
- + datanodeDetails.toString(), e,
- SCMException.ResultCodes.IO_EXCEPTION);
- }
- }
-
- /**
- * Close node pool level db store.
- * @throws IOException
- */
- @Override
- public void close() throws IOException {
- nodePoolStore.close();
- }
-}
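
The manager above is a write-through pattern: every mutation hits the
persistent key-value store first, then an in-memory index, all under a
write lock. A compact sketch with a plain map standing in for the
LevelDB-backed MetadataStore (all names illustrative):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class WriteThroughIndexSketch {
      // Stand-in for the persistent MetadataStore: nodeId -> poolName.
      private final Map<String, String> store = new HashMap<>();
      // In-memory index: poolName -> set of nodeIds.
      private final Map<String, Set<String>> byPool = new HashMap<>();
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      public void addNode(String pool, String nodeId) {
        lock.writeLock().lock();
        try {
          store.put(nodeId, pool);  // persist first, then index
          byPool.computeIfAbsent(pool, p -> new HashSet<>()).add(nodeId);
        } finally {
          lock.writeLock().unlock();
        }
      }

      public Set<String> getNodes(String pool) {
        lock.readLock().lock();
        try {
          return new HashSet<>(byPool.getOrDefault(pool, new HashSet<>()));
        } finally {
          lock.readLock().unlock();
        }
      }

      public static void main(String[] args) {
        WriteThroughIndexSketch m = new WriteThroughIndexSketch();
        m.addNode("DefaultNodePool", "dn-1");
        System.out.println(m.getNodes("DefaultNodePool")); // [dn-1]
      }
    }
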
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 8c59462..80b5d6e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.scm.container;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -273,11 +272,6 @@ public class MockNodeManager implements NodeManager {
return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid()));
}
- @Override
- public NodePoolManager getNodePoolManager() {
- return Mockito.mock(NodePoolManager.class);
- }
-
/**
* Used for testing.
*
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
deleted file mode 100644
index 8f412de..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.commons.collections.ListUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
- .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
- .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.test.PathUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test for SCM node pool manager.
- */
-public class TestSCMNodePoolManager {
- private static final Logger LOG =
- LoggerFactory.getLogger(TestSCMNodePoolManager.class);
-
- @Rule
- public ExpectedException thrown = ExpectedException.none();
-
- private final File testDir = PathUtils.getTestDir(
- TestSCMNodePoolManager.class);
-
- SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf)
- throws IOException {
- conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
- testDir.getAbsolutePath());
- conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
- SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
- return new SCMNodePoolManager(conf);
- }
-
- /**
- * Test default node pool.
- *
- * @throws IOException
- */
- @Test
- public void testDefaultNodePool() throws IOException {
- OzoneConfiguration conf = new OzoneConfiguration();
- try {
- final String defaultPool = "DefaultPool";
- NodePoolManager npMgr = createNodePoolManager(conf);
-
- final int nodeCount = 4;
- final List<DatanodeDetails> nodes = TestUtils
- .getListOfDatanodeDetails(nodeCount);
- assertEquals(0, npMgr.getNodePools().size());
- for (DatanodeDetails node: nodes) {
- npMgr.addNode(defaultPool, node);
- }
- List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
- assertEquals(nodeCount, nodesRetrieved.size());
- assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
-
- DatanodeDetails nodeRemoved = nodes.remove(2);
- npMgr.removeNode(defaultPool, nodeRemoved);
- List<DatanodeDetails> nodesAfterRemove = npMgr.getNodes(defaultPool);
- assertTwoDatanodeListsEqual(nodes, nodesAfterRemove);
-
- List<DatanodeDetails> nonExistSet = npMgr.getNodes("NonExistSet");
- assertEquals(0, nonExistSet.size());
- } finally {
- FileUtil.fullyDelete(testDir);
- }
- }
-
-
- /**
- * Test default node pool reload.
- *
- * @throws IOException
- */
- @Test
- public void testDefaultNodePoolReload() throws IOException {
- OzoneConfiguration conf = new OzoneConfiguration();
- final String defaultPool = "DefaultPool";
- final int nodeCount = 4;
- final List<DatanodeDetails> nodes = TestUtils
- .getListOfDatanodeDetails(nodeCount);
-
- try {
- try {
- SCMNodePoolManager npMgr = createNodePoolManager(conf);
- assertEquals(0, npMgr.getNodePools().size());
- for (DatanodeDetails node : nodes) {
- npMgr.addNode(defaultPool, node);
- }
- List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
- assertEquals(nodeCount, nodesRetrieved.size());
- assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
- npMgr.close();
- } finally {
- LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" +
- " and close.");
- }
-
- // try reload with a new NodePoolManager instance
- try {
- SCMNodePoolManager npMgr = createNodePoolManager(conf);
- List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
- assertEquals(nodeCount, nodesRetrieved.size());
- assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
- } finally {
- LOG.info("testDefaultNodePoolReload: Finish reloading node pool.");
- }
- } finally {
- FileUtil.fullyDelete(testDir);
- }
- }
-
- /**
- * Compare and verify that two datanode lists are equal.
- * @param list1 - datanode list 1.
- * @param list2 - datanode list 2.
- */
- private void assertTwoDatanodeListsEqual(List<DatanodeDetails> list1,
- List<DatanodeDetails> list2) {
- assertEquals(list1.size(), list2.size());
- Collections.sort(list1);
- Collections.sort(list2);
- assertTrue(ListUtils.isEqualList(list1, list2));
- }
-}
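
A test-hygiene note on the helper above: assertTwoDatanodeListsEqual sorts
the caller's lists in place, so shared fixtures come back reordered. A
non-mutating variant costs two copies; a generic sketch (illustrative, not
part of this patch):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    final class ListAssertSketch {
      private ListAssertSketch() { }

      // Order-insensitive equality that leaves both input lists untouched.
      static <T extends Comparable<T>> boolean equalIgnoringOrder(
          List<T> a, List<T> b) {
        List<T> x = new ArrayList<>(a);  // copy before sorting
        List<T> y = new ArrayList<>(b);
        Collections.sort(x);
        Collections.sort(y);
        return x.equals(y);
      }
    }
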
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 072d821..1a4dcd7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.CommandQueue;
import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -201,10 +200,6 @@ public class ReplicationNodeManagerMock implements NodeManager {
return null;
}
- @Override
- public NodePoolManager getNodePoolManager() {
- return Mockito.mock(NodePoolManager.class);
- }
/**
* Wait for the heartbeat is processed by NodeManager.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
deleted file mode 100644
index ffcd752..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodePoolManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Pool Manager replication mock.
- */
-public class ReplicationNodePoolManagerMock implements NodePoolManager {
-
- private final Map<DatanodeDetails, String> nodeMemberShip;
-
- /**
- * A node pool manager for testing.
- */
- public ReplicationNodePoolManagerMock() {
- nodeMemberShip = new HashMap<>();
- }
-
- /**
- * Add a node to a node pool.
- *
- * @param pool - name of the node pool.
- * @param node - data node.
- */
- @Override
- public void addNode(String pool, DatanodeDetails node) {
- nodeMemberShip.put(node, pool);
- }
-
- /**
- * Remove a node from a node pool.
- *
- * @param pool - name of the node pool.
- * @param node - data node.
- * @throws SCMException
- */
- @Override
- public void removeNode(String pool, DatanodeDetails node)
- throws SCMException {
- nodeMemberShip.remove(node);
-
- }
-
- /**
- * Get a list of known node pools.
- *
- * @return a list of known node pool names or an empty list if no node pool
- * is defined.
- */
- @Override
- public List<String> getNodePools() {
- Set<String> poolSet = new HashSet<>();
- for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
- poolSet.add(entry.getValue());
- }
- return new ArrayList<>(poolSet);
-
- }
-
- /**
- * Get all nodes of a node pool given the name of the node pool.
- *
- * @param pool - name of the node pool.
- * @return a list of datanode ids or an empty list if the node pool was not
- * found.
- */
- @Override
- public List<DatanodeDetails> getNodes(String pool) {
- Set<DatanodeDetails> datanodeSet = new HashSet<>();
- for (Map.Entry<DatanodeDetails, String> entry : nodeMemberShip.entrySet()) {
- if (entry.getValue().equals(pool)) {
- datanodeSet.add(entry.getKey());
- }
- }
- return new ArrayList<>(datanodeSet);
- }
-
- /**
- * Get the node pool name if the node has been added to a node pool.
- *
- * @param datanodeDetails DatanodeDetails.
- * @return node pool name if it has been assigned. null if the node has not
- * been assigned to any node pool yet.
- */
- @Override
- public String getNodePool(DatanodeDetails datanodeDetails) {
- return nodeMemberShip.get(datanodeDetails);
- }
-
- /**
- * Closes this stream and releases any system resources associated
- * with it. If the stream is already closed then invoking this
- * method has no effect.
- * <p>
- * <p> As noted in {@link AutoCloseable#close()}, cases where the
- * close may fail require careful attention. It is strongly advised
- * to relinquish the underlying resources and to internally
- * <em>mark</em> the {@code Closeable} as closed, prior to throwing
- * the {@code IOException}.
- *
- * @throws IOException if an I/O error occurs
- */
- @Override
- public void close() throws IOException {
-
- }
-}
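
A quick usage sketch for the mock above, as it might have appeared in a
unit test. JUnit 4 and the TestUtils helper used by the removed tests in
this patch are assumed:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.TestUtils;
    import org.junit.Test;

    public class ReplicationNodePoolManagerMockUsageSketch {
      @Test
      public void tracksMembership() throws Exception {
        ReplicationNodePoolManagerMock mgr =
            new ReplicationNodePoolManagerMock();
        DatanodeDetails node = TestUtils.getListOfDatanodeDetails(1).get(0);
        mgr.addNode("pool-1", node);
        assertEquals("pool-1", mgr.getNodePool(node));
        assertEquals(1, mgr.getNodes("pool-1").size());
      }
    }
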
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 4d70af8..b4ed2b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -51,12 +51,9 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.UUID;
-import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* This class tests the CLI that transforms container into SQLite DB files.
@@ -177,34 +174,6 @@ public class TestContainerSQLCli {
}
@Test
- public void testConvertNodepoolDB() throws Exception {
- String dbOutPath = GenericTestUtils.getTempPath(
- UUID.randomUUID() + "/out_sql.db");
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
- String dbPath = dbRootPath + "/" + NODEPOOL_DB;
- String[] args = {"-p", dbPath, "-o", dbOutPath};
-
- cli.run(args);
-
- // verify the sqlite db
- HashMap<String, String> expectedPool = new HashMap<>();
- for (DatanodeDetails dnid : nodeManager.getAllNodes()) {
- expectedPool.put(dnid.getUuidString(), "DefaultNodePool");
- }
- Connection conn = connectDB(dbOutPath);
- String sql = "SELECT * FROM nodePool";
- ResultSet rs = executeQuery(conn, sql);
- while(rs.next()) {
- String datanodeUUID = rs.getString("datanodeUUID");
- String poolName = rs.getString("poolName");
- assertTrue(expectedPool.remove(datanodeUUID).equals(poolName));
- }
- assertEquals(0, expectedPool.size());
-
- Files.delete(Paths.get(dbOutPath));
- }
-
- @Test
public void testConvertContainerDB() throws Exception {
String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
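
For anyone who still needs to inspect a previously converted nodepool.db,
the removed test above boils down to a plain JDBC scan of the nodePool
table. A hedged sketch: the sqlite path is a placeholder and a sqlite JDBC
driver is assumed to be on the classpath.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class NodePoolQuerySketch {
      public static void main(String[] args) throws SQLException {
        // Placeholder path; point this at an already converted sqlite file.
        String url = "jdbc:sqlite:/tmp/out_sql.db";
        try (Connection conn = DriverManager.getConnection(url);
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT * FROM nodePool")) {
          while (rs.next()) {
            System.out.println(rs.getString("datanodeUUID")
                + " -> " + rs.getString("poolName"));
          }
        }
      }
    }
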
http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a4cdb9/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 2bd43fb..edc0d7b 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.scm.cli;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
-import com.google.protobuf.ByteString;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
@@ -60,13 +59,11 @@ import java.sql.Statement;
import java.util.HashSet;
import java.util.Set;
-import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
/**
@@ -111,15 +108,6 @@ public class SQLCLI extends Configured implements Tool {
private static final String INSERT_CONTAINER_MEMBERS =
"INSERT INTO containerMembers (containerName, datanodeUUID) " +
"VALUES (\"%s\", \"%s\")";
- // for nodepool.db
- private static final String CREATE_NODE_POOL =
- "CREATE TABLE nodePool (" +
- "datanodeUUID TEXT NOT NULL," +
- "poolName TEXT NOT NULL," +
- "PRIMARY KEY(datanodeUUID, poolName))";
- private static final String INSERT_NODE_POOL =
- "INSERT INTO nodePool (datanodeUUID, poolName) " +
- "VALUES (\"%s\", \"%s\")";
// and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
// for openContainer.db
private static final String CREATE_OPEN_CONTAINER =
@@ -285,9 +273,6 @@ public class SQLCLI extends Configured implements Tool {
if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
LOG.info("Converting container DB");
convertContainerDB(dbPath, outPath);
- } else if (dbName.toString().equals(NODEPOOL_DB)) {
- LOG.info("Converting node pool DB");
- convertNodePoolDB(dbPath, outPath);
} else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
LOG.info("Converting open container DB");
convertOpenContainerDB(dbPath, outPath);
@@ -543,66 +528,7 @@ public class SQLCLI extends Configured implements Tool {
}
LOG.info("Insertion completed.");
}
- /**
- * Converts nodePool.db to sqlite. The schema of sql db:
- * two tables, nodePool and datanodeInfo (the same datanode Info as for
- * container.db).
- *
- * nodePool
- * ---------------------------------------------------------
- * datanodeUUID* | poolName*
- * ---------------------------------------------------------
- *
- * datanodeInfo:
- * ---------------------------------------------------------
- * hostname | datanodeUUid* | xferPort | ipcPort
- * ---------------------------------------------------------
- *
- * --------------------------------
- * |containerPort
- * --------------------------------
- *
- * @param dbPath path to container db.
- * @param outPath path to output sqlite
- * @throws IOException throws exception.
- */
- private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception {
- LOG.info("Create table for sql node pool db.");
- File dbFile = dbPath.toFile();
- try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
- .setConf(conf).setDbFile(dbFile).build();
- Connection conn = connectDB(outPath.toString())) {
- executeSQL(conn, CREATE_NODE_POOL);
- executeSQL(conn, CREATE_DATANODE_INFO);
- dbStore.iterate(null, (key, value) -> {
- DatanodeDetails nodeId = DatanodeDetails
- .getFromProtoBuf(HddsProtos.DatanodeDetailsProto
- .PARSER.parseFrom(key));
- String blockPool = DFSUtil.bytes2String(value);
- try {
- insertNodePoolDB(conn, blockPool, nodeId);
- return true;
- } catch (SQLException e) {
- throw new IOException(e);
- }
- });
- }
- }
-
- private void insertNodePoolDB(Connection conn, String blockPool,
- DatanodeDetails datanodeDetails) throws SQLException {
- String insertNodePool = String.format(INSERT_NODE_POOL,
- datanodeDetails.getUuidString(), blockPool);
- executeSQL(conn, insertNodePool);
-
- String insertDatanodeDetails = String
- .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(),
- datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(),
- datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE)
- .getValue());
- executeSQL(conn, insertDatanodeDetails);
- }
/**
* Convert openContainer.db to sqlite db file. This is rather simple db,
[43/45] hadoop git commit: HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c5570be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c5570be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c5570be
Branch: refs/heads/HDDS-4
Commit: 5c5570be7f56495715b7c73f804fb9acef58a720
Parents: 4c73c72
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 22 13:32:28 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:02 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 4 --
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 14 ++---
.../scm/protocol/ScmBlockLocationProtocol.java | 2 +-
.../StorageContainerLocationProtocol.java | 3 +-
.../protocolPB/ScmBlockLocationProtocolPB.java | 4 +-
.../StorageContainerLocationProtocolPB.java | 2 +-
.../apache/hadoop/ozone/OzoneConfigKeys.java | 8 +--
.../common/src/main/resources/ozone-default.xml | 54 ++++++--------------
.../StorageContainerDatanodeProtocol.java | 2 +-
.../StorageContainerDatanodeProtocolPB.java | 2 +-
.../scm/server/StorageContainerManager.java | 12 ++---
.../StorageContainerManagerHttpServer.java | 4 +-
.../compose/compose-secure/docker-compose.yaml | 6 +--
.../test/compose/compose-secure/docker-config | 12 ++---
.../acceptance/ozone-secure.robot | 12 ++---
.../ozone/client/protocol/ClientProtocol.java | 2 +-
.../apache/hadoop/ozone/ksm/KSMConfigKeys.java | 10 ++--
.../ksm/protocol/KeySpaceManagerProtocol.java | 4 +-
.../protocolPB/KeySpaceManagerProtocolPB.java | 3 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 32 ++++++------
.../hadoop/ozone/ksm/KeySpaceManager.java | 13 ++---
.../ozone/ksm/KeySpaceManagerHttpServer.java | 4 +-
22 files changed, 89 insertions(+), 120 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index a12d6ac..dec2c1c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -20,8 +20,4 @@ package org.apache.hadoop.hdds;
public final class HddsConfigKeys {
private HddsConfigKeys() {
}
- public static final String HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY = "hdds.ksm."
- + "kerberos.keytab.file";
- public static final String HDDS_KSM_KERBEROS_PRINCIPAL_KEY = "hdds.ksm"
- + ".kerberos.principal";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 91e1cc2..312eeba 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -132,9 +132,9 @@ public final class ScmConfigKeys {
"ozone.scm.http-address";
public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
"ozone.scm.https-address";
- public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
- "ozone.scm.kerberos.keytab.file";
- public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = "ozone.scm.kerberos.principal";
+ public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
+ "hdds.scm.kerberos.keytab.file";
+ public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = "hdds.scm.kerberos.principal";
public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -255,10 +255,10 @@ public final class ScmConfigKeys {
"ozone.scm.container.close.threshold";
public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
- public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
- "ozone.scm.web.authentication.kerberos.principal";
- public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
- "ozone.scm.web.authentication.kerberos.keytab";
+ public static final String HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+ "hdds.scm.web.authentication.kerberos.principal";
+ public static final String HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+ "hdds.scm.web.authentication.kerberos.keytab";
/**
* Never constructed.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index e17f1c2..2d46ae0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -33,7 +33,7 @@ import java.util.List;
* ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
* to read/write a block.
*/
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index d36bdf3..13545fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.protocol;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -35,7 +34,7 @@ import org.apache.hadoop.security.KerberosInfo;
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes
* that currently host a container.
*/
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerLocationProtocol {
/**
* Asks SCM where a container should be allocated. SCM responds with the
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 89bb066..06bbd05 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,11 +18,9 @@
package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmBlockLocationProtocolService;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
@@ -35,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
protocolVersion = 1)
@InterfaceAudience.Private
@KerberosInfo(
- serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+ serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocolPB
extends ScmBlockLocationProtocolService.BlockingInterface {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index 3bd83f9..f80ba20 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
"org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
protocolVersion = 1)
@KerberosInfo(
- serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+ serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerLocationProtocolPB
extends StorageContainerLocationProtocolService.BlockingInterface {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 33cfa93..663ddab 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -240,9 +240,6 @@ public final class OzoneConfigKeys {
DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
- public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
- "ozone.web.authentication.kerberos.principal";
-
public static final String HDDS_DATANODE_PLUGINS_KEY =
"hdds.datanode.plugins";
@@ -261,6 +258,11 @@ public final class OzoneConfigKeys {
public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
+ public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
+ + "kerberos.keytab.file";
+ public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
+ + ".kerberos.principal";
+
/**
* There is no need to instantiate this class.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 4ada591..ac3bf82 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -345,14 +345,6 @@
</description>
</property>
<property>
- <name>ozone.ksm.keytab.file</name>
- <value/>
- <tag>KSM, SECURITY</tag>
- <description>
- The keytab file for Kerberos authentication in KSM.
- </description>
- </property>
- <property>
<name>ozone.ksm.db.cache.size.mb</name>
<value>128</value>
<tag>KSM, PERFORMANCE</tag>
@@ -825,20 +817,6 @@
the logs. Very useful when debugging REST protocol.
</description>
</property>
- <property>
- <name>ozone.web.authentication.kerberos.principal</name>
- <value/>
- <tag>OZONE, SECURITY</tag>
- <description>
- The server principal used by the SCM and KSM for web UI SPNEGO
- authentication when Kerberos security is enabled. This is typically set to
- HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix
- HTTP/ by convention.
-
- If the value is '*', the web server will attempt to login with
- every principal specified in the keytab file.
- </description>
- </property>
<!--Client Settings-->
<property>
@@ -874,7 +852,7 @@
</property>
<property>
- <name>ozone.scm.container.creation.lease.timeout</name>
+ <name>hdds.scm.container.creation.lease.timeout</name>
<value>60s</value>
<tag>OZONE, SCM</tag>
<description>
@@ -928,7 +906,7 @@
</description>
</property>
<property>
- <name>ozone.scm.container.close.threshold</name>
+ <name>hdds.scm.container.close.threshold</name>
<value>0.9f</value>
<tag>OZONE, SCM</tag>
<description>
@@ -1059,58 +1037,58 @@
</property>
<property>
- <name>ozone.scm.kerberos.keytab.file</name>
+ <name>hdds.scm.kerberos.keytab.file</name>
<value></value>
<tag> OZONE, SECURITY</tag>
<description> The keytab file used by each SCM daemon to login as its
service principal. The principal name is configured with
- ozone.scm.kerberos.principal.
+ hdds.scm.kerberos.principal.
</description>
</property>
<property>
- <name>ozone.scm.kerberos.principal</name>
+ <name>hdds.scm.kerberos.principal</name>
<value></value>
<tag> OZONE, SECURITY</tag>
<description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
</property>
<property>
- <name>hdds.ksm.kerberos.keytab.file</name>
+ <name>ozone.om.kerberos.keytab.file</name>
<value></value>
<tag> HDDS, SECURITY</tag>
- <description> The keytab file used by KSM daemon to login as its
+ <description> The keytab file used by OzoneManager daemon to login as its
service principal. The principal name is configured with
- hdds.ksm.kerberos.principal.
+ ozone.om.kerberos.principal.
</description>
</property>
<property>
- <name>hdds.ksm.kerberos.principal</name>
+ <name>ozone.om.kerberos.principal</name>
<value></value>
<tag> HDDS, SECURITY</tag>
- <description>The KSM service principal. Ex ksm/_HOST@REALM.COM</description>
+ <description>The OzoneManager service principal. Ex om/_HOST@REALM.COM</description>
</property>
<property>
- <name>ozone.scm.web.authentication.kerberos.principal</name>
+ <name>hdds.scm.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@EXAMPLE.COM</value>
</property>
<property>
- <name>ozone.scm.web.authentication.kerberos.keytab</name>
+ <name>hdds.scm.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/HTTP.keytab</value>
</property>
<property>
- <name>hdds.ksm.web.authentication.kerberos.principal</name>
+ <name>ozone.om.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@EXAMPLE.COM</value>
<description>
- KSM http server kerberos principal.
+ OzoneManager http server kerberos principal.
</description>
</property>
<property>
- <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+ <name>ozone.om.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/HTTP.keytab</value>
<description>
- KSM http server kerberos keytab.
+ OzoneManager http server kerberos keytab.
</description>
</property>
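As an illustration (not part of the diff): client and daemon code must follow the same renames, so a secure setup wired up programmatically would now use the hdds.scm.* and ozone.om.* constants this commit introduces. A minimal sketch with placeholder principal and keytab values:

    OzoneConfiguration conf = new OzoneConfiguration();
    // SCM daemon credentials (formerly ozone.scm.kerberos.*)
    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
        "scm/_HOST@EXAMPLE.COM");
    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/scm.keytab");
    // OzoneManager daemon credentials (formerly hdds.ksm.kerberos.*)
    conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
        "om/_HOST@EXAMPLE.COM");
    conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/om.keytab");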
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index 5b04c56..9f18d96 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.security.KerberosInfo;
* Protoc file that defines this protocol.
*/
@KerberosInfo(
- serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+ serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerDatanodeProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9c32ef8..9006e91 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
"org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
protocolVersion = 1)
@KerberosInfo(
- serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerDatanodeProtocolPB extends
StorageContainerDatanodeProtocolService.BlockingInterface {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index e15157e..eacbba1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -78,8 +78,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_M
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
/**
@@ -215,16 +215,16 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
throws IOException, AuthenticationException {
LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
+ "Principal: {}, keytab: {}", this.scmConf.get
- (OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
- this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+ (HDDS_SCM_KERBEROS_PRINCIPAL_KEY),
+ this.scmConf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY));
if (SecurityUtil.getAuthenticationMethod(conf).equals
(AuthenticationMethod.KERBEROS)) {
UserGroupInformation.setConfiguration(this.scmConf);
InetSocketAddress socAddr = HddsServerUtil
.getScmBlockClientBindAddress(conf);
- SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
- OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ SecurityUtil.login(conf, HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ HDDS_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
} else {
throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
(conf) + " authentication method not support. "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index da936ad..41dd89a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -62,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
}
@Override protected String getKeytabFile() {
- return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
+ return ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
}
@Override protected String getSpnegoPrincipal() {
- return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+ return ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
}
@Override protected String getEnabledKey() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
index 2661163..db211bc 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -40,15 +40,15 @@ services:
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","datanode"]
- ksm:
+ om:
image: ahadoop/ozone:v1
- hostname: ksm
+ hostname: om
volumes:
- ${OZONEDIR}:/opt/hadoop
ports:
- 9874:9874
environment:
- ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+ ENSURE_KSM_INITIALIZED: /data/metadata/om/current/VERSION
env_file:
- ./docker-config
command: ["/opt/hadoop/bin/ozone","ksm"]
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
index 678c75a..360b69a 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.ksm.address=om
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
OZONE-SITE.XML_hdds.scm.datanode.id=/data/datanode.id
@@ -25,13 +25,13 @@ OZONE-SITE.XML_hdds.scm.client.address=scm
OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.ksm.kerberos.principal=ksm/ksm@EXAMPLE.COM
-OZONE-SITE.XML_ozone.ksm.kerberos.keytab.file=/etc/security/keytabs/ksm.keytab
+OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
+OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
OZONE-SITE.XML_ozone.security.enabled=true
OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.COM
OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.principal=HTTP/ksm@EXAMPLE.COM
-OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.om.web.authentication.kerberos.principal=HTTP/om@EXAMPLE.COM
+OZONE-SITE.XML_ozone.om.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.scm.client.address=scm
HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
@@ -57,7 +57,7 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH
OZONE_DATANODE_SECURE_USER=root
CONF_DIR=/etc/security/keytabs
-KERBEROS_KEYTABS=dn nn ksm scm HTTP testuser
+KERBEROS_KEYTABS=dn nn om scm HTTP testuser
KERBEROS_KEYSTORES=hadoop
KERBEROS_SERVER=ozone.kdc
JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
index 4a78980..7fc1088 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
@@ -26,7 +26,7 @@ ${version}
*** Test Cases ***
Daemons are running
- Is daemon running ksm
+ Is daemon running om
Is daemon running scm
Is daemon running datanode
Is daemon running ozone.kdc
@@ -45,15 +45,15 @@ Test rest interface
Should contain ${result} 200 OK
Test ozone cli
- ${result} = Execute on 1 datanode ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+ ${result} = Execute on 1 datanode ozone oz -createVolume o3://om/hive -user bilbo -quota 100TB -root
Should contain ${result} Client cannot authenticate via
# Authenticate testuser
Execute on 0 datanode kinit -k testuser/datanode@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
- Execute on 0 datanode ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
- ${result} = Execute on 0 datanode ozone oz -listVolume o3://ksm/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+ Execute on 0 datanode ozone oz -createVolume o3://om/hive -user bilbo -quota 100TB -root
+ ${result} = Execute on 0 datanode ozone oz -listVolume o3://om/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
Should contain ${result} createdOn
- Execute on 0 datanode ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
- ${result} = Execute on 0 datanode ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+ Execute on 0 datanode ozone oz -updateVolume o3://om/hive -user bill -quota 10TB
+ ${result} = Execute on 0 datanode ozone oz -infoVolume o3://om/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
Should Be Equal ${result} bill
*** Keywords ***
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 80b0a40..ee5dca9 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.security.KerberosInfo;
* includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
* {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST.
*/
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ClientProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
index d911bcb..cc25dbe 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -49,8 +49,6 @@ public final class KSMConfigKeys {
"ozone.ksm.http-address";
public static final String OZONE_KSM_HTTPS_ADDRESS_KEY =
"ozone.ksm.https-address";
- public static final String OZONE_KSM_KEYTAB_FILE =
- "ozone.ksm.keytab.file";
public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874;
public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875;
@@ -79,8 +77,8 @@ public final class KSMConfigKeys {
"ozone.key.deleting.limit.per.task";
public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
- public static final String KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
- "hdds.ksm.web.authentication.kerberos.principal";
- public static final String KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
- "hdds.ksm.web.authentication.kerberos.keytab";
+ public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+ "ozone.om.web.authentication.kerberos.principal";
+ public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+ "ozone.om.web.authentication.kerberos.keytab";
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
index de27108..21c36fa 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.ozone.ksm.protocol;
-import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
@@ -36,7 +36,7 @@ import org.apache.hadoop.security.KerberosInfo;
* Protocol to talk to KSM.
*/
@KerberosInfo(
- serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
+ serverPrincipal = OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
public interface KeySpaceManagerProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
index 71b9da0..84fe154 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.ksm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.protocol.proto
@@ -32,7 +31,7 @@ import org.apache.hadoop.security.KerberosInfo;
"org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
protocolVersion = 1)
@KerberosInfo(
- serverPrincipal = HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY)
+ serverPrincipal = OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface KeySpaceManagerProtocolPB
extends KeySpaceManagerService.BlockingInterface {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index b917dfe..cc97576 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -29,9 +29,7 @@ import java.util.UUID;
import java.util.concurrent.Callable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
@@ -120,12 +118,12 @@ public final class TestSecureOzoneCluster {
private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
throws Exception {
createPrincipal(scmKeytab,
- conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
+ conf.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY));
createPrincipal(spnegoKeytab,
- conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
- conf.get(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
+ conf.get(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+ conf.get(KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL));
createPrincipal(ksmKeyTab,
- conf.get(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY));
+ conf.get(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY));
}
private void createPrincipal(File keytab, String... principal)
@@ -155,25 +153,25 @@ public final class TestSecureOzoneCluster {
"kerberos");
conf.set(OZONE_ADMINISTRATORS, curUser);
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
"scm/" + host + "@" + realm);
- conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ conf.set(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
"HTTP_SCM/" + host + "@" + realm);
- conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
"ksm/" + host + "@" + realm);
- conf.set(KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
+ conf.set(KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL,
"HTTP_KSM/" + host + "@" + realm);
scmKeytab = new File(workDir, "scm.keytab");
spnegoKeytab = new File(workDir, "http.keytab");
ksmKeyTab = new File(workDir, "ksm.keytab");
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
scmKeytab.getAbsolutePath());
- conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
spnegoKeytab.getAbsolutePath());
- conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
ksmKeyTab.getAbsolutePath());
}
@@ -206,7 +204,7 @@ public final class TestSecureOzoneCluster {
@Test
public void testSecureScmStartupFailure() throws Exception {
initSCM();
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
+ conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
@@ -216,9 +214,9 @@ public final class TestSecureOzoneCluster {
StorageContainerManager.createSCM(null, conf);
});
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
"scm/_HOST@EXAMPLE.com");
- conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
"/etc/security/keytabs/scm.keytab");
testCommonKerberosFailures(
@@ -261,7 +259,7 @@ public final class TestSecureOzoneCluster {
ksmStore.setScmId("testScmId");
// writes the version file properties
ksmStore.initialize();
- conf.set(HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY,
+ conf.set(OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
"non-existent-user@EXAMPLE.com");
testCommonKerberosFailures(() -> KeySpaceManager.createKSM(null, conf));
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index be747d2..3e5a1e8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -88,8 +88,8 @@ import java.util.List;
import java.util.Map;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
.OZONE_KSM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
@@ -213,14 +213,15 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
if (SecurityUtil.getAuthenticationMethod(conf).equals
(AuthenticationMethod.KERBEROS)) {
LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
- + "Principal: {},keytab: {}", conf.get(HDDS_KSM_KERBEROS_PRINCIPAL_KEY),
- conf.get(HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY));
+ + "Principal: {},keytab: {}", conf.get(
+ OZONE_OM_KERBEROS_PRINCIPAL_KEY),
+ conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
UserGroupInformation.setConfiguration(conf);
InetSocketAddress socAddr = getKsmAddress(conf);
- SecurityUtil.login(conf, HDDS_KSM_KERBEROS_KEYTAB_FILE_KEY,
- HDDS_KSM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ SecurityUtil.login(conf, OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+ OZONE_OM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
} else {
throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
(conf) + " authentication method not supported. KSM user login "
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5570be/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
index a0d15b3..9848840 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
@@ -64,11 +64,11 @@ public class KeySpaceManagerHttpServer extends BaseHttpServer {
}
@Override protected String getKeytabFile() {
- return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
+ return KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
}
@Override protected String getSpnegoPrincipal() {
- return KSMConfigKeys.KSM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+ return KSMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
}
@Override protected String getEnabledKey() {
[35/45] hadoop git commit: HDFS-13635. Incorrect message when block is not found. Contributed by Gabor Bota.
Posted by xy...@apache.org.
HDFS-13635. Incorrect message when block is not found. Contributed by Gabor Bota.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fef20a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fef20a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fef20a44
Branch: refs/heads/HDDS-4
Commit: fef20a446f7bf9f29e0f0ee690987fb6fc78a031
Parents: f51da9c
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Mon Jul 2 09:41:15 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Mon Jul 2 09:41:15 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdfs/server/datanode/ReplicaNotFoundException.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fef20a44/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
index 90f257f..946950c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java
@@ -37,7 +37,7 @@ public class ReplicaNotFoundException extends IOException {
"Cannot recover append/close to a replica that's not FINALIZED and not RBW"
+ " ";
public final static String NON_EXISTENT_REPLICA =
- "Cannot append to a non-existent replica ";
+ "Replica does not exist ";
public final static String UNEXPECTED_GS_REPLICA =
"Cannot append to a replica with unexpected generation stamp ";
[06/45] hadoop git commit: YARN-8401. [UI2] new ui is not accessible without internet connection. Contributed by Bibin A Chundatt.
Posted by xy...@apache.org.
YARN-8401. [UI2] new ui is not accessible without internet connection. Contributed by Bibin A Chundatt.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbaff369
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbaff369
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbaff369
Branch: refs/heads/HDDS-4
Commit: fbaff369e9b9022723a7b2c6f25e71122a8f8a15
Parents: bedc4fe
Author: Sunil G <su...@apache.org>
Authored: Wed Jun 27 10:35:15 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Wed Jun 27 10:35:15 2018 -0700
----------------------------------------------------------------------
.../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaff369/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml
index ddb8532..ac74d5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/web.xml
@@ -16,10 +16,6 @@
* limitations under the License.
-->
-<!DOCTYPE web-app PUBLIC
- "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
- "http://java.sun.com/dtd/web-app_2_3.dtd" >
-
-<web-app>
+<web-app version="2.4" xmlns="http://java.sun.com/xml/ns/j2ee">
<display-name>YARN UI</display-name>
</web-app>
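The mechanism behind the fix, as the diff suggests: the removed DOCTYPE pointed validating XML parsers at the external DTD http://java.sun.com/dtd/web-app_2_3.dtd, so deploying the UI on a host without internet access could hang or fail while the container tried to fetch that DTD; the schema-style <web-app version="2.4"> declaration carries no external DTD reference, so the descriptor parses offline.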
[18/45] hadoop git commit: HADOOP-14313. Replace/improve Hadoop's byte[] comparator. Contributed by Vikas Vishwakarma.
Posted by xy...@apache.org.
HADOOP-14313. Replace/improve Hadoop's byte[] comparator. Contributed by Vikas Vishwakarma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddbff7c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddbff7c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddbff7c8
Branch: refs/heads/HDDS-4
Commit: ddbff7c8d3f1851e5c5fa9bc33637e859d7d8ccf
Parents: 2b2399d
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jun 28 14:58:40 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Jun 28 14:58:40 2018 +0900
----------------------------------------------------------------------
NOTICE.txt | 8 ++++
.../apache/hadoop/io/FastByteComparisons.java | 44 ++++++++------------
2 files changed, 25 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddbff7c8/NOTICE.txt
----------------------------------------------------------------------
diff --git a/NOTICE.txt b/NOTICE.txt
index 95a670d..a53f13c 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -196,6 +196,14 @@ by Google Inc, which can be obtained at:
* HOMEPAGE:
* http://code.google.com/p/snappy/
+This product contains a modified portion of UnsignedBytes LexicographicalComparator
+from Guava v21 project by Google Inc, which can be obtained at:
+
+ * LICENSE:
+ * license/COPYING (Apache License 2.0)
+ * HOMEPAGE:
+ * https://github.com/google/guava
+
This product optionally depends on 'JBoss Marshalling', an alternative Java
serialization API, which can be obtained at:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddbff7c8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
index a2903f8..5af6602 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java
@@ -26,7 +26,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import sun.misc.Unsafe;
-import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedBytes;
/**
@@ -195,52 +194,43 @@ abstract class FastByteComparisons {
length1 == length2) {
return 0;
}
+ final int stride = 8;
int minLength = Math.min(length1, length2);
- int minWords = minLength / Longs.BYTES;
+ int strideLimit = minLength & ~(stride - 1);
int offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
int offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
+ int i;
/*
* Compare 8 bytes at a time. Benchmarking shows comparing 8 bytes at a
* time is no slower than comparing 4 bytes at a time even on 32-bit.
* On the other hand, it is substantially faster on 64-bit.
*/
- for (int i = 0; i < minWords * Longs.BYTES; i += Longs.BYTES) {
+ for (i = 0; i < strideLimit; i += stride) {
long lw = theUnsafe.getLong(buffer1, offset1Adj + (long) i);
long rw = theUnsafe.getLong(buffer2, offset2Adj + (long) i);
- long diff = lw ^ rw;
- if (diff != 0) {
+ if (lw != rw) {
if (!littleEndian) {
return lessThanUnsigned(lw, rw) ? -1 : 1;
}
- // Use binary search
- int n = 0;
- int y;
- int x = (int) diff;
- if (x == 0) {
- x = (int) (diff >>> 32);
- n = 32;
- }
-
- y = x << 16;
- if (y == 0) {
- n += 16;
- } else {
- x = y;
- }
-
- y = x << 8;
- if (y == 0) {
- n += 8;
- }
- return (int) (((lw >>> n) & 0xFFL) - ((rw >>> n) & 0xFFL));
+ /*
+ * We want to compare only the first index where left[index] !=
+ * right[index]. This corresponds to the least significant nonzero
+ * byte in lw ^ rw, since lw and rw are little-endian.
+ * Long.numberOfTrailingZeros(diff) tells us the least significant
+ * nonzero bit, and zeroing out the first three bits of L.nTZ gives
+ * us the shift to get that least significant nonzero byte. This
+ * comparison logic is based on UnsignedBytes from Guava v21
+ */
+ int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
+ return ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
}
}
// The epilogue to cover the last (minLength % 8) elements.
- for (int i = minWords * Longs.BYTES; i < minLength; i++) {
+ for (; i < minLength; i++) {
int result = UnsignedBytes.compare(
buffer1[offset1 + i],
buffer2[offset2 + i]);
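To make the trailing-zeros trick concrete, here is a small standalone sketch (illustrative, not from the patch) of the least-significant-differing-byte extraction on little-endian hardware, where the byte at the lower array index lands in the low-order bits of the word:

    // words as theUnsafe.getLong would read them on a little-endian machine
    long lw = 0x0000_0000_00CA_0201L; // array bytes: 01 02 CA 00 00 00 00 00
    long rw = 0x0000_0000_00FE_0201L; // array bytes: 01 02 FE 00 00 00 00 00
    long diff = lw ^ rw;              // nonzero exactly where the arrays differ
    int n = Long.numberOfTrailingZeros(diff) & ~0x7; // 18 & ~0x7 = 16
    int result = ((int) ((lw >>> n) & 0xFF)) - ((int) ((rw >>> n) & 0xFF));
    // result = 0xCA - 0xFE = -52: buffer1 sorts first, matching an unsigned
    // byte-by-byte comparison that stops at index 2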
[03/45] hadoop git commit: YARN-8461. Support strict memory control on individual container with elastic control memory mechanism. Contributed by Haibo Chen.
Posted by xy...@apache.org.
YARN-8461. Support strict memory control on individual container with elastic control memory mechanism. Contributed by Haibo Chen.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62d83ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62d83ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62d83ca5
Branch: refs/heads/HDDS-4
Commit: 62d83ca5360cf803ecf6780caf192462d0092009
Parents: b69ba0f
Author: Miklos Szegedi <mi...@cloudera.com>
Authored: Tue Jun 26 15:21:35 2018 -0700
Committer: Miklos Szegedi <mi...@cloudera.com>
Committed: Tue Jun 26 15:21:35 2018 -0700
----------------------------------------------------------------------
.../CGroupsMemoryResourceHandlerImpl.java | 24 +++++
.../linux/resources/MemoryResourceHandler.java | 10 ++
.../monitor/ContainersMonitorImpl.java | 108 +++++++++++--------
.../TestCGroupsMemoryResourceHandlerImpl.java | 43 ++++++++
4 files changed, 142 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d83ca5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index a57adb1..053b796 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -34,6 +34,9 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg
import java.io.File;
import java.util.ArrayList;
import java.util.List;
+import java.util.Optional;
+
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
/**
* Handler class to handle the memory controller. YARN already ships a
@@ -172,4 +175,25 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
return null;
}
+ @Override
+ public Optional<Boolean> isUnderOOM(ContainerId containerId) {
+ try {
+ String status = cGroupsHandler.getCGroupParam(
+ CGroupsHandler.CGroupController.MEMORY,
+ containerId.toString(),
+ CGROUP_PARAM_MEMORY_OOM_CONTROL);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("cgroups OOM status for " + containerId + ": " + status);
+ }
+ if (status.contains(CGroupsHandler.UNDER_OOM)) {
+ LOG.warn("Container " + containerId + " under OOM based on cgroups.");
+ return Optional.of(true);
+ } else {
+ return Optional.of(false);
+ }
+ } catch (ResourceHandlerException e) {
+ LOG.warn("Could not read cgroups" + containerId, e);
+ }
+ return Optional.empty();
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d83ca5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
index 013a49f..1729fc1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
@@ -20,8 +20,18 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
+import java.util.Optional;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public interface MemoryResourceHandler extends ResourceHandler {
+ /**
+ * Check whether a container is under OOM.
+ * @param containerId the id of the container
+ * @return empty if the status is unknown, true if the container is under
+ * OOM, false otherwise
+ */
+ Optional<Boolean> isUnderOOM(ContainerId containerId);
}
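A short sketch of how this tri-state contract is meant to be consumed; the class and method names here are illustrative, not from the patch:

import java.util.Optional;

// Illustrative consumer of the Optional<Boolean> tri-state above:
// empty = status unknown, true = under OOM, false = within limits.
public class OomStatusSketch {
  static String describe(Optional<Boolean> underOom) {
    if (!underOom.isPresent()) {
      return "unknown: fall back to procfs-based checks";
    }
    return underOom.get() ? "under OOM: kill the container"
                          : "within limits: nothing to do";
  }

  public static void main(String[] args) {
    System.out.println(describe(Optional.empty()));
    System.out.println(describe(Optional.of(true)));
    System.out.println(describe(Optional.of(false)));
  }
}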
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d83ca5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index bd68dfe..d83fe39 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -22,6 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.MemoryResourceHandler;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.slf4j.Logger;
@@ -51,6 +52,7 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
/**
@@ -697,55 +699,75 @@ public class ContainersMonitorImpl extends AbstractService implements
ProcessTreeInfo ptInfo,
long currentVmemUsage,
long currentPmemUsage) {
- if (elasticMemoryEnforcement || strictMemoryEnforcement) {
- // We enforce the overall memory usage instead of individual containers
- return;
- }
- boolean isMemoryOverLimit = false;
- long vmemLimit = ptInfo.getVmemLimit();
- long pmemLimit = ptInfo.getPmemLimit();
- // as processes begin with an age 1, we want to see if there
- // are processes more than 1 iteration old.
- long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
- long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
+ Optional<Boolean> isMemoryOverLimit = Optional.empty();
String msg = "";
int containerExitStatus = ContainerExitStatus.INVALID;
- if (isVmemCheckEnabled()
- && isProcessTreeOverLimit(containerId.toString(),
- currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
- // The current usage (age=0) is always higher than the aged usage. We
- // do not show the aged size in the message, base the delta on the
- // current usage
- long delta = currentVmemUsage - vmemLimit;
- // Container (the root process) is still alive and overflowing
- // memory.
- // Dump the process-tree and then clean it up.
- msg = formatErrorMessage("virtual",
- formatUsageString(currentVmemUsage, vmemLimit,
+
+ if (strictMemoryEnforcement && elasticMemoryEnforcement) {
+ // Both elastic memory control and strict memory control are enabled
+ // through cgroups. A container will be frozen by the elastic memory
+ // control mechanism if it exceeds its request, so we check for this
+ // here and kill it. Otherwise, the container will not be killed if
+ // the node never exceeds its limit and the procfs-based
+ // memory accounting is different from the cgroup-based accounting.
+
+ MemoryResourceHandler handler =
+ ResourceHandlerModule.getMemoryResourceHandler();
+ if (handler != null) {
+ isMemoryOverLimit = handler.isUnderOOM(containerId);
+ containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
+ msg = containerId + " is under oom because it exceeded its" +
+ " physical memory limit";
+ }
+ } else if (strictMemoryEnforcement || elasticMemoryEnforcement) {
+ // if cgroup-based memory control is enabled
+ isMemoryOverLimit = Optional.of(false);
+ }
+
+ if (!isMemoryOverLimit.isPresent()) {
+ long vmemLimit = ptInfo.getVmemLimit();
+ long pmemLimit = ptInfo.getPmemLimit();
+ // as processes begin with an age 1, we want to see if there
+ // are processes more than 1 iteration old.
+ long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
+ long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
+ if (isVmemCheckEnabled()
+ && isProcessTreeOverLimit(containerId.toString(),
+ currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
+ // The current usage (age=0) is always higher than the aged usage. We
+ // do not show the aged size in the message, base the delta on the
+ // current usage
+ long delta = currentVmemUsage - vmemLimit;
+ // Container (the root process) is still alive and overflowing
+ // memory.
+ // Dump the process-tree and then clean it up.
+ msg = formatErrorMessage("virtual",
+ formatUsageString(currentVmemUsage, vmemLimit,
currentPmemUsage, pmemLimit),
- pId, containerId, pTree, delta);
- isMemoryOverLimit = true;
- containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
- } else if (isPmemCheckEnabled()
- && isProcessTreeOverLimit(containerId.toString(),
- currentPmemUsage, curRssMemUsageOfAgedProcesses,
- pmemLimit)) {
- // The current usage (age=0) is always higher than the aged usage. We
- // do not show the aged size in the message, base the delta on the
- // current usage
- long delta = currentPmemUsage - pmemLimit;
- // Container (the root process) is still alive and overflowing
- // memory.
- // Dump the process-tree and then clean it up.
- msg = formatErrorMessage("physical",
- formatUsageString(currentVmemUsage, vmemLimit,
+ pId, containerId, pTree, delta);
+ isMemoryOverLimit = Optional.of(true);
+ containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
+ } else if (isPmemCheckEnabled()
+ && isProcessTreeOverLimit(containerId.toString(),
+ currentPmemUsage, curRssMemUsageOfAgedProcesses,
+ pmemLimit)) {
+ // The current usage (age=0) is always higher than the aged usage. We
+ // do not show the aged size in the message, base the delta on the
+ // current usage
+ long delta = currentPmemUsage - pmemLimit;
+ // Container (the root process) is still alive and overflowing
+ // memory.
+ // Dump the process-tree and then clean it up.
+ msg = formatErrorMessage("physical",
+ formatUsageString(currentVmemUsage, vmemLimit,
currentPmemUsage, pmemLimit),
- pId, containerId, pTree, delta);
- isMemoryOverLimit = true;
- containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
+ pId, containerId, pTree, delta);
+ isMemoryOverLimit = Optional.of(true);
+ containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
+ }
}
- if (isMemoryOverLimit) {
+ if (isMemoryOverLimit.isPresent() && isMemoryOverLimit.get()) {
// Virtual or physical memory over limit. Fail the container and
// remove
// the corresponding process tree
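The three-way decision introduced above can be summarized in a small standalone sketch (illustrative only; the real method also builds the kill message and exit status):

import java.util.Optional;

// Illustrative summary of the new enforcement decision:
//   strict && elastic -> ask cgroups whether the container is under OOM
//   strict || elastic -> cgroups already enforces; skip per-container checks
//   neither           -> empty, so the procfs-based checks still run
public class EnforcementDecisionSketch {
  static Optional<Boolean> decide(boolean strict, boolean elastic,
      Optional<Boolean> cgroupOomStatus) {
    if (strict && elastic) {
      return cgroupOomStatus; // may be empty if the cgroup was unreadable
    }
    if (strict || elastic) {
      return Optional.of(false);
    }
    return Optional.empty();
  }

  public static void main(String[] args) {
    System.out.println(decide(true, true, Optional.of(true))); // Optional[true]
    System.out.println(decide(true, false, Optional.empty())); // Optional[false]
    System.out.println(decide(false, false, Optional.empty())); // Optional.empty
  }
}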
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d83ca5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
index 5c7e233..4d3e7e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
@@ -31,7 +31,9 @@ import org.junit.Test;
import org.junit.Assert;
import java.util.List;
+import java.util.Optional;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
import static org.mockito.Mockito.*;
/**
@@ -242,4 +244,45 @@ public class TestCGroupsMemoryResourceHandlerImpl {
.updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id,
CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, "1024M");
}
+
+ @Test
+ public void testContainerUnderOom() throws Exception {
+ Configuration conf = new YarnConfiguration();
+ conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
+ conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
+
+ cGroupsMemoryResourceHandler.bootstrap(conf);
+
+ ContainerId containerId = mock(ContainerId.class);
+ when(containerId.toString()).thenReturn("container_01_01");
+
+ when(mockCGroupsHandler.getCGroupParam(
+ CGroupsHandler.CGroupController.MEMORY,
+ containerId.toString(),
+ CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn(CGroupsHandler.UNDER_OOM);
+ Optional<Boolean> outOfOom =
+ cGroupsMemoryResourceHandler.isUnderOOM(containerId);
+ Assert.assertTrue("The container should be reported to run under oom",
+ outOfOom.isPresent() && outOfOom.get().equals(true));
+
+ when(mockCGroupsHandler.getCGroupParam(
+ CGroupsHandler.CGroupController.MEMORY,
+ containerId.toString(),
+ CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn("");
+ outOfOom = cGroupsMemoryResourceHandler.isUnderOOM(containerId);
+ Assert.assertTrue(
+ "The container should not be reported to run under oom",
+ outOfOom.isPresent() && outOfOom.get().equals(false));
+
+ when(mockCGroupsHandler.getCGroupParam(
+ CGroupsHandler.CGroupController.MEMORY,
+ containerId.toString(),
+ CGROUP_PARAM_MEMORY_OOM_CONTROL)).
+ thenThrow(new ResourceHandlerException());
+ outOfOom = cGroupsMemoryResourceHandler.isUnderOOM(containerId);
+ Assert.assertFalse(
+ "No report of the oom status should be available.",
+ outOfOom.isPresent());
+
+ }
}
---------------------------------------------------------------------
[02/45] hadoop git commit: YARN-8108. Added option to disable loading
existing filters to prevent the security filter from initializing twice.
Contributed by Sunil Govindan
Posted by xy...@apache.org.
YARN-8108. Added option to disable loading existing filters to prevent
the security filter from initializing twice.
Contributed by Sunil Govindan
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b69ba0f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b69ba0f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b69ba0f3
Branch: refs/heads/HDDS-4
Commit: b69ba0f3307a90500aeb0c5db9e582fcda60b501
Parents: 3e58633
Author: Eric Yang <ey...@apache.org>
Authored: Tue Jun 26 17:34:57 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Jun 26 17:34:57 2018 -0400
----------------------------------------------------------------------
.../java/org/apache/hadoop/yarn/webapp/WebApps.java | 14 +++++++++++---
.../yarn/server/resourcemanager/ResourceManager.java | 2 +-
2 files changed, 12 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b69ba0f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 0d045f3..0e9f0a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -83,6 +83,7 @@ public class WebApps {
public String name;
public String spec;
public Map<String, String> params;
+ public boolean loadExistingFilters = true;
}
final String name;
@@ -151,12 +152,13 @@ public class WebApps {
public Builder<T> withServlet(String name, String pathSpec,
Class<? extends HttpServlet> servlet,
- Map<String, String> params) {
+ Map<String, String> params, boolean loadExistingFilters) {
ServletStruct struct = new ServletStruct();
struct.clazz = servlet;
struct.name = name;
struct.spec = pathSpec;
struct.params = params;
+ struct.loadExistingFilters = loadExistingFilters;
servlets.add(struct);
return this;
}
@@ -256,9 +258,15 @@ public class WebApps {
pathList.add("/" + wsName + "/*");
}
}
+
for (ServletStruct s : servlets) {
if (!pathList.contains(s.spec)) {
- pathList.add(s.spec);
+ // The servlet asked us not to load the existing filters, but we
+ // still always want the default authentication filter, so add the
+ // servlet's path spec to the pathList.
+ if (!s.loadExistingFilters) {
+ pathList.add(s.spec);
+ }
}
}
if (conf == null) {
@@ -333,7 +341,7 @@ public class WebApps {
HttpServer2 server = builder.build();
for(ServletStruct struct: servlets) {
- if (struct.params != null) {
+ if (!struct.loadExistingFilters) {
server.addInternalServlet(struct.name, struct.spec,
struct.clazz, struct.params);
} else {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b69ba0f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index c533111..0b7e87c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -1111,7 +1111,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
"ws")
.with(conf)
.withServlet("API-Service", "/app/*",
- ServletContainer.class, params)
+ ServletContainer.class, params, false)
.withHttpSpnegoPrincipalKey(
YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY)
.withHttpSpnegoKeytabKey(
---------------------------------------------------------------------
[32/45] hadoop git commit: HDFS-13703. Avoid allocation of
CorruptedBlocks hashmap when no corrupted blocks are hit. Contributed by Todd
Lipcon.
Posted by xy...@apache.org.
HDFS-13703. Avoid allocation of CorruptedBlocks hashmap when no corrupted blocks are hit. Contributed by Todd Lipcon.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ba99741
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ba99741
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ba99741
Branch: refs/heads/HDDS-4
Commit: 6ba99741086170b83c38d3e7e715d9e8046a1e00
Parents: d401218
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 2 12:02:19 2018 +0200
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 2 12:02:19 2018 +0200
----------------------------------------------------------------------
.../main/java/org/apache/hadoop/hdfs/DFSInputStream.java | 2 +-
.../main/java/org/apache/hadoop/hdfs/DFSUtilClient.java | 11 ++++++-----
.../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 2 +-
3 files changed, 8 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba99741/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 573b860..1bdc50a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1423,7 +1423,7 @@ public class DFSInputStream extends FSInputStream
Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap =
corruptedBlocks.getCorruptionMap();
- if (corruptedBlockMap.isEmpty()) {
+ if (corruptedBlockMap == null) {
return;
}
List<LocatedBlock> reportList = new ArrayList<>(corruptedBlockMap.size());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba99741/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 6c0b106..313b973 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -751,14 +751,14 @@ public class DFSUtilClient {
public static class CorruptedBlocks {
private Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap;
- public CorruptedBlocks() {
- this.corruptionMap = new HashMap<>();
- }
-
/**
* Indicate a block replica on the specified datanode is corrupted
*/
public void addCorruptedBlock(ExtendedBlock blk, DatanodeInfo node) {
+ if (corruptionMap == null) {
+ corruptionMap = new HashMap<>();
+ }
+
Set<DatanodeInfo> dnSet = corruptionMap.get(blk);
if (dnSet == null) {
dnSet = new HashSet<>();
@@ -770,7 +770,8 @@ public class DFSUtilClient {
}
/**
- * @return the map that contains all the corruption entries.
+ * @return the map that contains all the corruption entries, or null if
+ * there were no corrupted entries
*/
public Map<ExtendedBlock, Set<DatanodeInfo>> getCorruptionMap() {
return corruptionMap;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ba99741/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 96b0f36..4baafb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1278,7 +1278,7 @@ public class DataNode extends ReconfigurableBase
DFSUtilClient.CorruptedBlocks corruptedBlocks) throws IOException {
Map<ExtendedBlock, Set<DatanodeInfo>> corruptionMap =
corruptedBlocks.getCorruptionMap();
- if (!corruptionMap.isEmpty()) {
+ if (corruptionMap != null) {
for (Map.Entry<ExtendedBlock, Set<DatanodeInfo>> entry :
corruptionMap.entrySet()) {
for (DatanodeInfo dnInfo : entry.getValue()) {
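The change is plain lazy initialization; a standalone sketch of the pattern (hypothetical class, not from the patch):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical sketch of the lazy-allocation pattern applied above: the map
// is only created on the first insert, and callers treat a null map as
// "no corrupted blocks", so the common error-free path allocates nothing.
public class LazyMultimapSketch<K, V> {
  private Map<K, Set<V>> map; // stays null until the first entry arrives

  public void add(K key, V value) {
    if (map == null) {
      map = new HashMap<>();
    }
    map.computeIfAbsent(key, k -> new HashSet<>()).add(value);
  }

  /** Returns all entries, or null if nothing was ever added. */
  public Map<K, Set<V>> entries() {
    return map;
  }
}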
---------------------------------------------------------------------
[36/45] hadoop git commit: YARN-8465. Fixed docker container status
for node manager restart. Contributed by Shane Kumpf
Posted by xy...@apache.org.
YARN-8465. Fixed docker container status for node manager restart.
Contributed by Shane Kumpf
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cc2541a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cc2541a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cc2541a
Branch: refs/heads/HDDS-4
Commit: 5cc2541a163591181b80bf2ec42c1e7e7f8929f5
Parents: fef20a4
Author: Eric Yang <ey...@apache.org>
Authored: Mon Jul 2 13:37:51 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Mon Jul 2 13:37:51 2018 -0400
----------------------------------------------------------------------
.../linux/runtime/DockerLinuxContainerRuntime.java | 4 ++--
.../runtime/ContainerExecutionException.java | 6 ++++++
.../linux/runtime/TestDockerContainerRuntime.java | 10 ++++++++--
3 files changed, 16 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc2541a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index f13ba59..c89d5fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -1027,7 +1027,6 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
handleContainerKill(ctx, env, signal);
}
} catch (ContainerExecutionException e) {
- LOG.warn("Signal docker container failed. Exception: ", e);
throw new ContainerExecutionException("Signal docker container failed",
e.getExitCode(), e.getOutput(), e.getErrorOutput());
}
@@ -1201,7 +1200,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
if (!new File(procFs + File.separator + pid).exists()) {
String msg = "Liveliness check failed for PID: " + pid
+ ". Container may have already completed.";
- throw new ContainerExecutionException(msg);
+ throw new ContainerExecutionException(msg,
+ PrivilegedOperation.ResultCode.INVALID_CONTAINER_PID.getValue());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc2541a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
index 3147277..735db1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerExecutionException.java
@@ -53,6 +53,12 @@ public class ContainerExecutionException extends YarnException {
errorOutput = OUTPUT_UNSET;
}
+ public ContainerExecutionException(String message, int exitCode) {
+ super(message);
+ this.exitCode = exitCode;
+ this.output = OUTPUT_UNSET;
+ this.errorOutput = OUTPUT_UNSET;
+ }
public ContainerExecutionException(String message, int exitCode, String
output, String errorOutput) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc2541a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index d85c403..855ec44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -1492,7 +1492,7 @@ public class TestDockerContainerRuntime {
runtime.signalContainer(builder.build());
}
- @Test(expected = ContainerExecutionException.class)
+ @Test
public void testContainerLivelinessNoFileException() throws Exception {
DockerLinuxContainerRuntime runtime = new DockerLinuxContainerRuntime(
mockExecutor, mockCGroupsHandler);
@@ -1501,7 +1501,13 @@ public class TestDockerContainerRuntime {
.setExecutionAttribute(PID, signalPid)
.setExecutionAttribute(SIGNAL, ContainerExecutor.Signal.NULL);
runtime.initialize(enableMockContainerExecutor(conf), null);
- runtime.signalContainer(builder.build());
+ try {
+ runtime.signalContainer(builder.build());
+ } catch (ContainerExecutionException e) {
+ Assert.assertEquals(
+ PrivilegedOperation.ResultCode.INVALID_CONTAINER_PID.getValue(),
+ e.getExitCode());
+ }
}
@Test
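A minimal sketch of the new two-argument constructor's contract (standalone harness, not from the patch; the message text is illustrative):

import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperation;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;

// Illustrative only: the (message, exitCode) constructor carries a result
// code, so callers can treat "container already gone" as a known condition
// rather than a generic failure.
public class ExitCodeSketch {
  public static void main(String[] args) {
    ContainerExecutionException e = new ContainerExecutionException(
        "Liveliness check failed for PID: 1234",
        PrivilegedOperation.ResultCode.INVALID_CONTAINER_PID.getValue());
    System.out.println("exit code = " + e.getExitCode());
  }
}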
---------------------------------------------------------------------
[42/45] hadoop git commit: HDDS-100. SCM CA: generate public/private
key pair for SCM/OM/DNs. Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ca01442
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ca01442
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ca01442
Branch: refs/heads/HDDS-4
Commit: 7ca0144241eea00b8f81836fb1e5d3cbb5a1231f
Parents: 5c5570b
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jun 8 08:33:58 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:02 2018 -0700
----------------------------------------------------------------------
hadoop-hdds/common/pom.xml | 7 +-
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 23 ++
.../hdds/security/x509/HDDSKeyGenerator.java | 99 ++++++++
.../hdds/security/x509/HDDSKeyPEMWriter.java | 254 +++++++++++++++++++
.../hdds/security/x509/SecurityConfig.java | 190 ++++++++++++++
.../hadoop/hdds/security/x509/package-info.java | 25 ++
.../common/src/main/resources/ozone-default.xml | 42 +++
.../security/x509/TestHDDSKeyGenerator.java | 81 ++++++
.../security/x509/TestHDDSKeyPEMWriter.java | 213 ++++++++++++++++
.../ozone/TestOzoneConfigurationFields.java | 6 +
10 files changed, 938 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index a8a634c..819e0ee 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -73,14 +73,17 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>rocksdbjni</artifactId>
<version>5.8.0</version>
</dependency>
-
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
-
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <version>1.49</version>
+ </dependency>
</dependencies>
<build>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..db78348 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -17,7 +17,30 @@
*/
package org.apache.hadoop.hdds;
+/**
+ * HDDS Config.
+ */
public final class HddsConfigKeys {
private HddsConfigKeys() {
}
+
+ public static final String HDDS_KEY_LEN = "hdds.key.len";
+ public static final int HDDS_DEFAULT_KEY_LEN = 2048;
+ public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo";
+ public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA";
+ public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider";
+ public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC";
+ public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name";
+ public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys";
+
+ // TODO : Talk to StorageIO classes and see if they can return a secure
+ // storage location for each node.
+ public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir";
+ public static final String HDDS_PRIVATE_KEY_FILE_NAME =
+ "hdds.priv.key.file.name";
+ public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = "private.pem";
+ public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
+ + ".name";
+ public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
new file mode 100644
index 0000000..cb411b2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.security.x509;
+
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+
+/** A class to generate Key Pair for use with Certificates. */
+public class HDDSKeyGenerator {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HDDSKeyGenerator.class);
+ private final SecurityConfig securityConfig;
+
+ /**
+ * Constructor for HDDSKeyGenerator.
+ *
+ * @param configuration - config
+ */
+ public HDDSKeyGenerator(Configuration configuration) {
+ this.securityConfig = new SecurityConfig(configuration);
+ }
+
+ /**
+ * Returns the Security config used for this object.
+ * @return SecurityConfig
+ */
+ public SecurityConfig getSecurityConfig() {
+ return securityConfig;
+ }
+
+ /**
+ * Use Config to generate key.
+ *
+ * @return KeyPair
+ * @throws NoSuchProviderException
+ * @throws NoSuchAlgorithmException
+ */
+ public KeyPair generateKey() throws NoSuchProviderException,
+ NoSuchAlgorithmException {
+ return generateKey(securityConfig.getSize(),
+ securityConfig.getAlgo(), securityConfig.getProvider());
+ }
+
+ /**
+ * Specify the size -- all other parameters are used from config.
+ *
+ * @param size - int, valid key sizes.
+ * @return KeyPair
+ * @throws NoSuchProviderException
+ * @throws NoSuchAlgorithmException
+ */
+ public KeyPair generateKey(int size) throws
+ NoSuchProviderException, NoSuchAlgorithmException {
+ return generateKey(size,
+ securityConfig.getAlgo(), securityConfig.getProvider());
+ }
+
+ /**
+ * Custom Key Generation, all values are user provided.
+ *
+ * @param size - Key Size
+ * @param algorithm - Algorithm to use
+ * @param provider - Security provider.
+ * @return KeyPair.
+ * @throws NoSuchProviderException
+ * @throws NoSuchAlgorithmException
+ */
+ public KeyPair generateKey(int size, String algorithm, String provider)
+ throws NoSuchProviderException, NoSuchAlgorithmException {
+ LOG.info("Generating key pair using size:{}, Algorithm:{}, Provider:{}",
+ size, algorithm, provider);
+ KeyPairGenerator generator = KeyPairGenerator
+ .getInstance(algorithm, provider);
+ generator.initialize(size);
+ return generator.generateKeyPair();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java
new file mode 100644
index 0000000..6ca7584
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.security.x509;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import org.apache.commons.io.output.FileWriterWithEncoding;
+import org.apache.hadoop.conf.Configuration;
+import org.bouncycastle.util.io.pem.PemObject;
+import org.bouncycastle.util.io.pem.PemWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.security.KeyPair;
+import java.util.Set;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE;
+import static java.nio.file.attribute.PosixFilePermission.OWNER_READ;
+import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE;
+
+/**
+ * We store all Key material in good old PEM files.
+ * This helps us avoid dealing with persistent
+ * Java KeyStore issues. Also, when debugging,
+ * general tools like OpenSSL can be used to read and
+ * decode these files.
+ */
+public class HDDSKeyPEMWriter {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HDDSKeyPEMWriter.class);
+ private final Path location;
+ private final SecurityConfig securityConfig;
+ private Set<PosixFilePermission> permissionSet =
+ Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE)
+ .collect(Collectors.toSet());
+ private Supplier<Boolean> isPosixFileSystem;
+ public final static String PRIVATE_KEY = "PRIVATE KEY";
+ public final static String PUBLIC_KEY = "PUBLIC KEY";
+ public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
+ /**
+ * Creates an HDDS Key PEM Writer.
+ *
+ * @param configuration - Configuration
+ */
+ public HDDSKeyPEMWriter(Configuration configuration) throws IOException {
+ Preconditions.checkNotNull(configuration, "Config cannot be null");
+ this.securityConfig = new SecurityConfig(configuration);
+ isPosixFileSystem = HDDSKeyPEMWriter::isPosix;
+ this.location = securityConfig.getKeyLocation();
+ }
+
+ /**
+ * Checks if File System supports posix style security permissions.
+ *
+ * @return True if it supports posix.
+ */
+ private static Boolean isPosix() {
+ return FileSystems.getDefault().supportedFileAttributeViews()
+ .contains("posix");
+ }
+
+ /**
+ * Returns the Permission set.
+ * @return Set
+ */
+ @VisibleForTesting
+ public Set<PosixFilePermission> getPermissionSet() {
+ return permissionSet;
+ }
+
+ /**
+ * Returns the Security config used for this object.
+ * @return SecurityConfig
+ */
+ public SecurityConfig getSecurityConfig() {
+ return securityConfig;
+ }
+
+ /**
+ * This function is used only for testing.
+ *
+ * @param isPosixFileSystem - Sets a boolean function for mimicking
+ * files systems that are not posix.
+ */
+ @VisibleForTesting
+ public void setIsPosixFileSystem(Supplier<Boolean> isPosixFileSystem) {
+ this.isPosixFileSystem = isPosixFileSystem;
+ }
+
+ /**
+ * Writes a given key using the default config options.
+ *
+ * @param keyPair - Key Pair to write to file.
+ * @throws IOException
+ */
+ public void writeKey(KeyPair keyPair) throws IOException {
+ writeKey(location, keyPair, securityConfig.getPrivateKeyName(),
+ securityConfig.getPublicKeyName(), false);
+ }
+
+ /**
+ * Writes a given key using default config options.
+ *
+ * @param keyPair - Key pair to write
+ * @param overwrite - Overwrites the keys if they already exist.
+ * @throws IOException
+ */
+ public void writeKey(KeyPair keyPair, boolean overwrite) throws IOException {
+ writeKey(location, keyPair, securityConfig.getPrivateKeyName(),
+ securityConfig.getPublicKeyName(), overwrite);
+ }
+
+ /**
+ * Writes a given key using default config options.
+ *
+ * @param basePath - The location to write to, overriding the config values.
+ * @param keyPair - Key pair to write
+ * @param overwrite - Overwrites the keys if they already exist.
+ * @throws IOException
+ */
+ public void writeKey(Path basePath, KeyPair keyPair, boolean overwrite)
+ throws IOException {
+ writeKey(basePath, keyPair, securityConfig.getPrivateKeyName(),
+ securityConfig.getPublicKeyName(), overwrite);
+ }
+
+ /**
+ * Helper function that actually writes data to the files.
+ *
+ * @param basePath - base path to write key
+ * @param keyPair - Key pair to write to file.
+ * @param privateKeyFileName - private key file name.
+ * @param publicKeyFileName - public key file name.
+ * @param force - forces overwriting the keys.
+ * @throws IOException
+ */
+ private synchronized void writeKey(Path basePath, KeyPair keyPair,
+ String privateKeyFileName, String publicKeyFileName, boolean force)
+ throws IOException {
+ checkPreconditions(basePath);
+
+ File privateKeyFile =
+ Paths.get(location.toString(), privateKeyFileName).toFile();
+ File publicKeyFile =
+ Paths.get(location.toString(), publicKeyFileName).toFile();
+ checkKeyFile(privateKeyFile, force, publicKeyFile);
+
+ try (PemWriter privateKeyWriter = new PemWriter(new
+ FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) {
+ privateKeyWriter.writeObject(
+ new PemObject(PRIVATE_KEY, keyPair.getPrivate().getEncoded()));
+ }
+
+ try (PemWriter publicKeyWriter = new PemWriter(new
+ FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) {
+ publicKeyWriter.writeObject(
+ new PemObject(PUBLIC_KEY, keyPair.getPublic().getEncoded()));
+ }
+ Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet);
+ Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet);
+ }
+
+ /**
+ * Checks if the private and public key files already exist. Throws an
+ * IOException if a file exists and the force flag is false; otherwise the
+ * existing file is deleted.
+ *
+ * @param privateKeyFile - Private key file.
+ * @param force - forces overwriting the keys.
+ * @param publicKeyFile - public key file.
+ * @throws IOException
+ */
+ private void checkKeyFile(File privateKeyFile, boolean force,
+ File publicKeyFile) throws IOException {
+ if (privateKeyFile.exists() && force) {
+ if (!privateKeyFile.delete()) {
+ throw new IOException("Unable to delete private key file.");
+ }
+ }
+
+ if (publicKeyFile.exists() && force) {
+ if (!publicKeyFile.delete()) {
+ throw new IOException("Unable to delete public key file.");
+ }
+ }
+
+ if (privateKeyFile.exists()) {
+ throw new IOException("Private Key file already exists.");
+ }
+
+ if (publicKeyFile.exists()) {
+ throw new IOException("Public Key file already exists.");
+ }
+ }
+
+ /**
+ * Checks if base path exists and sets file permissions.
+ *
+ * @param basePath - base path to write key
+ * @throws IOException
+ */
+ private void checkPreconditions(Path basePath) throws IOException {
+ Preconditions.checkNotNull(basePath, "Base path cannot be null");
+ if (!isPosixFileSystem.get()) {
+ LOG.error("Keys cannot be stored securely without POSIX file system "
+ + "support for now.");
+ throw new IOException("Unsupported File System for pem file.");
+ }
+
+ if (Files.exists(basePath)) {
+ // Not the end of the world if we reset the permissions on an existing
+ // directory.
+ Files.setPosixFilePermissions(basePath, permissionSet);
+ } else {
+ boolean success = basePath.toFile().mkdirs();
+ if (!success) {
+ LOG.error("Unable to create the directory for the "
+ + "location. Location: {}", basePath);
+ throw new IOException("Unable to create the directory for the "
+ + "location. Location:" + basePath);
+ }
+ Files.setPosixFilePermissions(basePath, permissionSet);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
new file mode 100644
index 0000000..896a379
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.Provider;
+import java.security.Security;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_LEN;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_SECURITY_PROVIDER;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_ALGORITHM;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_LEN;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_PROVIDER;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
+
+/**
+ * A class that deals with all security-related configs in HDDS.
+ * It is easier to have all Java code related to config in a single place.
+ */
+public class SecurityConfig {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SecurityConfig.class);
+ private static volatile Provider provider;
+ private final Configuration configuration;
+ private final int size;
+ private final String algo;
+ private final String providerString;
+ private final String metadataDir;
+ private final String keyDir;
+ private final String privateKeyName;
+ private final String publicKeyName;
+
+ /**
+ * Constructs a HDDSKeyGenerator.
+ *
+ * @param configuration - HDDS Configuration
+ */
+ public SecurityConfig(Configuration configuration) {
+ Preconditions.checkNotNull(configuration, "Configuration cannot be null");
+ this.configuration = configuration;
+ this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN);
+ this.algo = this.configuration.get(HDDS_KEY_ALGORITHM,
+ HDDS_DEFAULT_KEY_ALGORITHM);
+ this.providerString = this.configuration.get(HDDS_SECURITY_PROVIDER,
+ HDDS_DEFAULT_SECURITY_PROVIDER);
+
+ // Please note: to make it easy for our customers we will attempt to read
+ // the HDDS metadata dir and, if that is not set, use the Ozone directory.
+ // TODO: We might want to fix this later.
+ this.metadataDir = this.configuration.get(HDDS_METADATA_DIR_NAME,
+ configuration.get(OZONE_METADATA_DIRS));
+
+ Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
+ + " null. Please check configs.");
+ this.keyDir = this.configuration.get(HDDS_KEY_DIR_NAME,
+ HDDS_KEY_DIR_NAME_DEFAULT);
+ this.privateKeyName = this.configuration.get(HDDS_PRIVATE_KEY_FILE_NAME,
+ HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT);
+ this.publicKeyName = this.configuration.get(HDDS_PUBLIC_KEY_FILE_NAME,
+ HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT);
+
+ // First Startup -- if the provider is null, check for the provider.
+ if (SecurityConfig.provider == null) {
+ synchronized (SecurityConfig.class) {
+ provider = Security.getProvider(this.providerString);
+ if (SecurityConfig.provider == null) {
+ // Provider not found, let us try to Dynamically initialize the
+ // provider.
+ provider = initSecurityProvider(this.providerString);
+ }
+ }
+ }
+ }
+
+ /**
+ * Returns the Provider name.
+ * @return String Provider name.
+ */
+ public String getProviderString() {
+ return providerString;
+ }
+
+ /**
+ * Returns the public key file name.
+ * @return String, File name used for public keys.
+ */
+ public String getPublicKeyName() {
+ return publicKeyName;
+ }
+
+ /**
+ * Returns the private key file name.
+ * @return String, File name used for private keys.
+ */
+ public String getPrivateKeyName() {
+ return privateKeyName;
+ }
+
+ /**
+ * Returns the File path to where keys are stored.
+ * @return String Key location.
+ */
+ public Path getKeyLocation() {
+ return Paths.get(metadataDir, keyDir);
+ }
+
+ /**
+ * Gets the Key Size.
+ *
+ * @return key size.
+ */
+ public int getSize() {
+ return size;
+ }
+
+ /**
+ * Gets provider.
+ *
+ * @return String Provider name.
+ */
+ public String getProvider() {
+ return providerString;
+ }
+
+ /**
+ * Returns the Key generation Algorithm used.
+ *
+ * @return String Algo.
+ */
+ public String getAlgo() {
+ return algo;
+ }
+
+ /**
+ * Returns the Configuration used for initializing this SecurityConfig.
+ * @return Configuration
+ */
+ public Configuration getConfiguration() {
+ return configuration;
+ }
+
+
+ /**
+ * Adds a security provider dynamically if it is not loaded already.
+ *
+ * @param providerName - name of the provider.
+ */
+ private Provider initSecurityProvider(String providerName) {
+ switch (providerName) {
+ case "BC":
+ Security.addProvider(new BouncyCastleProvider());
+ return Security.getProvider(providerName);
+ default:
+ LOG.error("Security Provider:{} is unknown", provider);
+ throw new SecurityException("Unknown security provider:" + provider);
+ }
+ }
+}
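With the generator, PEM writer and SecurityConfig above, end-to-end key bootstrap is expected to look roughly like the following sketch; the spelled-out config key string and the demo path are assumptions for illustration, not part of the patch:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.security.x509.HDDSKeyGenerator;
import org.apache.hadoop.hdds.security.x509.HDDSKeyPEMWriter;

import java.security.KeyPair;

// Sketch: generate a key pair with the configured defaults and persist it
// as PEM files. Assumes ozone.metadata.dirs points at a writable POSIX
// directory and no keys exist there yet; "/tmp/hdds-demo" is a made-up path.
public class KeyBootstrapSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("ozone.metadata.dirs", "/tmp/hdds-demo");
    KeyPair keyPair = new HDDSKeyGenerator(conf).generateKey(); // RSA, 2048-bit by default
    new HDDSKeyPEMWriter(conf).writeKey(keyPair); // private.pem and public.pem under .../keys
  }
}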
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
new file mode 100644
index 0000000..89d5d51
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+
+/**
+ * This package contains common routines used in creating an x509 based
+ * identity framework for HDDS.
+ */
+package org.apache.hadoop.hdds.security.x509;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index ac3bf82..5354e9b 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1092,4 +1092,46 @@
</description>
</property>
+
+ <property>
+ <name>hdds.key.len</name>
+ <value>2048</value>
+ <tag>SCM, HDDS, X509, SECURITY</tag>
+ <description>
+ SCM CA key length. This is an algorithm-specific metric, such as modulus length, specified in number of bits.
+ </description>
+ </property>
+ <property>
+ <name>hdds.key.dir.name</name>
+ <value>keys</value>
+ <tag>SCM, HDDS, X509, SECURITY</tag>
+ <description>
+ Directory to store public/private key for SCM CA. This is relative to the ozone/hdds metadata dir.
+ </description>
+ </property>
+ <property>
+ <name>hdds.metadata.dir</name>
+ <value/>
+ <tag>X509, SECURITY</tag>
+ <description>
+ Absolute path to HDDS metadata dir.
+ </description>
+ </property>
+ <property>
+ <name>hdds.priv.key.file.name</name>
+ <value>private.pem</value>
+ <tag>X509, SECURITY</tag>
+ <description>
+ Name of the file which stores the private key generated for SCM CA.
+ </description>
+ </property>
+ <property>
+ <name>hdds.public.key.file.name</name>
+ <value>public.pem</value>
+ <tag>X509, SECURITY</tag>
+ <description>
+ Name of the file which stores the public key generated for SCM CA.
+ </description>
+ </property>
+
</configuration>
\ No newline at end of file
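These properties feed the key-generation classes exercised by the tests below. A sketch of bootstrapping an SCM CA key pair with them — assuming HDDSKeyGenerator.generateKey() and HDDSKeyPEMWriter.writeKey(KeyPair) behave as those tests show; the demo class name and paths are hypothetical:

    import java.security.KeyPair;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.security.x509.HDDSKeyGenerator;
    import org.apache.hadoop.hdds.security.x509.HDDSKeyPEMWriter;

    public class KeyBootstrapDemo {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Property names copied verbatim from the ozone-default.xml block above.
        conf.set("hdds.metadata.dir", "/tmp/hdds-demo");
        conf.setInt("hdds.key.len", 4096);
        KeyPair keys = new HDDSKeyGenerator(conf).generateKey();
        // Writes private.pem and public.pem under <hdds.metadata.dir>/<hdds.key.dir.name>.
        new HDDSKeyPEMWriter(conf).writeKey(keys);
      }
    }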
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java
new file mode 100644
index 0000000..2ddf59c
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
+import java.security.KeyPair;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PublicKey;
+import java.security.interfaces.RSAPublicKey;
+import java.security.spec.PKCS8EncodedKeySpec;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for HDDS Key Generator.
+ */
+public class TestHDDSKeyGenerator {
+ private static SecurityConfig config;
+
+ @Before
+ public void init() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(OZONE_METADATA_DIRS, GenericTestUtils.getTempPath("testpath"));
+ config = new SecurityConfig(conf);
+ }
+ /**
+ * In this test we verify that we can create a key pair, get its encoded
+ * bytes, and use an ASN.1 parser to parse them back into a private key.
+ * @throws NoSuchProviderException
+ * @throws NoSuchAlgorithmException
+ */
+ @Test
+ public void testGenerateKey()
+ throws NoSuchProviderException, NoSuchAlgorithmException {
+ HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
+ KeyPair keyPair = keyGen.generateKey();
+ Assert.assertEquals(config.getAlgo(), keyPair.getPrivate().getAlgorithm());
+ PKCS8EncodedKeySpec keySpec =
+ new PKCS8EncodedKeySpec(keyPair.getPrivate().getEncoded());
+ Assert.assertEquals("PKCS#8", keySpec.getFormat());
+ }
+
+ /**
+ * In this test we assert that the size we specified is used for key
+ * generation.
+ * @throws NoSuchProviderException
+ * @throws NoSuchAlgorithmException
+ */
+ @Test
+ public void testGenerateKeyWithSize() throws NoSuchProviderException,
+ NoSuchAlgorithmException {
+ HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
+ KeyPair keyPair = keyGen.generateKey(4096);
+ PublicKey publicKey = keyPair.getPublic();
+ if (publicKey instanceof RSAPublicKey) {
+ Assert.assertEquals(4096,
+ ((RSAPublicKey)(publicKey)).getModulus().bitLength());
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java
new file mode 100644
index 0000000..68ff9e6
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.security.KeyFactory;
+import java.security.KeyPair;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.security.spec.X509EncodedKeySpec;
+import java.util.Set;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Test class for HDDS pem writer.
+ */
+public class TestHDDSKeyPEMWriter {
+
+ @Rule
+ public TemporaryFolder temporaryFolder = new TemporaryFolder();
+ private OzoneConfiguration configuration;
+ private HDDSKeyGenerator keyGenerator;
+ private String prefix;
+
+ @Before
+ public void init() throws IOException {
+ configuration = new OzoneConfiguration();
+ prefix = temporaryFolder.newFolder().toString();
+ configuration.set(HDDS_METADATA_DIR_NAME, prefix);
+ keyGenerator = new HDDSKeyGenerator(configuration);
+ }
+
+ /**
+ * Assert basic behavior: that the key files can be created and that their
+ * names and locations match the configuration.
+ *
+ * @throws NoSuchProviderException
+ * @throws NoSuchAlgorithmException
+ * @throws IOException
+ */
+ @Test
+ public void testWriteKey()
+ throws NoSuchProviderException, NoSuchAlgorithmException,
+ IOException, InvalidKeySpecException {
+ KeyPair keys = keyGenerator.generateKey();
+ HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+ pemWriter.writeKey(keys);
+
+ // Assert that locations have been created.
+ Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation();
+ Assert.assertTrue(keyLocation.toFile().exists());
+
+ // Assert that locations are created in the locations that we specified
+ // using the Config.
+ Assert.assertTrue(keyLocation.toString().startsWith(prefix));
+ Path privateKeyPath = Paths.get(keyLocation.toString(),
+ pemWriter.getSecurityConfig().getPrivateKeyName());
+ Assert.assertTrue(privateKeyPath.toFile().exists());
+ Path publicKeyPath = Paths.get(keyLocation.toString(),
+ pemWriter.getSecurityConfig().getPublicKeyName());
+ Assert.assertTrue(publicKeyPath.toFile().exists());
+
+ // Read the private key and test if the expected String in the PEM file
+ // format exists.
+ byte[] privateKey = Files.readAllBytes(privateKeyPath);
+ String privateKeydata = new String(privateKey, StandardCharsets.UTF_8);
+ Assert.assertTrue(privateKeydata.contains("PRIVATE KEY"));
+
+ // Read the public key and test if the expected String in the PEM file
+ // format exists.
+ byte[] publicKey = Files.readAllBytes(publicKeyPath);
+ String publicKeydata = new String(publicKey, StandardCharsets.UTF_8);
+ Assert.assertTrue(publicKeydata.contains("PUBLIC KEY"));
+
+ // Let us decode the PEM file and parse it back into binary.
+ KeyFactory kf = KeyFactory.getInstance(
+ pemWriter.getSecurityConfig().getAlgo());
+
+ // Replace the PEM Human readable guards.
+ privateKeydata =
+ privateKeydata.replace("-----BEGIN PRIVATE KEY-----\n", "");
+ privateKeydata =
+ privateKeydata.replace("-----END PRIVATE KEY-----", "");
+
+ // Decode the base64 to binary format and then use an ASN.1 parser to
+ // parse the binary format.
+
+ byte[] keyBytes = Base64.decodeBase64(privateKeydata);
+ PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes);
+ PrivateKey privateKeyDecoded = kf.generatePrivate(spec);
+ Assert.assertNotNull("Private Key should not be null",
+ privateKeyDecoded);
+
+ // Let us decode the public key and verify that we can parse it back into
+ // binary.
+ publicKeydata =
+ publicKeydata.replace("-----BEGIN PUBLIC KEY-----\n", "");
+ publicKeydata =
+ publicKeydata.replace("-----END PUBLIC KEY-----", "");
+
+ keyBytes = Base64.decodeBase64(publicKeydata);
+ X509EncodedKeySpec pubKeyspec = new X509EncodedKeySpec(keyBytes);
+ PublicKey publicKeyDecoded = kf.generatePublic(pubKeyspec);
+ Assert.assertNotNull("Public Key should not be null",
+ publicKeyDecoded);
+
+ // Now let us assert the permissions on the Directories and files are as
+ // expected.
+ Set<PosixFilePermission> expectedSet = pemWriter.getPermissionSet();
+ Set<PosixFilePermission> currentSet =
+ Files.getPosixFilePermissions(privateKeyPath);
+ currentSet.removeAll(expectedSet);
+ Assert.assertEquals(0, currentSet.size());
+
+ currentSet =
+ Files.getPosixFilePermissions(publicKeyPath);
+ currentSet.removeAll(expectedSet);
+ Assert.assertEquals(0, currentSet.size());
+
+ currentSet =
+ Files.getPosixFilePermissions(keyLocation);
+ currentSet.removeAll(expectedSet);
+ Assert.assertEquals(0, currentSet.size());
+ }
+
+ /**
+ * Assert that key rewrite fails without the force option.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testReWriteKey()
+ throws Exception {
+ KeyPair kp = keyGenerator.generateKey();
+ HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+ SecurityConfig secConfig = pemWriter.getSecurityConfig();
+ pemWriter.writeKey(kp);
+
+ // Assert that rewriting of keys throws exception with valid messages.
+ LambdaTestUtils
+ .intercept(IOException.class, "Private Key file already exists.",
+ () -> pemWriter.writeKey(kp));
+ FileUtils.deleteQuietly(Paths.get(
+ secConfig.getKeyLocation().toString() + "/" + secConfig
+ .getPrivateKeyName()).toFile());
+ LambdaTestUtils
+ .intercept(IOException.class, "Public Key file already exists.",
+ () -> pemWriter.writeKey(kp));
+ FileUtils.deleteQuietly(Paths.get(
+ secConfig.getKeyLocation().toString() + "/" + secConfig
+ .getPublicKeyName()).toFile());
+
+ // Should succeed now as both public and private key are deleted.
+ pemWriter.writeKey(kp);
+ // Should succeed with overwrite flag as true.
+ pemWriter.writeKey(kp, true);
+
+ }
+
+ /**
+ * Assert that key write fails on a non-POSIX file system.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testWriteKeyInNonPosixFS()
+ throws Exception {
+ KeyPair kp = keyGenerator.generateKey();
+ HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+ pemWriter.setIsPosixFileSystem(() -> false);
+
+ // Assert that key write fails on a non-POSIX file system.
+ LambdaTestUtils
+ .intercept(IOException.class, "Unsupported File System for pem file.",
+ () -> pemWriter.writeKey(kp));
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca01442/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 92e2ffc..fbbd71c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -36,5 +36,11 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
errorIfMissingConfigProps = true;
errorIfMissingXmlProps = true;
xmlPropsToSkipCompare.add("hadoop.tags.custom");
+ addPropertiesNotInXml();
+ }
+
+ private void addPropertiesNotInXml() {
+ configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_KEY_ALGORITHM);
+ configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_SECURITY_PROVIDER);
}
}
\ No newline at end of file
[14/45] hadoop git commit: HDDS-94. Change ozone datanode command to
start the standalone datanode plugin. Contributed by Sandeep Nemuri.
Posted by xy...@apache.org.
HDDS-94. Change ozone datanode command to start the standalone datanode plugin.
Contributed by Sandeep Nemuri.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18932717
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18932717
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18932717
Branch: refs/heads/HDDS-4
Commit: 18932717c42382ed8842de7719ec6d20c1765366
Parents: 1e30547
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jun 26 18:28:47 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jun 27 14:15:15 2018 -0700
----------------------------------------------------------------------
hadoop-dist/src/main/compose/ozone/docker-compose.yaml | 12 ------------
hadoop-dist/src/main/compose/ozone/docker-config | 5 -----
.../src/main/compose/ozoneperf/docker-compose.yaml | 13 -------------
hadoop-dist/src/main/compose/ozoneperf/docker-config | 5 -----
.../org/apache/hadoop/ozone/HddsDatanodeService.java | 13 ++++++++++++-
.../src/test/acceptance/basic/docker-compose.yaml | 12 ------------
.../src/test/acceptance/basic/docker-config | 5 -----
.../src/test/acceptance/commonlib.robot | 1 -
.../src/test/acceptance/ozonefs/docker-compose.yaml | 12 ------------
.../src/test/acceptance/ozonefs/docker-config | 5 -----
hadoop-ozone/common/src/main/bin/ozone | 9 +++------
11 files changed, 15 insertions(+), 77 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index faf420c..512c649 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -16,18 +16,6 @@
version: "3"
services:
- namenode:
- image: apache/hadoop-runner
- hostname: namenode
- volumes:
- - ../../ozone:/opt/hadoop
- ports:
- - 9870:9870
- environment:
- ENSURE_NAMENODE_DIR: /data/namenode
- env_file:
- - ./docker-config
- command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-dist/src/main/compose/ozone/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index c693db0..632f870 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
index fb7873b..3233c11 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
@@ -16,19 +16,6 @@
version: "3"
services:
- namenode:
- image: apache/hadoop-runner
- hostname: namenode
- volumes:
- - ../../ozone:/opt/hadoop
- - ./jmxpromo.jar:/opt/jmxpromo.jar
- ports:
- - 9870:9870
- environment:
- ENSURE_NAMENODE_DIR: /data/namenode
- env_file:
- - ./docker-config
- command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-dist/src/main/compose/ozoneperf/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config
index e4f5485..2be22a7 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.scm.names=scm
OZONE-SITE.XML_ozone.enabled=True
@@ -23,12 +22,8 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index fa4187a..ddeec87 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -25,9 +25,11 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.statemachine
.DatanodeStateMachine;
+import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
@@ -229,9 +231,18 @@ public class HddsDatanodeService implements ServicePlugin {
public static void main(String[] args) {
try {
+ if (DFSUtil.parseHelpArgument(args, "Starts HDDS Datanode", System.out, false)) {
+ System.exit(0);
+ }
+ Configuration conf = new OzoneConfiguration();
+ GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
+ if (!hParser.isParseSuccessful()) {
+ GenericOptionsParser.printGenericCommandUsage(System.err);
+ System.exit(1);
+ }
StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
HddsDatanodeService hddsDatanodeService =
- createHddsDatanodeService(new OzoneConfiguration());
+ createHddsDatanodeService(conf);
hddsDatanodeService.start(null);
hddsDatanodeService.join();
} catch (Throwable e) {
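For orientation, a sketch of what the GenericOptionsParser wiring above provides: standard Hadoop generic options such as -D are absorbed into the configuration before the datanode service is created. The demo class name and property value are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.util.GenericOptionsParser;

    public class GenericOptsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new OzoneConfiguration();
        // "-D key=value" pairs land in conf; anything else is returned untouched.
        GenericOptionsParser parser = new GenericOptionsParser(conf,
            new String[] {"-D", "ozone.metadata.dirs=/data/metadata"});
        String[] remaining = parser.getRemainingArgs();
        System.out.println(conf.get("ozone.metadata.dirs")); // /data/metadata
      }
    }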
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
index 44bd4a0..b50f42d 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
@@ -16,18 +16,6 @@
version: "3"
services:
- namenode:
- image: apache/hadoop-runner
- hostname: namenode
- volumes:
- - ${OZONEDIR}:/opt/hadoop
- ports:
- - 9870
- environment:
- ENSURE_NAMENODE_DIR: /data/namenode
- env_file:
- - ./docker-config
- command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
index 180dc8e..c3ec2ef 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
OZONE-SITE.XML_ozone.scm.names=scm
@@ -24,13 +23,9 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
index 01ed302..a5ea30a 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
@@ -28,7 +28,6 @@ Startup Ozone cluster with size
Daemons are running without error
Is daemon running without error ksm
Is daemon running without error scm
- Is daemon running without error namenode
Is daemon running without error datanode
Check if datanode is connected to the scm
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
index 3323557..12022df 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
@@ -16,18 +16,6 @@
version: "3"
services:
- namenode:
- image: apache/hadoop-runner
- hostname: namenode
- volumes:
- - ${OZONEDIR}:/opt/hadoop
- ports:
- - 9870
- environment:
- ENSURE_NAMENODE_DIR: /data/namenode
- env_file:
- - ./docker-config
- command: ["/opt/hadoop/bin/hdfs","namenode"]
datanode:
image: apache/hadoop-runner
volumes:
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
index dec863e..e06d434 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
OZONE-SITE.XML_ozone.ksm.address=ksm
OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
@@ -25,13 +24,9 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
OZONE-SITE.XML_ozone.handler.type=distributed
OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
OZONE-SITE.XML_ozone.scm.heartbeat.interval=3s
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
HDFS-SITE.XML_rpc.metrics.quantile.enable=true
HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
http://git-wip-us.apache.org/repos/asf/hadoop/blob/18932717/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 6843bdd..390f089 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -34,7 +34,7 @@ function hadoop_usage
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
- hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
+ hadoop_add_subcommand "datanode" daemon "run a HDDS datanode"
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
hadoop_add_subcommand "freon" client "runs an ozone data generator"
hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning."
@@ -45,7 +45,7 @@ function hadoop_usage
hadoop_add_subcommand "o3" client "command line interface for ozone"
hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
- hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager "
+ hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager"
hadoop_add_subcommand "version" client "print the version"
hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path"
@@ -68,10 +68,7 @@ function ozonecmd_case
;;
datanode)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
- HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
- HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
- hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
- hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
+ HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService
;;
envvars)
echo "JAVA_HOME='${JAVA_HOME}'"
[21/45] hadoop git commit: YARN-8409. Fixed NPE in
ActiveStandbyElectorBasedElectorService. Contributed by Chandni Singh
Posted by xy...@apache.org.
YARN-8409. Fixed NPE in ActiveStandbyElectorBasedElectorService.
Contributed by Chandni Singh
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/384764cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/384764cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/384764cd
Branch: refs/heads/HDDS-4
Commit: 384764cdeac6490bc47fa0eb7b936baa4c0d3230
Parents: d3fa83a
Author: Eric Yang <ey...@apache.org>
Authored: Thu Jun 28 12:39:49 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu Jun 28 12:39:49 2018 -0400
----------------------------------------------------------------------
.../apache/hadoop/ha/ActiveStandbyElector.java | 5 ++++-
.../apache/hadoop/ha/ZKFailoverController.java | 2 +-
.../resourcemanager/TestRMEmbeddedElector.java | 22 ++++++++++++++++++++
3 files changed, 27 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/384764cd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index a23fb71..d099ca7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -329,9 +329,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
* This recursively creates the znode as well as all of its parents.
*/
public synchronized void ensureParentZNode()
- throws IOException, InterruptedException {
+ throws IOException, InterruptedException, KeeperException {
Preconditions.checkState(!wantToBeInElection,
"ensureParentZNode() may not be called while in the election");
+ if (zkClient == null) {
+ createConnection();
+ }
String pathParts[] = znodeWorkingDir.split("/");
Preconditions.checkArgument(pathParts.length >= 1 &&
http://git-wip-us.apache.org/repos/asf/hadoop/blob/384764cd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 9295288..f66e3c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -269,7 +269,7 @@ public abstract class ZKFailoverController {
}
private int formatZK(boolean force, boolean interactive)
- throws IOException, InterruptedException {
+ throws IOException, InterruptedException, KeeperException {
if (elector.parentZNodeExists()) {
if (!force && (!interactive || !confirmFormat())) {
return ERR_CODE_FORMAT_DENIED;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/384764cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 9d38149..8c03861 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -22,8 +22,10 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ClientBaseWithFixes;
import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -304,6 +306,26 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
verify(as, times(1)).transitionToStandby(any());
}
+ /**
+ * Test that active elector service triggers a fatal RM Event when connection
+ * to ZK fails. YARN-8409
+ */
+ @Test
+ public void testFailureToConnectToZookeeper() throws Exception {
+ stopServer();
+ Configuration myConf = new Configuration(conf);
+ ResourceManager rm = new MockRM(conf);
+
+ ActiveStandbyElectorBasedElectorService ees =
+ new ActiveStandbyElectorBasedElectorService(rm);
+ try {
+ ees.init(myConf);
+ Assert.fail("expect failure to connect to Zookeeper");
+ } catch (ServiceStateException sse) {
+ Assert.assertTrue(sse.getMessage().contains("ConnectionLoss"));
+ }
+ }
+
private class MockRMWithElector extends MockRM {
private long delayMs = 0;
[37/45] hadoop git commit: HDFS-13536. [PROVIDED Storage] HA for
InMemoryAliasMap. Contributed by Virajith Jalaparti.
Posted by xy...@apache.org.
HDFS-13536. [PROVIDED Storage] HA for InMemoryAliasMap. Contributed by Virajith Jalaparti.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1804a315
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1804a315
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1804a315
Branch: refs/heads/HDDS-4
Commit: 1804a31515e541b3371925aa895589919b54d443
Parents: 5cc2541
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Jul 2 10:48:20 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Jul 2 10:48:20 2018 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hdfs/DFSUtilClient.java | 4 +-
.../hdfs/client/HdfsClientConfigKeys.java | 3 +
.../ha/ConfiguredFailoverProxyProvider.java | 9 +-
.../InMemoryAliasMapFailoverProxyProvider.java | 38 ++
.../org/apache/hadoop/hdfs/DFSConfigKeys.java | 5 +-
.../java/org/apache/hadoop/hdfs/DFSUtil.java | 37 +-
.../org/apache/hadoop/hdfs/NameNodeProxies.java | 15 +-
...yAliasMapProtocolClientSideTranslatorPB.java | 95 ++++-
.../aliasmap/InMemoryAliasMapProtocol.java | 5 +
.../aliasmap/InMemoryLevelDBAliasMapServer.java | 19 +-
.../impl/InMemoryLevelDBAliasMapClient.java | 80 ++--
.../src/main/resources/hdfs-default.xml | 22 +-
.../org/apache/hadoop/hdfs/MiniDFSCluster.java | 13 +-
.../apache/hadoop/hdfs/MiniDFSNNTopology.java | 2 +-
.../impl/TestInMemoryLevelDBAliasMapClient.java | 7 +
.../namenode/ITestProvidedImplementation.java | 371 ++++++++++++++++---
16 files changed, 615 insertions(+), 110 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 313b973..3fac7c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -396,7 +396,7 @@ public class DFSUtilClient {
* @param keys Set of keys to look for in the order of preference
* @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
*/
- static Map<String, Map<String, InetSocketAddress>> getAddresses(
+ public static Map<String, Map<String, InetSocketAddress>> getAddresses(
Configuration conf, String defaultAddress, String... keys) {
Collection<String> nameserviceIds = getNameServiceIds(conf);
return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
@@ -426,7 +426,7 @@ public class DFSUtilClient {
return ret;
}
- static Map<String, InetSocketAddress> getAddressesForNameserviceId(
+ public static Map<String, InetSocketAddress> getAddressesForNameserviceId(
Configuration conf, String nsId, String defaultValue, String... keys) {
Collection<String> nnIds = getNameNodeIds(conf, nsId);
Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index f2cec31..a812670 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -184,6 +184,9 @@ public interface HdfsClientConfigKeys {
"dfs.namenode.snapshot.capture.openfiles";
boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
+ String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
+ "dfs.provided.aliasmap.inmemory.dnrpc-address";
+
/**
* These are deprecated config keys to client code.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 96722fc..f46532a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+
/**
* A FailoverProxyProvider implementation which allows one to configure
* multiple URIs to connect to during fail-over. A random configured address is
@@ -60,6 +62,11 @@ public class ConfiguredFailoverProxyProvider<T> extends
public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
Class<T> xface, HAProxyFactory<T> factory) {
+ this(conf, uri, xface, factory, DFS_NAMENODE_RPC_ADDRESS_KEY);
+ }
+
+ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
+ Class<T> xface, HAProxyFactory<T> factory, String addressKey) {
this.xface = xface;
this.conf = new Configuration(conf);
int maxRetries = this.conf.getInt(
@@ -81,7 +88,7 @@ public class ConfiguredFailoverProxyProvider<T> extends
ugi = UserGroupInformation.getCurrentUser();
Map<String, Map<String, InetSocketAddress>> map =
- DFSUtilClient.getHaNnRpcAddresses(conf);
+ DFSUtilClient.getAddresses(conf, null, addressKey);
Map<String, InetSocketAddress> addressesInNN = map.get(uri.getHost());
if (addressesInNN == null || addressesInNN.size() == 0) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/InMemoryAliasMapFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/InMemoryAliasMapFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/InMemoryAliasMapFailoverProxyProvider.java
new file mode 100644
index 0000000..6525942
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/InMemoryAliasMapFailoverProxyProvider.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import org.apache.hadoop.conf.Configuration;
+
+import java.net.URI;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
+
+/**
+ * A {@link ConfiguredFailoverProxyProvider} implementation used to connect
+ * to an InMemoryAliasMap.
+ */
+public class InMemoryAliasMapFailoverProxyProvider<T>
+ extends ConfiguredFailoverProxyProvider<T> {
+
+ public InMemoryAliasMapFailoverProxyProvider(
+ Configuration conf, URI uri, Class<T> xface, HAProxyFactory<T> factory) {
+ super(conf, uri, xface, factory,
+ DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS);
+ }
+}
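A hedged client-side sketch of selecting this provider for an HA nameservice; the key suffix pattern follows the addKeySuffixes usage elsewhere in this patch, and the nameservice and host names are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
    import org.apache.hadoop.hdfs.server.namenode.ha.InMemoryAliasMapFailoverProxyProvider;

    public class AliasMapHADemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.nameservices", "ns1");
        conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
        conf.set("dfs.provided.aliasmap.inmemory.dnrpc-address.ns1.nn1",
            "nn1.example.com:50200");
        conf.set("dfs.provided.aliasmap.inmemory.dnrpc-address.ns1.nn2",
            "nn2.example.com:50200");
        // Same setClass call the translator uses internally in this patch.
        conf.setClass("dfs.client.failover.proxy.provider.ns1",
            InMemoryAliasMapFailoverProxyProvider.class,
            AbstractNNFailoverProxyProvider.class);
      }
    }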
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dde7eb7..cc902b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -86,8 +86,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
public static final String DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
public static final String DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
- public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS = "dfs.provided.aliasmap.inmemory.dnrpc-address";
+ public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS =
+ HdfsClientConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT = "0.0.0.0:50200";
+ public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST = "dfs.provided.aliasmap.inmemory.rpc.bind-host";
+
public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR = "dfs.provided.aliasmap.inmemory.leveldb.dir";
public static final String DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE = "dfs.provided.aliasmap.inmemory.batch-size";
public static final int DFS_PROVIDED_ALIASMAP_INMEMORY_BATCH_SIZE_DEFAULT = 500;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 4c94e38..f7cd32b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1130,7 +1130,42 @@ public class DFSUtil {
return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
}
-
+
+ /**
+ * Determine the {@link InetSocketAddress} to bind to, for any service.
+ * In case of HA or federation, the address is assumed to be specified as
+ * {@code confKey}.NAMESPACEID.NAMENODEID as appropriate.
+ *
+ * @param conf configuration.
+ * @param confKey configuration key (prefix if HA/federation) used to
+ * specify the address for the service.
+ * @param defaultValue default value for the address.
+ * @param bindHostKey configuration key (prefix if HA/federation)
+ * specifying host to bind to.
+ * @return the address to bind to.
+ */
+ public static InetSocketAddress getBindAddress(Configuration conf,
+ String confKey, String defaultValue, String bindHostKey) {
+ InetSocketAddress address;
+ String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+ String bindHostActualKey;
+ if (nsId != null) {
+ String namenodeId = HAUtil.getNameNodeId(conf, nsId);
+ address = DFSUtilClient.getAddressesForNameserviceId(
+ conf, nsId, null, confKey).get(namenodeId);
+ bindHostActualKey = DFSUtil.addKeySuffixes(bindHostKey, nsId, namenodeId);
+ } else {
+ address = NetUtils.createSocketAddr(conf.get(confKey, defaultValue));
+ bindHostActualKey = bindHostKey;
+ }
+
+ String bindHost = conf.get(bindHostActualKey);
+ if (bindHost == null || bindHost.isEmpty()) {
+ bindHost = address.getHostName();
+ }
+ return new InetSocketAddress(bindHost, address.getPort());
+ }
+
/**
* Returns nameservice Id and namenode Id when the local host matches the
* configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
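A sketch of the new helper in isolation, using the alias-map keys introduced in this change (presumably how the in-memory alias map server resolves its listen address); the demo class name is hypothetical:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class BindAddressDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Falls back to the bind-host key, then to the address's own host name.
        InetSocketAddress addr = DFSUtil.getBindAddress(conf,
            DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
            DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT,
            DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST);
        System.out.println(addr); // 0.0.0.0:50200 with the shipped defaults
      }
    }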
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index d556c90..b63d26b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -31,10 +31,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocolPB.AliasMapProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.InMemoryAliasMapProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.NameNodeHAProxyFactory;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -184,6 +187,8 @@ public class NameNodeProxies {
conf, ugi);
} else if (xface == RefreshCallQueueProtocol.class) {
proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
+ } else if (xface == InMemoryAliasMapProtocol.class) {
+ proxy = (T) createNNProxyWithInMemoryAliasMapProtocol(nnAddr, conf, ugi);
} else {
String message = "Unsupported protocol found when creating the proxy " +
"connection to NameNode: " +
@@ -194,7 +199,15 @@ public class NameNodeProxies {
return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
}
-
+
+ private static InMemoryAliasMapProtocol createNNProxyWithInMemoryAliasMapProtocol(
+ InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
+ throws IOException {
+ AliasMapProtocolPB proxy = (AliasMapProtocolPB) createNameNodeProxy(
+ address, conf, ugi, AliasMapProtocolPB.class, 30000);
+ return new InMemoryAliasMapProtocolClientSideTranslatorPB(proxy);
+ }
+
private static JournalProtocol createNNProxyWithJournalProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
throws IOException {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
index fc23c88..2025c16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InMemoryAliasMapProtocolClientSideTranslatorPB.java
@@ -20,27 +20,38 @@ import com.google.protobuf.ServiceException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.hdfs.server.namenode.ha.InMemoryAliasMapFailoverProxyProvider;
import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.Nonnull;
+import java.io.Closeable;
import java.io.IOException;
-import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSUtil.addKeySuffixes;
+import static org.apache.hadoop.hdfs.DFSUtil.createUri;
+import static org.apache.hadoop.hdfs.DFSUtilClient.getNameServiceIds;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX;
import static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.*;
import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.*;
@@ -52,7 +63,7 @@ import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.*;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class InMemoryAliasMapProtocolClientSideTranslatorPB
- implements InMemoryAliasMapProtocol {
+ implements InMemoryAliasMapProtocol, Closeable {
private static final Logger LOG =
LoggerFactory
@@ -60,22 +71,61 @@ public class InMemoryAliasMapProtocolClientSideTranslatorPB
private AliasMapProtocolPB rpcProxy;
- public InMemoryAliasMapProtocolClientSideTranslatorPB(Configuration conf) {
- String addr = conf.getTrimmed(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
- DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT);
- InetSocketAddress aliasMapAddr = NetUtils.createSocketAddr(addr);
+ public InMemoryAliasMapProtocolClientSideTranslatorPB(
+ AliasMapProtocolPB rpcProxy) {
+ this.rpcProxy = rpcProxy;
+ }
- RPC.setProtocolEngine(conf, AliasMapProtocolPB.class,
- ProtobufRpcEngine.class);
- LOG.info("Connecting to address: " + addr);
- try {
- rpcProxy = RPC.getProxy(AliasMapProtocolPB.class,
- RPC.getProtocolVersion(AliasMapProtocolPB.class), aliasMapAddr, null,
- conf, NetUtils.getDefaultSocketFactory(conf), 0);
- } catch (IOException e) {
- throw new RuntimeException(
- "Error in connecting to " + addr + " Got: " + e);
+ public static Collection<InMemoryAliasMapProtocol> init(Configuration conf) {
+ Collection<InMemoryAliasMapProtocol> aliasMaps = new ArrayList<>();
+ // Try to connect to all configured nameservices as it is not known which
+ // nameservice supports the AliasMap.
+ for (String nsId : getNameServiceIds(conf)) {
+ try {
+ URI namenodeURI = null;
+ Configuration newConf = new Configuration(conf);
+ if (HAUtil.isHAEnabled(conf, nsId)) {
+ // set the failover-proxy provider if HA is enabled.
+ newConf.setClass(
+ addKeySuffixes(PROXY_PROVIDER_KEY_PREFIX, nsId),
+ InMemoryAliasMapFailoverProxyProvider.class,
+ AbstractNNFailoverProxyProvider.class);
+ namenodeURI = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
+ } else {
+ String key =
+ addKeySuffixes(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, nsId);
+ String addr = conf.get(key);
+ if (addr != null) {
+ namenodeURI = createUri(HdfsConstants.HDFS_URI_SCHEME,
+ NetUtils.createSocketAddr(addr));
+ }
+ }
+ if (namenodeURI != null) {
+ aliasMaps.add(NameNodeProxies
+ .createProxy(newConf, namenodeURI, InMemoryAliasMapProtocol.class)
+ .getProxy());
+ LOG.info("Connected to InMemoryAliasMap at {}", namenodeURI);
+ }
+ } catch (IOException | URISyntaxException e) {
+ LOG.warn("Exception in connecting to InMemoryAliasMap for nameservice "
+ + "{}: {}", nsId, e);
+ }
}
+ // if a separate AliasMap is configured using
+ // DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, try to connect to it.
+ if (conf.get(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS) != null) {
+ URI uri = createUri("hdfs", NetUtils.createSocketAddr(
+ conf.get(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS)));
+ try {
+ aliasMaps.add(NameNodeProxies
+ .createProxy(conf, uri, InMemoryAliasMapProtocol.class).getProxy());
+ LOG.info("Connected to InMemoryAliasMap at {}", uri);
+ } catch (IOException e) {
+ LOG.warn("Exception in connecting to InMemoryAliasMap at {}: {}", uri,
+ e);
+ }
+ }
+ return aliasMaps;
}
@Override
@@ -168,7 +218,12 @@ public class InMemoryAliasMapProtocolClientSideTranslatorPB
}
}
- public void stop() {
- RPC.stopProxy(rpcProxy);
+ @Override
+ public void close() throws IOException {
+ LOG.info("Stopping rpcProxy in" +
+ "InMemoryAliasMapProtocolClientSideTranslatorPB");
+ if (rpcProxy != null) {
+ RPC.stopProxy(rpcProxy);
+ }
}
}
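
For readers following the refactor: the constructor no longer dials a single address itself; callers now obtain one proxy per reachable nameservice from the static init() factory and release them through the Closeable contract. A minimal sketch of that call pattern, using only the class and the configuration key shown in this diff; the main() scaffolding and the sample address are illustrative, not part of the commit:

    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocolPB.InMemoryAliasMapProtocolClientSideTranslatorPB;
    import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
    import org.apache.hadoop.ipc.RPC;

    public class AliasMapInitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Non-HA case: one aliasmap address (sample value for illustration).
        conf.set("dfs.provided.aliasmap.inmemory.dnrpc-address",
            "localhost:50200");
        // init() returns one proxy per nameservice it could connect to.
        Collection<InMemoryAliasMapProtocol> proxies =
            InMemoryAliasMapProtocolClientSideTranslatorPB.init(conf);
        try {
          for (InMemoryAliasMapProtocol proxy : proxies) {
            System.out.println("Block pool: " + proxy.getBlockPoolId());
          }
        } finally {
          for (InMemoryAliasMapProtocol proxy : proxies) {
            RPC.stopProxy(proxy); // mirrors InMemoryLevelDBAliasMapClient#close
          }
        }
      }
    }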
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java
index 89f590c..c3824e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryAliasMapProtocol.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.io.retry.Idempotent;
import javax.annotation.Nonnull;
import java.io.IOException;
@@ -69,6 +70,7 @@ public interface InMemoryAliasMapProtocol {
* FileRegions and the next marker.
* @throws IOException
*/
+ @Idempotent
InMemoryAliasMap.IterationResult list(Optional<Block> marker)
throws IOException;
@@ -80,6 +82,7 @@ public interface InMemoryAliasMapProtocol {
* @throws IOException
*/
@Nonnull
+ @Idempotent
Optional<ProvidedStorageLocation> read(@Nonnull Block block)
throws IOException;
@@ -90,6 +93,7 @@ public interface InMemoryAliasMapProtocol {
* @param providedStorageLocation
* @throws IOException
*/
+ @Idempotent
void write(@Nonnull Block block,
@Nonnull ProvidedStorageLocation providedStorageLocation)
throws IOException;
@@ -99,5 +103,6 @@ public interface InMemoryAliasMapProtocol {
* @return the block pool id associated with the Namenode running
* the in-memory alias map.
*/
+ @Idempotent
String getBlockPoolId() throws IOException;
}
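
The @Idempotent annotations are what make the new failover path safe: a failover proxy provider may replay a call against another Namenode after a failure, which is only sound when repeating the call cannot change state. A self-contained toy, deliberately free of Hadoop APIs, illustrating that retry contract:

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.Callable;

    public class IdempotentRetrySketch {
      // Try each target in turn; repeating an idempotent call on the next
      // target is safe because it has no (or safely repeatable) side effects.
      static <T> T retryIdempotent(List<Callable<T>> targets) throws Exception {
        Exception last = null;
        for (Callable<T> target : targets) { // e.g. first Namenode, then next
          try {
            return target.call();
          } catch (Exception e) {
            last = e; // fail over to the next target
          }
        }
        throw last; // assumes at least one target was supplied
      }

      public static void main(String[] args) throws Exception {
        List<Callable<String>> targets = Arrays.asList(
            () -> { throw new java.io.IOException("standby"); },
            () -> "bpid-from-active");
        System.out.println(retryIdempotent(targets)); // prints bpid-from-active
      }
    }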
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
index 4edc9a2..1d06f13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/aliasmap/InMemoryLevelDBAliasMapServer.java
@@ -25,7 +25,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
import org.apache.hadoop.hdfs.protocolPB.AliasMapProtocolPB;
@@ -34,9 +33,13 @@ import org.apache.hadoop.ipc.RPC;
import javax.annotation.Nonnull;
import java.io.Closeable;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.util.Optional;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST;
+import static org.apache.hadoop.hdfs.DFSUtil.getBindAddress;
import static org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.*;
import static org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap.CheckedFunction2;
@@ -79,18 +82,16 @@ public class InMemoryLevelDBAliasMapServer implements InMemoryAliasMapProtocol,
AliasMapProtocolService
.newReflectiveBlockingService(aliasMapProtocolXlator);
- String rpcAddress =
- conf.get(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
- DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT);
- String[] split = rpcAddress.split(":");
- String bindHost = split[0];
- Integer port = Integer.valueOf(split[1]);
+ InetSocketAddress rpcAddress = getBindAddress(conf,
+ DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
+ DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS_DEFAULT,
+ DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST);
aliasMapServer = new RPC.Builder(conf)
.setProtocol(AliasMapProtocolPB.class)
.setInstance(aliasMapProtocolService)
- .setBindAddress(bindHost)
- .setPort(port)
+ .setBindAddress(rpcAddress.getHostName())
+ .setPort(rpcAddress.getPort())
.setNumHandlers(1)
.setVerbose(true)
.build();
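
The server-side change replaces the naive host:port split with DFSUtil.getBindAddress, so the optional dfs.provided.aliasmap.inmemory.rpc.bind-host can override the hostname portion of the advertised rpc address. A standalone sketch of that contract; the real resolution lives in DFSUtil and is not shown in this diff:

    import java.net.InetSocketAddress;

    public class BindAddressSketch {
      // addr takes the form host:port; bindHost, when set, replaces host.
      static InetSocketAddress resolve(String rpcAddr, String bindHost,
          String defaultAddr) {
        String addr = (rpcAddr != null) ? rpcAddr : defaultAddr;
        int sep = addr.lastIndexOf(':');
        String host = addr.substring(0, sep);
        int port = Integer.parseInt(addr.substring(sep + 1));
        if (bindHost != null && !bindHost.isEmpty()) {
          host = bindHost; // e.g. 0.0.0.0 to listen on all interfaces
        }
        return new InetSocketAddress(host, port);
      }

      public static void main(String[] args) {
        // Advertised address stays nn1.example.com:50200; the socket binds
        // to all interfaces instead.
        System.out.println(resolve("nn1.example.com:50200", "0.0.0.0", null));
      }
    }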
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java
index d389184..fb5ee93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/InMemoryLevelDBAliasMapClient.java
@@ -24,11 +24,17 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation;
import org.apache.hadoop.hdfs.protocolPB.InMemoryAliasMapProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
+import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
@@ -44,17 +50,28 @@ import java.util.Optional;
public class InMemoryLevelDBAliasMapClient extends BlockAliasMap<FileRegion>
implements Configurable {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(InMemoryLevelDBAliasMapClient.class);
private Configuration conf;
- private InMemoryAliasMapProtocolClientSideTranslatorPB aliasMap;
- private String blockPoolID;
+ private Collection<InMemoryAliasMapProtocol> aliasMaps;
@Override
public void close() {
- aliasMap.stop();
+ if (aliasMaps != null) {
+ for (InMemoryAliasMapProtocol aliasMap : aliasMaps) {
+ RPC.stopProxy(aliasMap);
+ }
+ }
}
class LevelDbReader extends BlockAliasMap.Reader<FileRegion> {
+ private InMemoryAliasMapProtocol aliasMap;
+
+ LevelDbReader(InMemoryAliasMapProtocol aliasMap) {
+ this.aliasMap = aliasMap;
+ }
+
@Override
public Optional<FileRegion> resolve(Block block) throws IOException {
Optional<ProvidedStorageLocation> read = aliasMap.read(block);
@@ -114,6 +131,13 @@ public class InMemoryLevelDBAliasMapClient extends BlockAliasMap<FileRegion>
}
class LevelDbWriter extends BlockAliasMap.Writer<FileRegion> {
+
+ private InMemoryAliasMapProtocol aliasMap;
+
+ LevelDbWriter(InMemoryAliasMapProtocol aliasMap) {
+ this.aliasMap = aliasMap;
+ }
+
@Override
public void store(FileRegion fileRegion) throws IOException {
aliasMap.write(fileRegion.getBlock(),
@@ -130,40 +154,53 @@ public class InMemoryLevelDBAliasMapClient extends BlockAliasMap<FileRegion>
throw new UnsupportedOperationException("Unable to start "
+ "InMemoryLevelDBAliasMapClient as security is enabled");
}
+ aliasMaps = new ArrayList<>();
}
-
- @Override
- public Reader<FileRegion> getReader(Reader.Options opts, String blockPoolID)
+ private InMemoryAliasMapProtocol getAliasMap(String blockPoolID)
throws IOException {
- if (this.blockPoolID == null) {
- this.blockPoolID = aliasMap.getBlockPoolId();
+ if (blockPoolID == null) {
+ throw new IOException("Block pool id required to get aliasmap reader");
}
// if a block pool id has been supplied, and doesn't match the associated
- // block pool id, return null.
- if (blockPoolID != null && this.blockPoolID != null
- && !this.blockPoolID.equals(blockPoolID)) {
- return null;
+ // block pool ids, an IOException is thrown below.
+ for (InMemoryAliasMapProtocol aliasMap : aliasMaps) {
+ try {
+ String aliasMapBlockPoolId = aliasMap.getBlockPoolId();
+ if (aliasMapBlockPoolId != null &&
+ aliasMapBlockPoolId.equals(blockPoolID)) {
+ return aliasMap;
+ }
+ } catch (IOException e) {
+ LOG.error("Exception in retrieving block pool id {}", e);
+ }
}
- return new LevelDbReader();
+ throw new IOException(
+ "Unable to retrive InMemoryAliasMap for block pool id " + blockPoolID);
+ }
+
+ @Override
+ public Reader<FileRegion> getReader(Reader.Options opts, String blockPoolID)
+ throws IOException {
+ InMemoryAliasMapProtocol aliasMap = getAliasMap(blockPoolID);
+ LOG.info("Loading InMemoryAliasMapReader for block pool id {}",
+ blockPoolID);
+ return new LevelDbReader(aliasMap);
}
@Override
public Writer<FileRegion> getWriter(Writer.Options opts, String blockPoolID)
throws IOException {
- if (this.blockPoolID == null) {
- this.blockPoolID = aliasMap.getBlockPoolId();
- }
- if (blockPoolID != null && !this.blockPoolID.equals(blockPoolID)) {
- return null;
- }
- return new LevelDbWriter();
+ InMemoryAliasMapProtocol aliasMap = getAliasMap(blockPoolID);
+ LOG.info("Loading InMemoryAliasMapWriter for block pool id {}",
+ blockPoolID);
+ return new LevelDbWriter(aliasMap);
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
- this.aliasMap = new InMemoryAliasMapProtocolClientSideTranslatorPB(conf);
+ aliasMaps = InMemoryAliasMapProtocolClientSideTranslatorPB.init(conf);
}
@Override
@@ -174,5 +211,4 @@ public class InMemoryLevelDBAliasMapClient extends BlockAliasMap<FileRegion>
@Override
public void refresh() throws IOException {
}
-
}
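
Taken together, the client is now block-pool aware: setConf() builds one protocol proxy per reachable nameservice, and getReader()/getWriter() pick the proxy whose block pool id matches. A usage sketch under the assumption that an aliasmap server for the given block pool is running; the block pool id reuses the sample value from the tests below, and error handling is omitted:

    import java.util.Optional;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.common.FileRegion;
    import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
    import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.InMemoryLevelDBAliasMapClient;

    public class AliasMapClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // aliasmap address assumed set
        InMemoryLevelDBAliasMapClient client = new InMemoryLevelDBAliasMapClient();
        client.setConf(conf); // populates aliasMaps via ...TranslatorPB.init()
        BlockAliasMap.Reader<FileRegion> reader =
            client.getReader(null, "BP-1234-10.1.1.1-1224"); // bpid is mandatory
        Optional<FileRegion> region = reader.resolve(new Block(42L));
        region.ifPresent(r -> System.out.println(r.getBlock()));
        client.close(); // stops every RPC proxy that setConf() created
      }
    }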
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 146ae6c..6dd2d92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4817,9 +4817,27 @@
<property>
<name>dfs.provided.aliasmap.inmemory.dnrpc-address</name>
- <value>0.0.0.0:50200</value>
+ <value></value>
+ <description>
+ The address where the aliasmap server will be running. In the case of
+ HA/Federation where multiple Namenodes exist, and if the Namenode is
+ configured to run the aliasmap server
+ (dfs.provided.aliasmap.inmemory.enabled is set to true),
+ the nameservice id is appended to the property name, e.g.,
+ dfs.provided.aliasmap.inmemory.dnrpc-address.EXAMPLENAMESERVICE.
+ The value of this property takes the form of host:rpc-port.
+ </description>
+ </property>
+
+ <property>
+ <name>dfs.provided.aliasmap.inmemory.rpc.bind-host</name>
+ <value></value>
<description>
- The address where the aliasmap server will be running
+ The actual address the in-memory aliasmap server will bind to.
+ If this optional address is set, it overrides the hostname portion of
+ dfs.provided.aliasmap.inmemory.dnrpc-address.
+ This is useful for making the Namenode listen on all interfaces by
+ setting it to 0.0.0.0.
</description>
</property>
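
An illustrative hdfs-site.xml snippet wiring this up for the EXAMPLENAMESERVICE placeholder used above; the hostname is a placeholder too, and note that the test changes below additionally suffix the keys with a Namenode id (nameservice.namenodeId) in the HA case:

    <property>
      <name>dfs.provided.aliasmap.inmemory.dnrpc-address.EXAMPLENAMESERVICE</name>
      <value>nn1.example.com:50200</value>
    </property>
    <property>
      <name>dfs.provided.aliasmap.inmemory.rpc.bind-host.EXAMPLENAMESERVICE</name>
      <value>0.0.0.0</value>
    </property>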
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index c2e2a68..a2e5951 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1187,7 +1187,7 @@ public class MiniDFSCluster implements AutoCloseable {
}
- private void initNameNodeConf(Configuration conf, String nameserviceId, int nsIndex, String nnId,
+ protected void initNameNodeConf(Configuration conf, String nameserviceId, int nsIndex, String nnId,
boolean manageNameDfsDirs, boolean enableManagedDfsDirsRedundancy, int nnIndex)
throws IOException {
if (nameserviceId != null) {
@@ -1379,6 +1379,17 @@ public class MiniDFSCluster implements AutoCloseable {
return null;
}
+ public List<Integer> getNNIndexes(String nameserviceId) {
+ int count = 0;
+ List<Integer> nnIndexes = new ArrayList<>();
+ for (NameNodeInfo nn : namenodes.values()) {
+ if (nn.getNameserviceId().equals(nameserviceId)) {
+ nnIndexes.add(count);
+ }
+ count++;
+ }
+ return nnIndexes;
+ }
/**
* wait for the given namenode to get out of safemode.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
index b9786a3..c21ff80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
@@ -227,7 +227,7 @@ public class MiniDFSNNTopology {
this.nnId = nnId;
}
- String getNnId() {
+ public String getNnId() {
return nnId;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
index 61a1558..f062633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/blockaliasmap/impl/TestInMemoryLevelDBAliasMapClient.java
@@ -32,6 +32,7 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
@@ -341,4 +342,10 @@ public class TestInMemoryLevelDBAliasMapClient {
assertThat(actualFileRegions).containsExactlyInAnyOrder(
expectedFileRegions.toArray(new FileRegion[0]));
}
+
+ @Test
+ public void testServerBindHost() throws Exception {
+ conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, "0.0.0.0");
+ writeRead();
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1804a315/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
index 7d3ab0e..e3f4dec 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
@@ -24,12 +24,18 @@ import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.net.InetSocketAddress;
+import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
-import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -44,14 +50,14 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
-import org.apache.hadoop.hdfs.server.aliasmap.InMemoryLevelDBAliasMapServer;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -60,8 +66,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.InMemoryLevelDBAliasMapClient;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap;
import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -71,6 +79,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.junit.After;
import org.junit.Before;
@@ -80,6 +90,12 @@ import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LEVELDB_PATH;
+import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap.fileNameFromBlockPoolID;
import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
import static org.junit.Assert.*;
@@ -106,6 +122,7 @@ public class ITestProvidedImplementation {
private final int baseFileLen = 1024;
private long providedDataSize = 0;
private final String bpid = "BP-1234-10.1.1.1-1224";
+ private static final String clusterID = "CID-PROVIDED";
private Configuration conf;
private MiniDFSCluster cluster;
@@ -214,36 +231,78 @@ public class ITestProvidedImplementation {
StorageType[] storageTypes,
StorageType[][] storageTypesPerDatanode,
boolean doFormat, String[] racks) throws IOException {
+ startCluster(nspath, numDatanodes,
+ storageTypes, storageTypesPerDatanode,
+ doFormat, racks, null,
+ new MiniDFSCluster.Builder(conf));
+ }
+
+ void startCluster(Path nspath, int numDatanodes,
+ StorageType[] storageTypes,
+ StorageType[][] storageTypesPerDatanode,
+ boolean doFormat, String[] racks,
+ MiniDFSNNTopology topo,
+ MiniDFSCluster.Builder builder) throws IOException {
conf.set(DFS_NAMENODE_NAME_DIR_KEY, nspath.toString());
+ builder.format(doFormat)
+ .manageNameDfsDirs(doFormat)
+ .numDataNodes(numDatanodes)
+ .racks(racks);
if (storageTypesPerDatanode != null) {
- cluster = new MiniDFSCluster.Builder(conf)
- .format(doFormat)
- .manageNameDfsDirs(doFormat)
- .numDataNodes(numDatanodes)
- .storageTypes(storageTypesPerDatanode)
- .racks(racks)
- .build();
+ builder.storageTypes(storageTypesPerDatanode);
} else if (storageTypes != null) {
- cluster = new MiniDFSCluster.Builder(conf)
- .format(doFormat)
- .manageNameDfsDirs(doFormat)
- .numDataNodes(numDatanodes)
- .storagesPerDatanode(storageTypes.length)
- .storageTypes(storageTypes)
- .racks(racks)
- .build();
- } else {
- cluster = new MiniDFSCluster.Builder(conf)
- .format(doFormat)
- .manageNameDfsDirs(doFormat)
- .numDataNodes(numDatanodes)
- .racks(racks)
- .build();
+ builder.storagesPerDatanode(storageTypes.length)
+ .storageTypes(storageTypes);
}
+ if (topo != null) {
+ builder.nnTopology(topo);
+ // If HA or Federation is enabled and formatting is set to false,
+ // copy the FSImage to all Namenode directories.
+ if ((topo.isHA() || topo.isFederated()) && !doFormat) {
+ builder.manageNameDfsDirs(true);
+ builder.enableManagedDfsDirsRedundancy(false);
+ builder.manageNameDfsSharedDirs(false);
+ List<File> nnDirs =
+ getProvidedNamenodeDirs(MiniDFSCluster.getBaseDirectory(), topo);
+ for (File nnDir : nnDirs) {
+ MiniDFSCluster.copyNameDirs(
+ Collections.singletonList(nspath.toUri()),
+ Collections.singletonList(fileAsURI(nnDir)),
+ conf);
+ }
+ }
+ }
+ cluster = builder.build();
cluster.waitActive();
}
+ private static List<File> getProvidedNamenodeDirs(String baseDir,
+ MiniDFSNNTopology topo) {
+ List<File> nnDirs = new ArrayList<>();
+ int nsCounter = 0;
+ for (MiniDFSNNTopology.NSConf nsConf : topo.getNameservices()) {
+ int nnCounter = nsCounter;
+ for (MiniDFSNNTopology.NNConf nnConf : nsConf.getNNs()) {
+ if (providedNameservice.equals(nsConf.getId())) {
+ // use only the first configured name directory of each Namenode
+ File[] nnFiles =
+ MiniDFSCluster.getNameNodeDirectory(
+ baseDir, nsCounter, nnCounter);
+ if (nnFiles == null || nnFiles.length == 0) {
+ throw new RuntimeException("Failed to get a location for the"
+ + "Namenode directory for namespace: " + nsConf.getId()
+ + " and namenodeId: " + nnConf.getNnId());
+ }
+ nnDirs.add(nnFiles[0]);
+ }
+ nnCounter++;
+ }
+ nsCounter = nnCounter;
+ }
+ return nnDirs;
+ }
+
@Test(timeout=20000)
public void testLoadImage() throws Exception {
final long seed = r.nextLong();
@@ -405,8 +464,8 @@ public class ITestProvidedImplementation {
return ret;
}
- private void verifyFileSystemContents() throws Exception {
- FileSystem fs = cluster.getFileSystem();
+ private void verifyFileSystemContents(int nnIndex) throws Exception {
+ FileSystem fs = cluster.getFileSystem(nnIndex);
int count = 0;
// read NN metadata, verify contents match
for (TreePath e : new FSTreeWalk(providedPath, conf)) {
@@ -766,41 +825,255 @@ public class ITestProvidedImplementation {
}
}
-
- @Test
- public void testInMemoryAliasMap() throws Exception {
- conf.setClass(ImageWriter.Options.UGI_CLASS,
- FsUGIResolver.class, UGIResolver.class);
+ private File createInMemoryAliasMapImage() throws Exception {
+ conf.setClass(ImageWriter.Options.UGI_CLASS, FsUGIResolver.class,
+ UGIResolver.class);
conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
InMemoryLevelDBAliasMapClient.class, BlockAliasMap.class);
- conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
- "localhost:32445");
+ conf.set(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS, "localhost:32445");
File tempDirectory =
- Files.createTempDirectory("in-memory-alias-map").toFile();
- File leveDBPath = new File(tempDirectory, bpid);
- leveDBPath.mkdirs();
- conf.set(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
+ new File(new Path(nnDirPath, "in-memory-alias-map").toUri());
+ File levelDBDir = new File(tempDirectory, bpid);
+ levelDBDir.mkdirs();
+ conf.set(DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
tempDirectory.getAbsolutePath());
- conf.setBoolean(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED, true);
conf.setInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES, 10);
- InMemoryLevelDBAliasMapServer levelDBAliasMapServer =
- new InMemoryLevelDBAliasMapServer(InMemoryAliasMap::init, bpid);
- levelDBAliasMapServer.setConf(conf);
- levelDBAliasMapServer.start();
+ conf.set(DFS_PROVIDED_ALIASMAP_LEVELDB_PATH,
+ tempDirectory.getAbsolutePath());
createImage(new FSTreeWalk(providedPath, conf),
nnDirPath,
- FixedBlockResolver.class, "",
- InMemoryLevelDBAliasMapClient.class);
- levelDBAliasMapServer.close();
+ FixedBlockResolver.class, clusterID,
+ LevelDBFileRegionAliasMap.class);
+
+ return tempDirectory;
+ }
+ @Test
+ public void testInMemoryAliasMap() throws Exception {
+ File aliasMapImage = createInMemoryAliasMapImage();
// start cluster with two datanodes,
// each with 1 PROVIDED volume and other DISK volume
+ conf.setBoolean(DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED, true);
+ conf.setInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES, 10);
startCluster(nnDirPath, 2,
new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
null, false);
- verifyFileSystemContents();
- FileUtils.deleteDirectory(tempDirectory);
+ verifyFileSystemContents(0);
+ FileUtils.deleteDirectory(aliasMapImage);
+ }
+
+ /**
+ * Find a free port that hasn't been assigned yet.
+ *
+ * @param usedPorts set of ports that have already been assigned.
+ * @param maxTrials maximum number of random ports to try before failure.
+ * @return an unassigned port.
+ */
+ private int getUnAssignedPort(Set<Integer> usedPorts, int maxTrials) {
+ int count = 0;
+ while (count < maxTrials) {
+ int port = NetUtils.getFreeSocketPort();
+ if (usedPorts.contains(port)) {
+ count++;
+ } else {
+ return port;
+ }
+ }
+ return -1;
+ }
+
+ private static String providedNameservice;
+
+ /**
+ * Extends the {@link MiniDFSCluster.Builder} to create instances of
+ * {@link MiniDFSClusterAliasMap}.
+ */
+ private static class MiniDFSClusterBuilderAliasMap
+ extends MiniDFSCluster.Builder {
+
+ MiniDFSClusterBuilderAliasMap(Configuration conf) {
+ super(conf);
+ }
+
+ @Override
+ public MiniDFSCluster build() throws IOException {
+ return new MiniDFSClusterAliasMap(this);
+ }
+ }
+
+ /**
+ * Extends {@link MiniDFSCluster} to correctly configure the InMemoryAliasMap.
+ */
+ private static class MiniDFSClusterAliasMap extends MiniDFSCluster {
+
+ private Map<String, Collection<URI>> formattedDirsByNamespaceId;
+ private Set<Integer> completedNNs;
+
+ MiniDFSClusterAliasMap(MiniDFSCluster.Builder builder) throws IOException {
+ super(builder);
+ }
+
+ @Override
+ protected void initNameNodeConf(Configuration conf, String nameserviceId,
+ int nsIndex, String nnId, boolean manageNameDfsDirs,
+ boolean enableManagedDfsDirsRedundancy, int nnIndex)
+ throws IOException {
+
+ if (formattedDirsByNamespaceId == null) {
+ formattedDirsByNamespaceId = new HashMap<>();
+ completedNNs = new HashSet<>();
+ }
+
+ super.initNameNodeConf(conf, nameserviceId, nsIndex, nnId,
+ manageNameDfsDirs, enableManagedDfsDirsRedundancy, nnIndex);
+
+ if (providedNameservice.equals(nameserviceId)) {
+ // configure the InMemoryAliasMap.
+ conf.setBoolean(DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED, true);
+ String directory = conf.get(DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR);
+ if (directory == null) {
+ throw new IllegalArgumentException("In-memory alias map configured"
+ + "with the proper location; Set "
+ + DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR);
+ }
+ // get the name of the directory (final component in path) used for map.
+ // Assume that the aliasmap is configured with the same final component
+ // name in all Namenodes, but is located in the path specified by
+ // DFS_NAMENODE_NAME_DIR_KEY
+ String dirName = new Path(directory).getName();
+ String nnDir =
+ conf.getTrimmedStringCollection(DFS_NAMENODE_NAME_DIR_KEY)
+ .iterator().next();
+ conf.set(DFS_PROVIDED_ALIASMAP_INMEMORY_LEVELDB_DIR,
+ new File(new Path(nnDir, dirName).toUri()).getAbsolutePath());
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+ } else {
+ if (!completedNNs.contains(nnIndex)) {
+ // format the NN directories for non-provided namespaces
+ // if the directory for a namespace has been formatted, copy it over.
+ Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
+ if (formattedDirsByNamespaceId.containsKey(nameserviceId)) {
+ copyNameDirs(formattedDirsByNamespaceId.get(nameserviceId),
+ namespaceDirs, conf);
+ } else {
+ for (URI nameDirUri : namespaceDirs) {
+ File nameDir = new File(nameDirUri);
+ if (nameDir.exists() && !FileUtil.fullyDelete(nameDir)) {
+ throw new IOException("Could not fully delete " + nameDir);
+ }
+ }
+ HdfsServerConstants.StartupOption.FORMAT.setClusterId(clusterID);
+ DFSTestUtil.formatNameNode(conf);
+ formattedDirsByNamespaceId.put(nameserviceId, namespaceDirs);
+ }
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, false);
+ completedNNs.add(nnIndex);
+ }
+ }
+ }
+ }
+
+ /**
+ * Configures the addresses of the InMemoryAliasMap.
+ *
+ * @param topology the MiniDFS topology to use.
+ * @param providedNameservice the nameservice id that supports PROVIDED storage.
+ */
+ private void configureAliasMapAddresses(MiniDFSNNTopology topology,
+ String providedNameservice) {
+ conf.unset(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS);
+ Set<Integer> assignedPorts = new HashSet<>();
+ for (MiniDFSNNTopology.NSConf nsConf : topology.getNameservices()) {
+ for (MiniDFSNNTopology.NNConf nnConf : nsConf.getNNs()) {
+ if (providedNameservice.equals(nsConf.getId())) {
+ String key =
+ DFSUtil.addKeySuffixes(DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_ADDRESS,
+ nsConf.getId(), nnConf.getNnId());
+ int port = getUnAssignedPort(assignedPorts, 10);
+ if (port == -1) {
+ throw new RuntimeException("No free ports available");
+ }
+ assignedPorts.add(port);
+ conf.set(key, "127.0.0.1:" + port);
+
+ String bindHostKey =
+ DFSUtil.addKeySuffixes(
+ DFS_PROVIDED_ALIASMAP_INMEMORY_RPC_BIND_HOST,
+ nsConf.getId(), nnConf.getNnId());
+ conf.set(binHostKey, "0.0.0.0");
+ }
+ }
+ }
+ }
+
+ /**
+ * Verify the mounted contents of the Filesystem.
+ *
+ * @param topology the topology of the cluster.
+ * @param providedNameservice the nameservice id of the provided Namenodes.
+ * @throws Exception
+ */
+ private void verifyPathsWithHAFailoverIfNecessary(MiniDFSNNTopology topology,
+ String providedNameservice) throws Exception {
+ List<Integer> nnIndexes = cluster.getNNIndexes(providedNameservice);
+ if (topology.isHA()) {
+ int nn1 = nnIndexes.get(0);
+ int nn2 = nnIndexes.get(1);
+ try {
+ verifyFileSystemContents(nn1);
+ fail("Read operation should fail as no Namenode is active");
+ } catch (RemoteException e) {
+ LOG.info("verifyPaths failed!. Expected exception: {}" + e);
+ }
+ cluster.transitionToActive(nn1);
+ LOG.info("Verifying data from NN with index = {}", nn1);
+ verifyFileSystemContents(nn1);
+ // transition to the second namenode.
+ cluster.transitionToStandby(nn1);
+ cluster.transitionToActive(nn2);
+ LOG.info("Verifying data from NN with index = {}", nn2);
+ verifyFileSystemContents(nn2);
+
+ cluster.shutdownNameNodes();
+ try {
+ verifyFileSystemContents(nn2);
+ fail("Read operation should fail as no Namenode is active");
+ } catch (NullPointerException e) {
+ LOG.info("verifyPaths failed!. Expected exception: {}" + e);
+ }
+ } else {
+ verifyFileSystemContents(nnIndexes.get(0));
+ }
+ }
+
+ @Test
+ public void testInMemoryAliasMapMultiTopologies() throws Exception {
+ MiniDFSNNTopology[] topologies =
+ new MiniDFSNNTopology[] {
+ MiniDFSNNTopology.simpleHATopology(),
+ MiniDFSNNTopology.simpleFederatedTopology(3),
+ MiniDFSNNTopology.simpleHAFederatedTopology(3)
+ };
+
+ for (MiniDFSNNTopology topology : topologies) {
+ LOG.info("Starting test with topology with HA = {}, federation = {}",
+ topology.isHA(), topology.isFederated());
+ setSeed();
+ createInMemoryAliasMapImage();
+ conf.setBoolean(DFS_PROVIDED_ALIASMAP_INMEMORY_ENABLED, true);
+ conf.setInt(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES, 10);
+ providedNameservice = topology.getNameservices().get(0).getId();
+ // configure the AliasMap addresses
+ configureAliasMapAddresses(topology, providedNameservice);
+ startCluster(nnDirPath, 2,
+ new StorageType[] {StorageType.PROVIDED, StorageType.DISK},
+ null, false, null, topology,
+ new MiniDFSClusterBuilderAliasMap(conf));
+
+ verifyPathsWithHAFailoverIfNecessary(topology, providedNameservice);
+ shutdown();
+ }
}
private DatanodeDescriptor getDatanodeDescriptor(DatanodeManager dnm,
@@ -919,7 +1192,7 @@ public class ITestProvidedImplementation {
startCluster(nnDirPath, racks.length,
new StorageType[]{StorageType.PROVIDED, StorageType.DISK},
null, false, racks);
- verifyFileSystemContents();
+ verifyFileSystemContents(0);
setAndUnsetReplication("/" + filePrefix + (numFiles - 1) + fileSuffix);
cluster.shutdown();
}
[31/45] hadoop git commit: HADOOP-15574: Suppress build error if
there are no docs after excluding private annotations. Contributed by
Takanobu Asanuma.
Posted by xy...@apache.org.
HADOOP-15574: Suppress build error if there are no docs after excluding private annotations. Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4012184
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4012184
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4012184
Branch: refs/heads/HDDS-4
Commit: d40121845e0e1ace6b349180ced53292353dbb7c
Parents: cdb0844
Author: Nanda kumar <na...@apache.org>
Authored: Sun Jul 1 17:11:46 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Sun Jul 1 17:11:46 2018 +0530
----------------------------------------------------------------------
.../tools/ExcludePrivateAnnotationsStandardDoclet.java | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4012184/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
index 2176ea5..5c535c8 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
@@ -38,7 +38,11 @@ public class ExcludePrivateAnnotationsStandardDoclet {
public static boolean start(RootDoc root) {
System.out.println(
ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
- return Standard.start(RootDocProcessor.process(root));
+ RootDoc excludedDoc = RootDocProcessor.process(root);
+ if (excludedDoc.specifiedPackages().length == 0) {
+ return true;
+ }
+ return Standard.start(excludedDoc);
}
public static int optionLength(String option) {
[28/45] hadoop git commit: YARN-8451. Multiple NM heartbeat thread
created when a slow NM resync with RM. Contributed by Botong Huang
Posted by xy...@apache.org.
YARN-8451. Multiple NM heartbeat thread created when a slow NM resync with RM. Contributed by Botong Huang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10047014
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10047014
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10047014
Branch: refs/heads/HDDS-4
Commit: 100470140d86eede0fa240a9aa93226f274ee4f5
Parents: a820738
Author: Jason Lowe <jl...@apache.org>
Authored: Fri Jun 29 13:06:28 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Fri Jun 29 13:06:28 2018 -0500
----------------------------------------------------------------------
.../yarn/server/nodemanager/NodeManager.java | 66 +++++++++++++-------
.../nodemanager/TestNodeManagerResync.java | 56 +++++++++++++++++
2 files changed, 98 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10047014/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 2748a8f..c8234bd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -128,6 +128,7 @@ public class NodeManager extends CompositeService
// the NM collector service is set only if the timeline service v.2 is enabled
private NMCollectorService nmCollectorService;
private NodeStatusUpdater nodeStatusUpdater;
+ private AtomicBoolean resyncingWithRM = new AtomicBoolean(false);
private NodeResourceMonitor nodeResourceMonitor;
private static CompositeServiceShutdownHook nodeManagerShutdownHook;
private NMStateStoreService nmStore = null;
@@ -393,7 +394,7 @@ public class NodeManager extends CompositeService
addService(del);
// NodeManager level dispatcher
- this.dispatcher = new AsyncDispatcher("NM Event dispatcher");
+ this.dispatcher = createNMDispatcher();
nodeHealthChecker =
new NodeHealthCheckerService(
@@ -517,31 +518,41 @@ public class NodeManager extends CompositeService
}
protected void resyncWithRM() {
- //we do not want to block dispatcher thread here
- new Thread() {
- @Override
- public void run() {
- try {
- if (!rmWorkPreservingRestartEnabled) {
- LOG.info("Cleaning up running containers on resync");
- containerManager.cleanupContainersOnNMResync();
- // Clear all known collectors for resync.
- if (context.getKnownCollectors() != null) {
- context.getKnownCollectors().clear();
+ // Create a thread for resync because we do not want to block dispatcher
+ // thread here. Also use locking to make sure only one thread is running at
+ // a time.
+ if (this.resyncingWithRM.getAndSet(true)) {
+ // Some other thread has already been created for resyncing; do nothing.
+ } else {
+ // We have got the lock, create a new thread
+ new Thread() {
+ @Override
+ public void run() {
+ try {
+ if (!rmWorkPreservingRestartEnabled) {
+ LOG.info("Cleaning up running containers on resync");
+ containerManager.cleanupContainersOnNMResync();
+ // Clear all known collectors for resync.
+ if (context.getKnownCollectors() != null) {
+ context.getKnownCollectors().clear();
+ }
+ } else {
+ LOG.info("Preserving containers on resync");
+ // Re-register known timeline collectors.
+ reregisterCollectors();
}
- } else {
- LOG.info("Preserving containers on resync");
- // Re-register known timeline collectors.
- reregisterCollectors();
+ ((NodeStatusUpdaterImpl) nodeStatusUpdater)
+ .rebootNodeStatusUpdaterAndRegisterWithRM();
+ } catch (YarnRuntimeException e) {
+ LOG.error("Error while rebooting NodeStatusUpdater.", e);
+ shutDown(NodeManagerStatus.EXCEPTION.getExitCode());
+ } finally {
+ // Release lock
+ resyncingWithRM.set(false);
}
- ((NodeStatusUpdaterImpl) nodeStatusUpdater)
- .rebootNodeStatusUpdaterAndRegisterWithRM();
- } catch (YarnRuntimeException e) {
- LOG.error("Error while rebooting NodeStatusUpdater.", e);
- shutDown(NodeManagerStatus.EXCEPTION.getExitCode());
}
- }
- }.start();
+ }.start();
+ }
}
/**
@@ -946,7 +957,14 @@ public class NodeManager extends CompositeService
ContainerManagerImpl getContainerManager() {
return containerManager;
}
-
+
+ /**
+ * Unit-test friendly; subclasses can override to supply a test dispatcher.
+ */
+ protected AsyncDispatcher createNMDispatcher() {
+ return new AsyncDispatcher("NM Event dispatcher");
+ }
+
//For testing
Dispatcher getNMDispatcher(){
return dispatcher;
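
The fix hinges on AtomicBoolean.getAndSet(true) returning the previous value: only the caller that flips the flag from false to true spawns the resync thread, and the finally block releases it so a later resync order can run. A stripped-down sketch of the same single-flight guard, outside of any Hadoop types:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class SingleFlightSketch {
      private final AtomicBoolean running = new AtomicBoolean(false);

      // Start the task on a new thread unless a previous run is in flight.
      void triggerOnce(Runnable task) {
        if (running.getAndSet(true)) {
          return; // another thread is already resyncing; drop this request
        }
        new Thread(() -> {
          try {
            task.run();
          } finally {
            running.set(false); // release so the next event can trigger again
          }
        }).start();
      }

      public static void main(String[] args) {
        SingleFlightSketch s = new SingleFlightSketch();
        for (int i = 0; i < 4; i++) { // four events, at most one thread runs
          s.triggerOnce(() -> System.out.println("resync"));
        }
      }
    }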
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10047014/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
index cf33775..b3f4e1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
@@ -37,6 +37,7 @@ import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
@@ -64,7 +65,9 @@ import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -107,6 +110,7 @@ public class TestNodeManagerResync {
private FileContext localFS;
private CyclicBarrier syncBarrier;
private CyclicBarrier updateBarrier;
+ private AtomicInteger resyncThreadCount;
private AtomicBoolean assertionFailedInThread = new AtomicBoolean(false);
private AtomicBoolean isNMShutdownCalled = new AtomicBoolean(false);
private final NodeManagerEvent resyncEvent =
@@ -125,6 +129,7 @@ public class TestNodeManagerResync {
nmLocalDir.mkdirs();
syncBarrier = new CyclicBarrier(2);
updateBarrier = new CyclicBarrier(2);
+ resyncThreadCount = new AtomicInteger(0);
}
@After
@@ -186,6 +191,41 @@ public class TestNodeManagerResync {
}
@SuppressWarnings("resource")
+ @Test(timeout = 30000)
+ public void testNMMultipleResyncEvent()
+ throws IOException, InterruptedException {
+ TestNodeManager1 nm = new TestNodeManager1(false);
+ YarnConfiguration conf = createNMConfig();
+
+ int resyncEventCount = 4;
+ try {
+ nm.init(conf);
+ nm.start();
+ Assert.assertEquals(1, nm.getNMRegistrationCount());
+ for (int i = 0; i < resyncEventCount; i++) {
+ nm.getNMDispatcher().getEventHandler().handle(resyncEvent);
+ }
+
+ DrainDispatcher dispatcher = (DrainDispatcher) nm.getNMDispatcher();
+ dispatcher.await();
+ LOG.info("NM dispatcher drained");
+
+ // Wait for the resync thread to finish
+ try {
+ syncBarrier.await();
+ } catch (BrokenBarrierException e) {
+ }
+ LOG.info("Barrier wait done for the resync thread");
+
+ // Resync should only happen once
+ Assert.assertEquals(2, nm.getNMRegistrationCount());
+ Assert.assertFalse("NM shutdown called.", isNMShutdownCalled.get());
+ } finally {
+ nm.stop();
+ }
+ }
+
+ @SuppressWarnings("resource")
@Test(timeout=10000)
public void testNMshutdownWhenResyncThrowException() throws IOException,
InterruptedException, YarnException {
@@ -400,6 +440,11 @@ public class TestNodeManagerResync {
}
@Override
+ protected AsyncDispatcher createNMDispatcher() {
+ return new DrainDispatcher();
+ }
+
+ @Override
protected NodeStatusUpdater createNodeStatusUpdater(Context context,
Dispatcher dispatcher, NodeHealthCheckerService healthChecker) {
return new TestNodeStatusUpdaterImpl1(context, dispatcher,
@@ -410,6 +455,14 @@ public class TestNodeManagerResync {
return registrationCount;
}
+ @Override
+ protected void shutDown(int exitCode) {
+ synchronized (isNMShutdownCalled) {
+ isNMShutdownCalled.set(true);
+ isNMShutdownCalled.notify();
+ }
+ }
+
class TestNodeStatusUpdaterImpl1 extends MockNodeStatusUpdater {
public TestNodeStatusUpdaterImpl1(Context context, Dispatcher dispatcher,
@@ -428,6 +481,9 @@ public class TestNodeManagerResync {
ConcurrentMap<ContainerId, org.apache.hadoop.yarn.server.nodemanager
.containermanager.container.Container> containers =
getNMContext().getContainers();
+ if (resyncThreadCount.incrementAndGet() > 1) {
+ throw new YarnRuntimeException("Multiple resync thread created!");
+ }
try {
try {
if (containersShouldBePreserved) {
[10/45] hadoop git commit: Revert "HDDS-194. Remove NodePoolManager
and node pool handling from SCM. Contributed by Elek Marton"
Posted by xy...@apache.org.
Revert "HDDS-194. Remove NodePoolManager and node pool handling from SCM. Contributed by Elek Marton"
This reverts commit aaf03cc459a34af284f9735453aefd4ddb430d67.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d6fe5f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d6fe5f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d6fe5f3
Branch: refs/heads/HDDS-4
Commit: 0d6fe5f36be5b19aab89d995866e526c5feec758
Parents: aaf03cc
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jun 27 13:25:45 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jun 27 13:25:45 2018 -0700
----------------------------------------------------------------------
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 26 ++
.../org/apache/hadoop/ozone/OzoneConsts.java | 1 +
.../common/src/main/resources/ozone-default.xml | 47 +++
.../container/replication/ReplicationQueue.java | 78 -----
.../replication/ReplicationReqMsg.java | 107 ------
.../container/replication/package-info.java | 23 --
.../replication/TestReplicationQueue.java | 134 --------
.../container/replication/package-info.java | 23 --
.../hdds/scm/container/ContainerMapping.java | 10 +-
.../replication/ContainerSupervisor.java | 340 +++++++++++++++++++
.../container/replication/InProgressPool.java | 255 ++++++++++++++
.../scm/container/replication/PeriodicPool.java | 119 +++++++
.../scm/container/replication/package-info.java | 23 ++
.../hadoop/hdds/scm/node/NodeManager.java | 6 +
.../hadoop/hdds/scm/node/NodePoolManager.java | 71 ++++
.../hadoop/hdds/scm/node/SCMNodeManager.java | 23 ++
.../hdds/scm/node/SCMNodePoolManager.java | 269 +++++++++++++++
.../hdds/scm/container/MockNodeManager.java | 6 +
.../hdds/scm/node/TestSCMNodePoolManager.java | 160 +++++++++
.../testutils/ReplicationNodeManagerMock.java | 5 +
.../ReplicationNodePoolManagerMock.java | 133 ++++++++
.../hadoop/ozone/scm/TestContainerSQLCli.java | 31 ++
.../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 74 ++++
23 files changed, 1596 insertions(+), 368 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index df6fbf0..85407e6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -243,6 +243,32 @@ public final class ScmConfigKeys {
public static final String
OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s";
+ /**
+ * Don't start processing a pool if a minimum number of seconds has not
+ * elapsed since the last processing.
+ */
+ public static final String OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL =
+ "ozone.scm.container.report.processing.interval";
+ public static final String
+ OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT = "60s";
+
+ /**
+ * This determines the total number of pools to be processed in parallel.
+ */
+ public static final String OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS =
+ "ozone.scm.max.nodepool.processing.threads";
+ public static final int OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT = 1;
+ /**
+ * These two settings control the number of threads in the executor pool
+ * and the timeouts for container reports from all nodes.
+ */
+ public static final String OZONE_SCM_MAX_CONTAINER_REPORT_THREADS =
+ "ozone.scm.max.container.report.threads";
+ public static final int OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT = 100;
+ public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT =
+ "ozone.scm.container.reports.wait.timeout";
+ public static final String OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT =
+ "5m";
public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY =
"ozone.scm.block.deletion.max.retry";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 08a5ffd..c40dc8e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -91,6 +91,7 @@ public final class OzoneConsts {
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
public static final String BLOCK_DB = "block.db";
+ public static final String NODEPOOL_DB = "nodepool.db";
public static final String OPEN_CONTAINERS_DB = "openContainers.db";
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
public static final String KSM_DB_NAME = "ksm.db";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 25365c8..7a91610 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -572,6 +572,25 @@
</description>
</property>
<property>
+ <name>ozone.scm.container.report.processing.interval</name>
+ <value>60s</value>
+ <tag>OZONE, PERFORMANCE</tag>
+ <description>Time interval for SCM to process container reports
+ for a node pool. SCM handles node pool reports in a round-robin
+ fashion, fetching pools periodically at this time interval.
+ </description>
+ </property>
+ <property>
+ <name>ozone.scm.container.reports.wait.timeout</name>
+ <value>300s</value>
+ <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
+ <description>Maximum time to wait, in seconds, for processing all
+ container reports from a node pool. It determines the timeout for a
+ node pool report.
+ </description>
+ </property>
+ <property>
<name>ozone.scm.container.size.gb</name>
<value>5</value>
<tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
@@ -774,6 +793,17 @@
</description>
</property>
<property>
+ <name>ozone.scm.max.container.report.threads</name>
+ <value>100</value>
+ <tag>OZONE, PERFORMANCE</tag>
+ <description>
+ Maximum number of threads to process container reports in SCM.
+ Each container report from a datanode is processed by SCM in a worker
+ thread taken from a thread pool. This property is used to control the
+ maximum size of the thread pool.
+ </description>
+ </property>
+ <property>
<name>ozone.scm.max.hb.count.to.process</name>
<value>5000</value>
<tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
@@ -785,6 +815,14 @@
</description>
</property>
<property>
+ <name>ozone.scm.max.nodepool.processing.threads</name>
+ <value>1</value>
+ <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
+ <description>
+ Controls the number of node pools that the Container Supervisor can
+ process in parallel.
+ </description>
+ </property>
+ <property>
<name>ozone.scm.names</name>
<value/>
<tag>OZONE</tag>
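In a deployment these defaults would normally be overridden in ozone-site.xml;
a minimal programmatic sketch with hypothetical tuning values (property names
as defined above):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class TunedScmConfigDemo {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Hypothetical values, for illustration only.
    conf.set("ozone.scm.container.report.processing.interval", "30s");
    conf.set("ozone.scm.container.reports.wait.timeout", "2m");
    conf.setInt("ozone.scm.max.container.report.threads", 64);
    conf.setInt("ozone.scm.max.nodepool.processing.threads", 2);
  }
}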
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
deleted file mode 100644
index b83ecf1..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.util.List;
-import java.util.PriorityQueue;
-import java.util.Queue;
-
-/**
- * Priority queue to handle under-replicated and over replicated containers
- * in ozone. ReplicationManager will consume these messages and decide
- * accordingly.
- */
-public class ReplicationQueue {
-
- private final Queue<ReplicationReqMsg> queue;
-
- ReplicationQueue() {
- queue = new PriorityQueue<>();
- }
-
- public synchronized boolean add(ReplicationReqMsg repObj) {
- if (this.queue.contains(repObj)) {
- // Remove the earlier message and insert this one
- this.queue.remove(repObj);
- return this.queue.add(repObj);
- } else {
- return this.queue.add(repObj);
- }
- }
-
- public synchronized boolean remove(ReplicationReqMsg repObj) {
- return queue.remove(repObj);
- }
-
- /**
- * Retrieves, but does not remove, the head of this queue,
- * or returns {@code null} if this queue is empty.
- *
- * @return the head of this queue, or {@code null} if this queue is empty
- */
- public synchronized ReplicationReqMsg peek() {
- return queue.peek();
- }
-
- /**
- * Retrieves and removes the head of this queue,
- * or returns {@code null} if this queue is empty.
- *
- * @return the head of this queue, or {@code null} if this queue is empty
- */
- public synchronized ReplicationReqMsg poll() {
- return queue.poll();
- }
-
- public synchronized boolean removeAll(List<ReplicationReqMsg> repObjs) {
- return queue.removeAll(repObjs);
- }
-
- public int size() {
- return queue.size();
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java
deleted file mode 100644
index 8d26fc3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationReqMsg.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.io.Serializable;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.commons.lang3.math.NumberUtils;
-
-/**
- * Wrapper class for hdds replication queue. Implements its natural
- * ordering for priority queue.
- */
-public class ReplicationReqMsg implements Comparable<ReplicationReqMsg>,
- Serializable {
- private final long containerId;
- private final short replicationCount;
- private final short expecReplicationCount;
- private final long timestamp;
-
- public ReplicationReqMsg(long containerId, short replicationCount,
- long timestamp, short expecReplicationCount) {
- this.containerId = containerId;
- this.replicationCount = replicationCount;
- this.timestamp = timestamp;
- this.expecReplicationCount = expecReplicationCount;
- }
-
- /**
- * Compares this object with the specified object for order. Returns a
- * negative integer, zero, or a positive integer as this object is less
- * than, equal to, or greater than the specified object.
- * @param o the object to be compared.
- * @return a negative integer, zero, or a positive integer as this object
- * is less than, equal to, or greater than the specified object.
- * @throws NullPointerException if the specified object is null
- * @throws ClassCastException if the specified object's type prevents it
- * from being compared to this object.
- */
- @Override
- public int compareTo(ReplicationReqMsg o) {
- if (this == o) {
- return 0;
- }
- if (o == null) {
- return 1;
- }
- int retVal = NumberUtils
- .compare(getReplicationCount() - getExpecReplicationCount(),
- o.getReplicationCount() - o.getExpecReplicationCount());
- if (retVal != 0) {
- return retVal;
- }
- return NumberUtils.compare(getTimestamp(), o.getTimestamp());
- }
-
- @Override
- public int hashCode() {
- return new HashCodeBuilder(91, 1011)
- .append(getContainerId())
- .toHashCode();
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) {
- return true;
- }
- if (o == null || getClass() != o.getClass()) {
- return false;
- }
- ReplicationReqMsg that = (ReplicationReqMsg) o;
- return new EqualsBuilder().append(getContainerId(), that.getContainerId())
- .isEquals();
- }
-
- public long getContainerId() {
- return containerId;
- }
-
- public short getReplicationCount() {
- return replicationCount;
- }
-
- public long getTimestamp() {
- return timestamp;
- }
-
- public short getExpecReplicationCount() {
- return expecReplicationCount;
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 7f335e3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.replication;
-
-/**
- * Ozone Container replicaton related classes.
- */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
deleted file mode 100644
index 39c61d3..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.replication;
-
-import java.util.Random;
-import java.util.UUID;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Test class for ReplicationQueue.
- */
-public class TestReplicationQueue {
-
- private ReplicationQueue replicationQueue;
- private Random random;
-
- @Before
- public void setUp() {
- replicationQueue = new ReplicationQueue();
- random = new Random();
- }
-
- @Test
- public void testDuplicateAddOp() {
- long contId = random.nextLong();
- String nodeId = UUID.randomUUID().toString();
- ReplicationReqMsg obj1, obj2, obj3;
- long time = Time.monotonicNow();
- obj1 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3);
- obj2 = new ReplicationReqMsg(contId, (short) 2, time + 1, (short) 3);
- obj3 = new ReplicationReqMsg(contId, (short) 1, time+2, (short) 3);
-
- replicationQueue.add(obj1);
- replicationQueue.add(obj2);
- replicationQueue.add(obj3);
- Assert.assertEquals("Should add only 1 msg as second one is duplicate",
- 1, replicationQueue.size());
- ReplicationReqMsg temp = replicationQueue.poll();
- Assert.assertEquals(temp, obj3);
- }
-
- @Test
- public void testPollOp() {
- long contId = random.nextLong();
- String nodeId = UUID.randomUUID().toString();
- ReplicationReqMsg msg1, msg2, msg3, msg4, msg5;
- msg1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(),
- (short) 3);
- long time = Time.monotonicNow();
- msg2 = new ReplicationReqMsg(contId + 1, (short) 4, time, (short) 3);
- msg3 = new ReplicationReqMsg(contId + 2, (short) 0, time, (short) 3);
- msg4 = new ReplicationReqMsg(contId, (short) 2, time, (short) 3);
- // Replication message for same container but different nodeId
- msg5 = new ReplicationReqMsg(contId + 1, (short) 2, time, (short) 3);
-
- replicationQueue.add(msg1);
- replicationQueue.add(msg2);
- replicationQueue.add(msg3);
- replicationQueue.add(msg4);
- replicationQueue.add(msg5);
- Assert.assertEquals("Should have 3 objects",
- 3, replicationQueue.size());
-
- // Since Priority queue orders messages according to replication count,
- // message with lowest replication should be first
- ReplicationReqMsg temp;
- temp = replicationQueue.poll();
- Assert.assertEquals("Should have 2 objects",
- 2, replicationQueue.size());
- Assert.assertEquals(temp, msg3);
-
- temp = replicationQueue.poll();
- Assert.assertEquals("Should have 1 objects",
- 1, replicationQueue.size());
- Assert.assertEquals(temp, msg5);
-
- // Message 2 should be ordered before message 5 as both have same replication
- // number but message 2 has earlier timestamp.
- temp = replicationQueue.poll();
- Assert.assertEquals("Should have 0 objects",
- replicationQueue.size(), 0);
- Assert.assertEquals(temp, msg4);
- }
-
- @Test
- public void testRemoveOp() {
- long contId = random.nextLong();
- String nodeId = UUID.randomUUID().toString();
- ReplicationReqMsg obj1, obj2, obj3;
- obj1 = new ReplicationReqMsg(contId, (short) 1, Time.monotonicNow(),
- (short) 3);
- obj2 = new ReplicationReqMsg(contId + 1, (short) 2, Time.monotonicNow(),
- (short) 3);
- obj3 = new ReplicationReqMsg(contId + 2, (short) 3, Time.monotonicNow(),
- (short) 3);
-
- replicationQueue.add(obj1);
- replicationQueue.add(obj2);
- replicationQueue.add(obj3);
- Assert.assertEquals("Should have 3 objects",
- 3, replicationQueue.size());
-
- replicationQueue.remove(obj3);
- Assert.assertEquals("Should have 2 objects",
- 2, replicationQueue.size());
-
- replicationQueue.remove(obj2);
- Assert.assertEquals("Should have 1 objects",
- 1, replicationQueue.size());
-
- replicationQueue.remove(obj1);
- Assert.assertEquals("Should have 0 objects",
- 0, replicationQueue.size());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
deleted file mode 100644
index 5b1fd0f..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * SCM Testing and Mocking Utils.
- */
-package org.apache.hadoop.ozone.container.replication;
-// Test classes for Replication functionality.
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 9fd30f2..b563e90 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
@@ -79,6 +80,7 @@ public class ContainerMapping implements Mapping {
private final PipelineSelector pipelineSelector;
private final ContainerStateManager containerStateManager;
private final LeaseManager<ContainerInfo> containerLeaseManager;
+ private final ContainerSupervisor containerSupervisor;
private final float containerCloseThreshold;
private final ContainerCloser closer;
private final long size;
@@ -125,7 +127,9 @@ public class ContainerMapping implements Mapping {
OZONE_SCM_CONTAINER_SIZE_DEFAULT) * 1024 * 1024 * 1024;
this.containerStateManager =
new ContainerStateManager(conf, this);
-
+ this.containerSupervisor =
+ new ContainerSupervisor(conf, nodeManager,
+ nodeManager.getNodePoolManager());
this.containerCloseThreshold = conf.getFloat(
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD,
ScmConfigKeys.OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT);
@@ -403,8 +407,8 @@ public class ContainerMapping implements Mapping {
throws IOException {
List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
containerInfos = reports.getReportsList();
-
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
+ containerSupervisor.handleContainerReport(datanodeDetails, reports);
+ for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
containerInfos) {
byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
lock.lock();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
new file mode 100644
index 0000000..5bd0574
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static com.google.common.util.concurrent.Uninterruptibles
+ .sleepUninterruptibly;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT;
+
+/**
+ * This class takes a set of container reports that belong to a pool and then
+ * computes the replication levels for each container.
+ */
+public class ContainerSupervisor implements Closeable {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(ContainerSupervisor.class);
+
+ private final NodePoolManager poolManager;
+ private final HashSet<String> poolNames;
+ private final PriorityQueue<PeriodicPool> poolQueue;
+ private final NodeManager nodeManager;
+ private final long containerProcessingLag;
+ private final AtomicBoolean runnable;
+ private final ExecutorService executorService;
+ private final long maxPoolWait;
+ private long poolProcessCount;
+ private final List<InProgressPool> inProgressPoolList;
+ private final AtomicInteger threadFaultCount;
+ private final int inProgressPoolMaxCount;
+
+ private final ReadWriteLock inProgressPoolListLock;
+
+ /**
+ * Returns the number of times we have processed pools.
+ * @return long
+ */
+ public long getPoolProcessCount() {
+ return poolProcessCount;
+ }
+
+
+ /**
+ * Constructs a class that computes Replication Levels.
+ *
+ * @param conf - OzoneConfiguration
+ * @param nodeManager - Node Manager
+ * @param poolManager - Pool Manager
+ */
+ public ContainerSupervisor(Configuration conf, NodeManager nodeManager,
+ NodePoolManager poolManager) {
+ Preconditions.checkNotNull(poolManager);
+ Preconditions.checkNotNull(nodeManager);
+ this.containerProcessingLag =
+ conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,
+ OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,
+ TimeUnit.SECONDS
+ ) * 1000;
+ int maxContainerReportThreads =
+ conf.getInt(OZONE_SCM_MAX_CONTAINER_REPORT_THREADS,
+ OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT
+ );
+ this.maxPoolWait =
+ conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT,
+ OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
+ this.inProgressPoolMaxCount = conf.getInt(
+ OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS,
+ OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT);
+ this.poolManager = poolManager;
+ this.nodeManager = nodeManager;
+ this.poolNames = new HashSet<>();
+ this.poolQueue = new PriorityQueue<>();
+ this.runnable = new AtomicBoolean(true);
+ this.threadFaultCount = new AtomicInteger(0);
+ this.executorService = newCachedThreadPool(
+ new ThreadFactoryBuilder().setDaemon(true)
+ .setNameFormat("Container Reports Processing Thread - %d")
+ .build(), maxContainerReportThreads);
+ this.inProgressPoolList = new LinkedList<>();
+ this.inProgressPoolListLock = new ReentrantReadWriteLock();
+
+ initPoolProcessThread();
+ }
+
+ private ExecutorService newCachedThreadPool(ThreadFactory threadFactory,
+ int maxThreads) {
+ return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
+ new LinkedBlockingQueue<>(), threadFactory);
+ }
+
+ /**
+ * Returns the number of pools that are under process right now.
+ * @return int - Number of pools that are in process.
+ */
+ public int getInProgressPoolCount() {
+ return inProgressPoolList.size();
+ }
+
+ /**
+ * Exits the background thread.
+ */
+ public void setExit() {
+ this.runnable.set(false);
+ }
+
+ /**
+ * Adds or removes pools from the set of pool names that we need to process.
+ *
+ * There are two cases to handle: pools being added to the cluster and
+ * pools being removed from it.
+ */
+ private void refreshPools() {
+ List<String> pools = this.poolManager.getNodePools();
+ if (pools != null) {
+
+ HashSet<String> removedPools =
+ computePoolDifference(this.poolNames, new HashSet<>(pools));
+
+ HashSet<String> addedPools =
+ computePoolDifference(new HashSet<>(pools), this.poolNames);
+ // TODO: Support remove pool API in pool manager so that this code
+ // path can be tested. This never happens in the current code base.
+ for (String poolName : removedPools) {
+ for (PeriodicPool periodicPool : poolQueue) {
+ if (periodicPool.getPoolName().compareTo(poolName) == 0) {
+ poolQueue.remove(periodicPool);
+ }
+ }
+ }
+ // Remove the pool names that we have in the list.
+ this.poolNames.removeAll(removedPools);
+
+ for (String poolName : addedPools) {
+ poolQueue.add(new PeriodicPool(poolName));
+ }
+
+ // Add to the pool names we are tracking.
+ poolNames.addAll(addedPools);
+ }
+
+ }
+
+ /**
+ * Computes the set difference: the pool names present in newPools but
+ * absent from oldPool.
+ *
+ * @param newPools - new pools list.
+ * @param oldPool - old pools list.
+ * @return the pool names in newPools that are not in oldPool.
+ */
+ private HashSet<String> computePoolDifference(HashSet<String> newPools,
+ Set<String> oldPool) {
+ Preconditions.checkNotNull(newPools);
+ Preconditions.checkNotNull(oldPool);
+ HashSet<String> newSet = new HashSet<>(newPools);
+ newSet.removeAll(oldPool);
+ return newSet;
+ }
+
+ private void initPoolProcessThread() {
+
+ /*
+ * Task that runs to check if we need to start a pool processing job.
+ * If so, we create a pool reconciliation job and find out if all the
+ * expected containers are on the nodes.
+ */
+ Runnable processPools = () -> {
+ while (runnable.get()) {
+ // Make sure that we don't have any new pools.
+ refreshPools();
+ while (inProgressPoolList.size() < inProgressPoolMaxCount) {
+ PeriodicPool pool = poolQueue.poll();
+ if (pool != null) {
+ if (pool.getLastProcessedTime() + this.containerProcessingLag >
+ Time.monotonicNow()) {
+ LOG.debug("Not within the time window for processing: {}",
+ pool.getPoolName());
+ // we might over sleep here, not a big deal.
+ sleepUninterruptibly(this.containerProcessingLag,
+ TimeUnit.MILLISECONDS);
+ }
+ LOG.debug("Adding pool {} to container processing queue",
+ pool.getPoolName());
+ InProgressPool inProgressPool = new InProgressPool(maxPoolWait,
+ pool, this.nodeManager, this.poolManager, this.executorService);
+ inProgressPool.startReconciliation();
+ inProgressPoolListLock.writeLock().lock();
+ try {
+ inProgressPoolList.add(inProgressPool);
+ } finally {
+ inProgressPoolListLock.writeLock().unlock();
+ }
+ poolProcessCount++;
+ } else {
+ break;
+ }
+ }
+ sleepUninterruptibly(this.maxPoolWait, TimeUnit.MILLISECONDS);
+ inProgressPoolListLock.readLock().lock();
+ try {
+ for (InProgressPool inProgressPool : inProgressPoolList) {
+ inProgressPool.finalizeReconciliation();
+ poolQueue.add(inProgressPool.getPool());
+ }
+ } finally {
+ inProgressPoolListLock.readLock().unlock();
+ }
+ inProgressPoolListLock.writeLock().lock();
+ try {
+ inProgressPoolList.clear();
+ } finally {
+ inProgressPoolListLock.writeLock().unlock();
+ }
+ }
+ };
+
+ // We will have only one thread for pool processing.
+ Thread poolProcessThread = new Thread(processPools);
+ poolProcessThread.setDaemon(true);
+ poolProcessThread.setName("Pool replica thread");
+ poolProcessThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> {
+ // Let us just restart this thread after logging a critical error.
+ // If this thread is not running we cannot handle commands from SCM.
+ LOG.error("Critical Error : Pool replica thread encountered an " +
+ "error. Thread: {} Error Count : {}", t.toString(),
+ threadFaultCount.incrementAndGet(), e);
+ // A terminated Thread cannot be started again, so spawn a fresh
+ // daemon thread running the same task with this same handler.
+ Thread replacement = new Thread(processPools);
+ replacement.setDaemon(true);
+ replacement.setName("Pool replica thread");
+ replacement.setUncaughtExceptionHandler(t.getUncaughtExceptionHandler());
+ replacement.start();
+ // TODO : Add a config to restrict how many times we will restart this
+ // thread in a single session.
+ });
+ poolProcessThread.start();
+ }
+
+ /**
+ * Adds a container report to the appropriate in-progress pool.
+ * @param datanodeDetails - datanode from which the report was received.
+ * @param containerReport - container report for a specific container from
+ * a datanode.
+ */
+ public void handleContainerReport(DatanodeDetails datanodeDetails,
+ ContainerReportsProto containerReport) {
+ inProgressPoolListLock.readLock().lock();
+ try {
+ String poolName = poolManager.getNodePool(datanodeDetails);
+ for (InProgressPool ppool : inProgressPoolList) {
+ if (ppool.getPoolName().equalsIgnoreCase(poolName)) {
+ ppool.handleContainerReport(datanodeDetails, containerReport);
+ return;
+ }
+ }
+ // TODO: Decide if we can do anything else with this report.
+ LOG.debug("Discarding the container report for pool {}. " +
+ "That pool is not currently in the pool reconciliation process." +
+ " Container Name: {}", poolName, datanodeDetails);
+ } catch (SCMException e) {
+ LOG.warn("Skipping processing container report from datanode {}, "
+ + "cause: failed to get the corresponding node pool",
+ datanodeDetails.toString(), e);
+ } finally {
+ inProgressPoolListLock.readLock().unlock();
+ }
+ }
+
+ /**
+ * Get the in-progress pool list; used for testing.
+ * @return List of InProgressPool
+ */
+ @VisibleForTesting
+ public List<InProgressPool> getInProcessPoolList() {
+ return inProgressPoolList;
+ }
+
+ /**
+ * Shutdown the Container Replication Manager.
+ * @throws IOException if an I/O error occurs
+ */
+ @Override
+ public void close() throws IOException {
+ setExit();
+ HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS);
+ }
+}
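A minimal lifecycle sketch for the supervisor, assuming a NodeManager wired up
as in the SCMNodeManager changes later in this patch (the demo class and
method are hypothetical, error handling elided):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor;
import org.apache.hadoop.hdds.scm.node.NodeManager;

public final class SupervisorLifecycleDemo {
  static void demo(Configuration conf, NodeManager nodeManager,
      DatanodeDetails dn, ContainerReportsProto reports) throws IOException {
    ContainerSupervisor supervisor = new ContainerSupervisor(
        conf, nodeManager, nodeManager.getNodePoolManager());
    // The report is routed to the in-progress pool covering this datanode,
    // or logged and discarded if its pool is not under reconciliation.
    supervisor.handleContainerReport(dn, reports);
    // Stops the background pool-processing loop and the report executor.
    supervisor.close();
  }
}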
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
new file mode 100644
index 0000000..4b54731
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+/**
+ * These are pools that are actively checking for replication status of the
+ * containers.
+ */
+public final class InProgressPool {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(InProgressPool.class);
+
+ private final PeriodicPool pool;
+ private final NodeManager nodeManager;
+ private final NodePoolManager poolManager;
+ private final ExecutorService executorService;
+ private final Map<Long, Integer> containerCountMap;
+ private final Map<UUID, Boolean> processedNodeSet;
+ private final long startTime;
+ private ProgressStatus status;
+ private AtomicInteger nodeCount;
+ private AtomicInteger nodeProcessed;
+ private AtomicInteger containerProcessedCount;
+ private long maxWaitTime;
+ /**
+ * Constructs a pool that is being processed.
+ * @param maxWaitTime - Maximum wait time in milliseconds.
+ * @param pool - Pool that we are working against
+ * @param nodeManager - Nodemanager
+ * @param poolManager - pool manager
+ * @param executorService - Shared Executor service.
+ */
+ InProgressPool(long maxWaitTime, PeriodicPool pool,
+ NodeManager nodeManager, NodePoolManager poolManager,
+ ExecutorService executorService) {
+ Preconditions.checkNotNull(pool);
+ Preconditions.checkNotNull(nodeManager);
+ Preconditions.checkNotNull(poolManager);
+ Preconditions.checkNotNull(executorService);
+ Preconditions.checkArgument(maxWaitTime > 0);
+ this.pool = pool;
+ this.nodeManager = nodeManager;
+ this.poolManager = poolManager;
+ this.executorService = executorService;
+ this.containerCountMap = new ConcurrentHashMap<>();
+ this.processedNodeSet = new ConcurrentHashMap<>();
+ this.maxWaitTime = maxWaitTime;
+ startTime = Time.monotonicNow();
+ }
+
+ /**
+ * Returns periodic pool.
+ *
+ * @return PeriodicPool
+ */
+ public PeriodicPool getPool() {
+ return pool;
+ }
+
+ /**
+ * We are done if we have received reports from all nodes or we are
+ * done waiting for the specified time.
+ *
+ * @return true if we are done, false otherwise.
+ */
+ public boolean isDone() {
+ return (nodeCount.get() == nodeProcessed.get()) ||
+ (this.startTime + this.maxWaitTime) < Time.monotonicNow();
+ }
+
+ /**
+ * Gets the number of containers processed.
+ *
+ * @return int
+ */
+ public int getContainerProcessedCount() {
+ return containerProcessedCount.get();
+ }
+
+ /**
+ * Returns the start time in milliseconds.
+ *
+ * @return - Start Time.
+ */
+ public long getStartTime() {
+ return startTime;
+ }
+
+ /**
+ * Get the number of nodes in this pool.
+ *
+ * @return - node count
+ */
+ public int getNodeCount() {
+ return nodeCount.get();
+ }
+
+ /**
+ * Get the number of nodes that we have already processed container reports
+ * from.
+ *
+ * @return - Processed count.
+ */
+ public int getNodeProcessed() {
+ return nodeProcessed.get();
+ }
+
+ /**
+ * Returns the current status.
+ *
+ * @return Status
+ */
+ public ProgressStatus getStatus() {
+ return status;
+ }
+
+ /**
+ * Starts the reconciliation process for all the nodes in the pool.
+ */
+ public void startReconciliation() {
+ List<DatanodeDetails> datanodeDetailsList =
+ this.poolManager.getNodes(pool.getPoolName());
+ if (datanodeDetailsList.isEmpty()) {
+ LOG.error("Datanode list for {} is empty. Pool with no nodes?",
+ pool.getPoolName());
+ this.status = ProgressStatus.Error;
+ return;
+ }
+
+ nodeProcessed = new AtomicInteger(0);
+ containerProcessedCount = new AtomicInteger(0);
+ nodeCount = new AtomicInteger(0);
+ this.status = ProgressStatus.InProgress;
+ this.getPool().setLastProcessedTime(Time.monotonicNow());
+ }
+
+ /**
+ * Queues a container report for handling. This is done in a worker thread
+ * since decoding a container report might be compute intensive. We don't
+ * want to block since we have asked for a bunch of container reports
+ * from a set of datanodes.
+ *
+ * @param containerReport - ContainerReport
+ */
+ public void handleContainerReport(DatanodeDetails datanodeDetails,
+ ContainerReportsProto containerReport) {
+ if (status == ProgressStatus.InProgress) {
+ executorService.submit(processContainerReport(datanodeDetails,
+ containerReport));
+ } else {
+ LOG.debug("Cannot handle container report when the pool is in {} status.",
+ status);
+ }
+ }
+
+ private Runnable processContainerReport(DatanodeDetails datanodeDetails,
+ ContainerReportsProto reports) {
+ return () -> {
+ // putIfAbsent returns null only on the first report from this node,
+ // so each datanode is counted exactly once.
+ if (processedNodeSet.putIfAbsent(datanodeDetails.getUuid(),
+ Boolean.TRUE) == null) {
+ nodeProcessed.incrementAndGet();
+ LOG.debug("Total Nodes processed : {} Node Name: {} ", nodeProcessed,
+ datanodeDetails.getUuid());
+ for (ContainerInfo info : reports.getReportsList()) {
+ containerProcessedCount.incrementAndGet();
+ LOG.debug("Total Containers processed: {} Container Name: {}",
+ containerProcessedCount.get(), info.getContainerID());
+
+ // Update the container map with count + 1 if the key exists or
+ // update the map with 1. Since this is a concurrentMap the
+ // computation and update is atomic.
+ containerCountMap.merge(info.getContainerID(), 1, Integer::sum);
+ }
+ }
+ };
+ }
+
+ /**
+ * Filter the containers based on specific rules.
+ *
+ * @param predicate -- Predicate to filter by
+ * @return A list of map entries.
+ */
+ public List<Map.Entry<Long, Integer>> filterContainer(
+ Predicate<Map.Entry<Long, Integer>> predicate) {
+ return containerCountMap.entrySet().stream()
+ .filter(predicate).collect(Collectors.toList());
+ }
+
+ /**
+ * Used only for testing; calling this will abort container report
+ * processing. This is a very dangerous call and should not be made by any user.
+ */
+ @VisibleForTesting
+ public void setDoneProcessing() {
+ nodeProcessed.set(nodeCount.get());
+ }
+
+ /**
+ * Returns the pool name.
+ *
+ * @return Name of the pool.
+ */
+ String getPoolName() {
+ return pool.getPoolName();
+ }
+
+ public void finalizeReconciliation() {
+ status = ProgressStatus.Done;
+ //TODO: Add finalizing logic. This is where actual reconciliation happens.
+ }
+
+ /**
+ * Current status of the computing replication status.
+ */
+ public enum ProgressStatus {
+ InProgress, Done, Error
+ }
+}
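One intended use of filterContainer() is picking out containers whose observed
replica count diverges from the expected factor; a short sketch (the expected
count of three and the helper class are hypothetical):

import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdds.scm.container.replication.InProgressPool;

public final class ReplicaFilterDemo {
  static List<Map.Entry<Long, Integer>> underReplicated(InProgressPool pool) {
    final int expectedReplicas = 3; // hypothetical replication factor
    // Each entry maps a containerID to the number of datanodes in this pool
    // that reported it; fewer than expected means under-replicated.
    return pool.filterContainer(entry -> entry.getValue() < expectedReplicas);
  }
}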
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
new file mode 100644
index 0000000..ef28aa7
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Periodic pool is a pool with a time stamp; this allows us to process pools
+ * based on a cyclic clock.
+ */
+public class PeriodicPool implements Comparable<PeriodicPool> {
+ private final String poolName;
+ private long lastProcessedTime;
+ private AtomicLong totalProcessedCount;
+
+ /**
+ * Constructs a periodic pool.
+ *
+ * @param poolName - Name of the pool
+ */
+ public PeriodicPool(String poolName) {
+ this.poolName = poolName;
+ lastProcessedTime = 0;
+ totalProcessedCount = new AtomicLong(0);
+ }
+
+ /**
+ * Get pool Name.
+ * @return PoolName
+ */
+ public String getPoolName() {
+ return poolName;
+ }
+
+ /**
+ * Compares this object with the specified object for order. Returns a
+ * negative integer, zero, or a positive integer as this object is less
+ * than, equal to, or greater than the specified object.
+ *
+ * @param o the object to be compared.
+ * @return a negative integer, zero, or a positive integer as this object is
+ * less than, equal to, or greater than the specified object.
+ * @throws NullPointerException if the specified object is null
+ * @throws ClassCastException if the specified object's type prevents it
+ * from being compared to this object.
+ */
+ @Override
+ public int compareTo(PeriodicPool o) {
+ return Long.compare(this.lastProcessedTime, o.lastProcessedTime);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ PeriodicPool that = (PeriodicPool) o;
+
+ return poolName.equals(that.poolName);
+ }
+
+ @Override
+ public int hashCode() {
+ return poolName.hashCode();
+ }
+
+ /**
+ * Returns the Total Times we have processed this pool.
+ *
+ * @return processed count.
+ */
+ public long getTotalProcessedCount() {
+ return totalProcessedCount.get();
+ }
+
+ /**
+ * Gets the last time we processed this pool.
+ * @return time in milliseconds
+ */
+ public long getLastProcessedTime() {
+ return this.lastProcessedTime;
+ }
+
+ /**
+ * Sets the last processed time.
+ *
+ * @param lastProcessedTime - Long in milliseconds.
+ */
+ public void setLastProcessedTime(long lastProcessedTime) {
+ this.lastProcessedTime = lastProcessedTime;
+ }
+
+ /**
+ * Increments the total processed count.
+ */
+ public void incTotalProcessedCount() {
+ this.totalProcessedCount.incrementAndGet();
+ }
+}
\ No newline at end of file
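Because compareTo() orders pools by lastProcessedTime, a PriorityQueue of
PeriodicPool objects always yields the least recently processed pool first,
which is what gives ContainerSupervisor its cyclic-clock behavior. A small
sketch with hypothetical pool names:

import java.util.PriorityQueue;
import org.apache.hadoop.hdds.scm.container.replication.PeriodicPool;

public final class PoolOrderingDemo {
  public static void main(String[] args) {
    PriorityQueue<PeriodicPool> queue = new PriorityQueue<>();
    PeriodicPool a = new PeriodicPool("pool-a");
    PeriodicPool b = new PeriodicPool("pool-b");
    a.setLastProcessedTime(2000L);
    b.setLastProcessedTime(1000L);
    queue.add(a);
    queue.add(b);
    // pool-b was processed least recently, so it is polled first.
    System.out.println(queue.poll().getPoolName()); // prints pool-b
  }
}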
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
new file mode 100644
index 0000000..7bbe2ef
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+/*
+ This package contains routines that manage replication of a container. This
+ relies on container reports to understand the replication level of a
+ container (UnderReplicated, Replicated, OverReplicated) and manages the
+ replication level based on that.
+ */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 72d7e94..4392633 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -124,6 +124,12 @@ public interface NodeManager extends StorageContainerNodeProtocol,
SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails);
/**
+ * Returns the NodePoolManager associated with the NodeManager.
+ * @return NodePoolManager
+ */
+ NodePoolManager getNodePoolManager();
+
+ /**
* Wait for the heartbeat is processed by NodeManager.
* @return true if heartbeat has been processed.
*/
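The new accessor simply exposes the pool manager to collaborators such as
ContainerMapping; a hypothetical caller sketch:

import java.util.List;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodePoolManager;

public final class PoolLookupDemo {
  static List<String> listPools(NodeManager nodeManager) {
    NodePoolManager poolManager = nodeManager.getNodePoolManager();
    return poolManager.getNodePools(); // empty list if none are defined
  }
}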
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
new file mode 100644
index 0000000..46faf9ca
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Interface that defines SCM NodePoolManager.
+ */
+public interface NodePoolManager extends Closeable {
+
+ /**
+ * Add a node to a node pool.
+ * @param pool - name of the node pool.
+ * @param node - data node.
+ */
+ void addNode(String pool, DatanodeDetails node) throws IOException;
+
+ /**
+ * Remove a node from a node pool.
+ * @param pool - name of the node pool.
+ * @param node - data node.
+ * @throws SCMException
+ */
+ void removeNode(String pool, DatanodeDetails node)
+ throws SCMException;
+
+ /**
+ * Get a list of known node pools.
+ * @return a list of known node pool names or an empty list if no node pool
+ * is defined.
+ */
+ List<String> getNodePools();
+
+ /**
+ * Get all nodes of a node pool given the name of the node pool.
+ * @param pool - name of the node pool.
+ * @return a list of datanode ids or an empty list if the node pool was not
+ * found.
+ */
+ List<DatanodeDetails> getNodes(String pool);
+
+ /**
+ * Get the node pool name if the node has been added to a node pool.
+ * @param datanodeDetails - datanode ID.
+ * @return node pool name if it has been assigned.
+ * null if the node has not been assigned to any node pool yet.
+ */
+ String getNodePool(DatanodeDetails datanodeDetails) throws SCMException;
+}
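A short usage sketch against this interface (the pool name and demo class are
hypothetical; SCMNodePoolManager below is the concrete implementation):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.node.NodePoolManager;

public final class NodePoolDemo {
  static void demo(NodePoolManager poolManager, DatanodeDetails node)
      throws IOException {
    poolManager.addNode("rack-1-pool", node);    // hypothetical pool name
    String pool = poolManager.getNodePool(node); // returns "rack-1-pool"
    List<DatanodeDetails> members = poolManager.getNodes(pool);
    poolManager.removeNode(pool, node);
  }
}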
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index adca8ea..fc8b013 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import com.google.protobuf.GeneratedMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -158,6 +159,7 @@ public class SCMNodeManager
private ObjectName nmInfoBean;
// Node pool manager.
+ private final SCMNodePoolManager nodePoolManager;
private final StorageContainerManager scmManager;
public static final Event<CommandForDatanode> DATANODE_COMMAND =
@@ -208,6 +210,7 @@ public class SCMNodeManager
registerMXBean();
+ this.nodePoolManager = new SCMNodePoolManager(conf);
this.scmManager = scmManager;
}
@@ -679,6 +682,7 @@ public class SCMNodeManager
@Override
public void close() throws IOException {
unregisterMXBean();
+ nodePoolManager.close();
executorService.shutdown();
try {
if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
@@ -756,6 +760,20 @@ public class SCMNodeManager
LOG.info("Leaving startup chill mode.");
}
+ // TODO: define node pool policy for non-default node pool.
+ // For now, all nodes are added to the "DefaultNodePool" upon registration
+ // if it has not been added to any node pool yet.
+ try {
+ if (nodePoolManager.getNodePool(datanodeDetails) == null) {
+ nodePoolManager.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL,
+ datanodeDetails);
+ }
+ } catch (IOException e) {
+ // TODO: make sure registration failure is handled correctly.
+ return RegisteredCommand.newBuilder()
+ .setErrorCode(ErrorCode.errorNodeNotPermitted)
+ .build();
+ }
// Updating Node Report, as registration is successful
updateNodeStat(datanodeDetails.getUuid(), nodeReport);
LOG.info("Data node with ID: {} Registered.",
@@ -842,6 +860,11 @@ public class SCMNodeManager
}
@Override
+ public NodePoolManager getNodePoolManager() {
+ return nodePoolManager;
+ }
+
+ @Override
public Map<String, Integer> getNodeCount() {
Map<String, Integer> nodeCountMap = new HashMap<String, Integer>();
for(NodeState state : NodeState.values()) {
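With this change, a datanode that registers successfully and has no prior pool
assignment always ends up in the default pool; a hedged test-style sketch of
the observable behavior (demo class hypothetical, registration setup elided):

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdds.scm.node.SCMNodePoolManager;

public final class DefaultPoolDemo {
  // After register() succeeds, an otherwise unassigned datanode is
  // expected to be found in "DefaultNodePool".
  static boolean inDefaultPool(NodeManager nodeManager, DatanodeDetails dn)
      throws SCMException {
    NodePoolManager poolManager = nodeManager.getNodePoolManager();
    return SCMNodePoolManager.DEFAULT_NODEPOOL
        .equals(poolManager.getNodePool(dn));
  }
}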
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
new file mode 100644
index 0000000..faf330e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
@@ -0,0 +1,269 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.utils.MetadataStoreBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+ .FAILED_TO_FIND_NODE_IN_POOL;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+ .FAILED_TO_LOAD_NODEPOOL;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
+
+/**
+ * SCM node pool manager that manages node pools.
+ */
+public final class SCMNodePoolManager implements NodePoolManager {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SCMNodePoolManager.class);
+ private static final List<DatanodeDetails> EMPTY_NODE_LIST =
+ new ArrayList<>();
+ private static final List<String> EMPTY_NODEPOOL_LIST = new ArrayList<>();
+ public static final String DEFAULT_NODEPOOL = "DefaultNodePool";
+
+ // DB that saves the node to node pool mapping.
+ private MetadataStore nodePoolStore;
+
+ // In-memory node pool to nodes mapping
+ private HashMap<String, Set<DatanodeDetails>> nodePools;
+
+ // Read-write lock for nodepool operations
+ private ReadWriteLock lock;
+
+ /**
+ * Construct SCMNodePoolManager class that manages node to node pool mapping.
+ * @param conf - configuration.
+ * @throws IOException
+ */
+ public SCMNodePoolManager(final OzoneConfiguration conf)
+ throws IOException {
+ final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
+ OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
+ File metaDir = getOzoneMetaDirPath(conf);
+ String scmMetaDataDir = metaDir.getPath();
+ File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB);
+ nodePoolStore = MetadataStoreBuilder.newBuilder()
+ .setConf(conf)
+ .setDbFile(nodePoolDBPath)
+ .setCacheSize(cacheSize * OzoneConsts.MB)
+ .build();
+ nodePools = new HashMap<>();
+ lock = new ReentrantReadWriteLock();
+ init();
+ }
+
+ /**
+   * Initializes the in-memory map from the persistent LevelDB store.
+   * No lock is needed as init() is only invoked by the constructor.
+ * @throws SCMException
+ */
+ private void init() throws SCMException {
+ try {
+ nodePoolStore.iterate(null, (key, value) -> {
+ try {
+ DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf(
+ HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key));
+ String poolName = DFSUtil.bytes2String(value);
+
+        nodePools.computeIfAbsent(poolName, k -> new HashSet<>())
+            .add(nodeId);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Adding node: {} to node pool: {}",
+ nodeId, poolName);
+ }
+ } catch (IOException e) {
+ LOG.warn("Can't add a datanode to node pool, continue next...");
+ }
+ return true;
+ });
+ } catch (IOException e) {
+ LOG.error("Loading node pool error " + e);
+ throw new SCMException("Failed to load node pool",
+ FAILED_TO_LOAD_NODEPOOL);
+ }
+ }
+
+ /**
+ * Add a datanode to a node pool.
+ * @param pool - name of the node pool.
+ * @param node - name of the datanode.
+ */
+ @Override
+ public void addNode(final String pool, final DatanodeDetails node)
+ throws IOException {
+ Preconditions.checkNotNull(pool, "pool name is null");
+ Preconditions.checkNotNull(node, "node is null");
+ lock.writeLock().lock();
+ try {
+ // add to the persistent store
+ nodePoolStore.put(node.getProtoBufMessage().toByteArray(),
+ DFSUtil.string2Bytes(pool));
+
+      // add to the in-memory store
+      nodePools.computeIfAbsent(pool, k -> new HashSet<>()).add(node);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Remove a datanode from a node pool.
+ * @param pool - name of the node pool.
+ * @param node - datanode id.
+ * @throws SCMException
+ */
+ @Override
+ public void removeNode(final String pool, final DatanodeDetails node)
+ throws SCMException {
+ Preconditions.checkNotNull(pool, "pool name is null");
+ Preconditions.checkNotNull(node, "node is null");
+ lock.writeLock().lock();
+ try {
+ // Remove from the persistent store
+ byte[] kName = node.getProtoBufMessage().toByteArray();
+ byte[] kData = nodePoolStore.get(kName);
+ if (kData == null) {
+ throw new SCMException(String.format("Unable to find node %s from" +
+ " pool %s in DB.", DFSUtil.bytes2String(kName), pool),
+ FAILED_TO_FIND_NODE_IN_POOL);
+ }
+ nodePoolStore.delete(kName);
+
+ // Remove from the in-memory store
+ if (nodePools.containsKey(pool)) {
+ Set<DatanodeDetails> nodePool = nodePools.get(pool);
+ nodePool.remove(node);
+ } else {
+ throw new SCMException(String.format("Unable to find node %s from" +
+ " pool %s in MAP.", DFSUtil.bytes2String(kName), pool),
+ FAILED_TO_FIND_NODE_IN_POOL);
+ }
+ } catch (IOException e) {
+ throw new SCMException("Failed to remove node " + node.toString()
+ + " from node pool " + pool, e,
+ SCMException.ResultCodes.IO_EXCEPTION);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ /**
+ * Get all the node pools.
+ * @return all the node pools.
+ */
+ @Override
+ public List<String> getNodePools() {
+ lock.readLock().lock();
+ try {
+ if (!nodePools.isEmpty()) {
+ return nodePools.keySet().stream().collect(Collectors.toList());
+ } else {
+ return EMPTY_NODEPOOL_LIST;
+ }
+ } finally {
+ lock.readLock().unlock();
+ }
+ }
+
+ /**
+ * Get all datanodes of a specific node pool.
+ * @param pool - name of the node pool.
+ * @return all datanodes of the specified node pool.
+ */
+ @Override
+  public List<DatanodeDetails> getNodes(final String pool) {
+    Preconditions.checkNotNull(pool, "pool name is null");
+    lock.readLock().lock();
+    try {
+      if (nodePools.containsKey(pool)) {
+        return nodePools.get(pool).stream().collect(Collectors.toList());
+      } else {
+        return EMPTY_NODE_LIST;
+      }
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+ /**
+ * Get the node pool name if the node has been added to a node pool.
+ * @param datanodeDetails - datanode ID.
+ * @return node pool name if it has been assigned.
+ * null if the node has not been assigned to any node pool yet.
+   * TODO: Put this in an in-memory map if performance is an issue.
+ */
+ @Override
+ public String getNodePool(final DatanodeDetails datanodeDetails)
+ throws SCMException {
+ Preconditions.checkNotNull(datanodeDetails, "node is null");
+ try {
+ byte[] result = nodePoolStore.get(
+ datanodeDetails.getProtoBufMessage().toByteArray());
+ return result == null ? null : DFSUtil.bytes2String(result);
+ } catch (IOException e) {
+ throw new SCMException("Failed to get node pool for node "
+ + datanodeDetails.toString(), e,
+ SCMException.ResultCodes.IO_EXCEPTION);
+ }
+ }
+
+ /**
+ * Close node pool level db store.
+ * @throws IOException
+ */
+ @Override
+ public void close() throws IOException {
+ nodePoolStore.close();
+ }
+}
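In short, the class above persists a node-to-pool map: addNode() writes an
entry whose key is the DatanodeDetailsProto bytes and whose value is the UTF-8
pool name, and init() replays the LevelDB store into the in-memory map on
restart. A minimal usage sketch, not part of this patch: it assumes a writable
ozone metadata directory and borrows the TestUtils helper used by the tests
below, so it belongs in test code.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.TestUtils;
    import org.apache.hadoop.hdds.scm.node.NodePoolManager;
    import org.apache.hadoop.hdds.scm.node.SCMNodePoolManager;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public final class NodePoolManagerSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Hypothetical directory; SCMNodePoolManager creates NODEPOOL_DB
        // under it.
        conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, "/tmp/scm-meta");

        NodePoolManager mgr = new SCMNodePoolManager(conf);
        try {
          DatanodeDetails dn = TestUtils.getDatanodeDetails();
          mgr.addNode(SCMNodePoolManager.DEFAULT_NODEPOOL, dn);

          // Membership is visible immediately and survives a restart,
          // because every mutation is written through to LevelDB.
          assert SCMNodePoolManager.DEFAULT_NODEPOOL
              .equals(mgr.getNodePool(dn));
          assert mgr.getNodes(SCMNodePoolManager.DEFAULT_NODEPOOL)
              .contains(dn);
        } finally {
          mgr.close();
        }
      }
    }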
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 80b5d6e..8c59462 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.container;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -272,6 +273,11 @@ public class MockNodeManager implements NodeManager {
return new SCMNodeMetric(nodeMetricMap.get(datanodeDetails.getUuid()));
}
+ @Override
+ public NodePoolManager getNodePoolManager() {
+ return Mockito.mock(NodePoolManager.class);
+ }
+
/**
* Used for testing.
*
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
new file mode 100644
index 0000000..8f412de
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java
@@ -0,0 +1,160 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.commons.collections.ListUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+ .ContainerPlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms
+ .SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for SCM node pool manager.
+ */
+public class TestSCMNodePoolManager {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestSCMNodePoolManager.class);
+
+ @Rule
+ public ExpectedException thrown = ExpectedException.none();
+
+ private final File testDir = PathUtils.getTestDir(
+ TestSCMNodePoolManager.class);
+
+ SCMNodePoolManager createNodePoolManager(OzoneConfiguration conf)
+ throws IOException {
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+ testDir.getAbsolutePath());
+ conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
+ SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
+ return new SCMNodePoolManager(conf);
+ }
+
+ /**
+ * Test default node pool.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testDefaultNodePool() throws IOException {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ try {
+ final String defaultPool = "DefaultPool";
+ NodePoolManager npMgr = createNodePoolManager(conf);
+
+ final int nodeCount = 4;
+ final List<DatanodeDetails> nodes = TestUtils
+ .getListOfDatanodeDetails(nodeCount);
+ assertEquals(0, npMgr.getNodePools().size());
+ for (DatanodeDetails node: nodes) {
+ npMgr.addNode(defaultPool, node);
+ }
+ List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
+ assertEquals(nodeCount, nodesRetrieved.size());
+ assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
+
+ DatanodeDetails nodeRemoved = nodes.remove(2);
+ npMgr.removeNode(defaultPool, nodeRemoved);
+ List<DatanodeDetails> nodesAfterRemove = npMgr.getNodes(defaultPool);
+ assertTwoDatanodeListsEqual(nodes, nodesAfterRemove);
+
+ List<DatanodeDetails> nonExistSet = npMgr.getNodes("NonExistSet");
+ assertEquals(0, nonExistSet.size());
+ } finally {
+ FileUtil.fullyDelete(testDir);
+ }
+ }
+
+
+ /**
+ * Test default node pool reload.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testDefaultNodePoolReload() throws IOException {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ final String defaultPool = "DefaultPool";
+ final int nodeCount = 4;
+ final List<DatanodeDetails> nodes = TestUtils
+ .getListOfDatanodeDetails(nodeCount);
+
+ try {
+ try {
+ SCMNodePoolManager npMgr = createNodePoolManager(conf);
+ assertEquals(0, npMgr.getNodePools().size());
+ for (DatanodeDetails node : nodes) {
+ npMgr.addNode(defaultPool, node);
+ }
+ List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
+ assertEquals(nodeCount, nodesRetrieved.size());
+ assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
+ npMgr.close();
+ } finally {
+ LOG.info("testDefaultNodePoolReload: Finish adding nodes to pool" +
+ " and close.");
+ }
+
+ // try reload with a new NodePoolManager instance
+ try {
+ SCMNodePoolManager npMgr = createNodePoolManager(conf);
+ List<DatanodeDetails> nodesRetrieved = npMgr.getNodes(defaultPool);
+ assertEquals(nodeCount, nodesRetrieved.size());
+ assertTwoDatanodeListsEqual(nodes, nodesRetrieved);
+ } finally {
+ LOG.info("testDefaultNodePoolReload: Finish reloading node pool.");
+ }
+ } finally {
+ FileUtil.fullyDelete(testDir);
+ }
+ }
+
+ /**
+ * Compare and verify that two datanode lists are equal.
+ * @param list1 - datanode list 1.
+ * @param list2 - datanode list 2.
+ */
+ private void assertTwoDatanodeListsEqual(List<DatanodeDetails> list1,
+ List<DatanodeDetails> list2) {
+ assertEquals(list1.size(), list2.size());
+ Collections.sort(list1);
+ Collections.sort(list2);
+ assertTrue(ListUtils.isEqualList(list1, list2));
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 1a4dcd7..072d821 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.CommandQueue;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -200,6 +201,10 @@ public class ReplicationNodeManagerMock implements NodeManager {
return null;
}
+ @Override
+ public NodePoolManager getNodePoolManager() {
+ return Mockito.mock(NodePoolManager.class);
+ }
/**
 * Wait until the heartbeat is processed by NodeManager.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
new file mode 100644
index 0000000..ffcd752
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodePoolManager;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Pool Manager replication mock.
+ */
+public class ReplicationNodePoolManagerMock implements NodePoolManager {
+
+  private final Map<DatanodeDetails, String> nodeMembership;
+
+ /**
+ * A node pool manager for testing.
+ */
+ public ReplicationNodePoolManagerMock() {
+    nodeMembership = new HashMap<>();
+ }
+
+ /**
+ * Add a node to a node pool.
+ *
+ * @param pool - name of the node pool.
+ * @param node - data node.
+ */
+ @Override
+ public void addNode(String pool, DatanodeDetails node) {
+    nodeMembership.put(node, pool);
+ }
+
+ /**
+ * Remove a node from a node pool.
+ *
+ * @param pool - name of the node pool.
+ * @param node - data node.
+ * @throws SCMException
+ */
+ @Override
+ public void removeNode(String pool, DatanodeDetails node)
+ throws SCMException {
+    nodeMembership.remove(node);
+
+ }
+
+ /**
+ * Get a list of known node pools.
+ *
+   * @return a list of known node pool names or an empty list if no node
+   * pool is defined.
+ */
+ @Override
+ public List<String> getNodePools() {
+ Set<String> poolSet = new HashSet<>();
+    for (Map.Entry<DatanodeDetails, String> entry : nodeMembership.entrySet()) {
+ poolSet.add(entry.getValue());
+ }
+ return new ArrayList<>(poolSet);
+
+ }
+
+ /**
+ * Get all nodes of a node pool given the name of the node pool.
+ *
+ * @param pool - name of the node pool.
+ * @return a list of datanode ids or an empty list if the node pool was not
+ * found.
+ */
+ @Override
+ public List<DatanodeDetails> getNodes(String pool) {
+ Set<DatanodeDetails> datanodeSet = new HashSet<>();
+    for (Map.Entry<DatanodeDetails, String> entry : nodeMembership.entrySet()) {
+ if (entry.getValue().equals(pool)) {
+ datanodeSet.add(entry.getKey());
+ }
+ }
+ return new ArrayList<>(datanodeSet);
+ }
+
+ /**
+ * Get the node pool name if the node has been added to a node pool.
+ *
+ * @param datanodeDetails DatanodeDetails.
+ * @return node pool name if it has been assigned. null if the node has not
+ * been assigned to any node pool yet.
+ */
+ @Override
+ public String getNodePool(DatanodeDetails datanodeDetails) {
+    return nodeMembership.get(datanodeDetails);
+ }
+
+ /**
+ * Closes this stream and releases any system resources associated
+ * with it. If the stream is already closed then invoking this
+ * method has no effect.
+ * <p>
+ * <p> As noted in {@link AutoCloseable#close()}, cases where the
+ * close may fail require careful attention. It is strongly advised
+ * to relinquish the underlying resources and to internally
+ * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+ * the {@code IOException}.
+ *
+ * @throws IOException if an I/O error occurs
+ */
+ @Override
+ public void close() throws IOException {
+
+ }
+}
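For illustration only (an editor's sketch, not from the patch), the mock drops
in wherever a NodePoolManager is needed in replication tests; inside a JUnit
test method that declares throws Exception it could be exercised like this.
Note that the mock's removeNode() ignores its pool argument, which is harmless
as long as a test keeps each node in a single pool:

    NodePoolManager poolManager = new ReplicationNodePoolManagerMock();
    DatanodeDetails dn = TestUtils.getDatanodeDetails();

    poolManager.addNode("pool-a", dn);
    // Membership lives in a plain HashMap, so there is no persistence.
    Assert.assertEquals("pool-a", poolManager.getNodePool(dn));
    Assert.assertEquals(1, poolManager.getNodes("pool-a").size());

    poolManager.removeNode("pool-a", dn);
    Assert.assertNull(poolManager.getNodePool(dn));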
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index b4ed2b1..4d70af8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -51,9 +51,12 @@ import java.util.Collection;
import java.util.HashMap;
import java.util.UUID;
+import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
import static org.apache.hadoop.ozone.OzoneConsts.KB;
+import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
/**
* This class tests the CLI that transforms container into SQLite DB files.
@@ -174,6 +177,34 @@ public class TestContainerSQLCli {
}
@Test
+ public void testConvertNodepoolDB() throws Exception {
+ String dbOutPath = GenericTestUtils.getTempPath(
+ UUID.randomUUID() + "/out_sql.db");
+ String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+ String dbPath = dbRootPath + "/" + NODEPOOL_DB;
+ String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+ cli.run(args);
+
+ // verify the sqlite db
+ HashMap<String, String> expectedPool = new HashMap<>();
+ for (DatanodeDetails dnid : nodeManager.getAllNodes()) {
+ expectedPool.put(dnid.getUuidString(), "DefaultNodePool");
+ }
+ Connection conn = connectDB(dbOutPath);
+ String sql = "SELECT * FROM nodePool";
+ ResultSet rs = executeQuery(conn, sql);
+    while (rs.next()) {
+      String datanodeUUID = rs.getString("datanodeUUID");
+      String poolName = rs.getString("poolName");
+      assertTrue(poolName.equals(expectedPool.remove(datanodeUUID)));
+ }
+ assertEquals(0, expectedPool.size());
+
+ Files.delete(Paths.get(dbOutPath));
+ }
+
+ @Test
public void testConvertContainerDB() throws Exception {
String dbOutPath = GenericTestUtils.getTempPath(
UUID.randomUUID() + "/out_sql.db");
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[15/45] hadoop git commit: HDDS-193. Make Datanode heartbeat
dispatcher in SCM event based. Contributed by Elek, Marton.
Posted by xy...@apache.org.
HDDS-193. Make Datanode heartbeat dispatcher in SCM event based.
Contributed by Elek, Marton.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8752a485
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8752a485
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8752a485
Branch: refs/heads/HDDS-4
Commit: 8752a48564028cb5892c19e29d4e5b984d70c076
Parents: 1893271
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jun 27 14:18:25 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jun 27 14:18:25 2018 -0700
----------------------------------------------------------------------
.../server/SCMDatanodeHeartbeatDispatcher.java | 126 +++++++++
.../scm/server/SCMDatanodeProtocolServer.java | 14 +-
.../scm/server/StorageContainerManager.java | 4 +-
.../SCMDatanodeContainerReportHandler.java | 76 ------
.../report/SCMDatanodeHeartbeatDispatcher.java | 189 --------------
.../report/SCMDatanodeNodeReportHandler.java | 43 ----
.../server/report/SCMDatanodeReportHandler.java | 83 ------
.../report/SCMDatanodeReportHandlerFactory.java | 82 ------
.../hdds/scm/server/report/package-info.java | 57 -----
.../TestSCMDatanodeHeartbeatDispatcher.java | 119 +++++++++
.../TestSCMDatanodeContainerReportHandler.java | 34 ---
.../TestSCMDatanodeHeartbeatDispatcher.java | 138 ----------
.../TestSCMDatanodeNodeReportHandler.java | 36 ---
.../TestSCMDatanodeReportHandlerFactory.java | 51 ----
.../hdds/scm/server/report/package-info.java | 21 --
.../apache/hadoop/ozone/scm/TestSCMMetrics.java | 253 -------------------
16 files changed, 254 insertions(+), 1072 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
new file mode 100644
index 0000000..36f10a9
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
+
+import com.google.protobuf.GeneratedMessage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class is responsible for dispatching heartbeats from datanodes to
+ * the appropriate EventHandlers at SCM.
+ */
+public final class SCMDatanodeHeartbeatDispatcher {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SCMDatanodeHeartbeatDispatcher.class);
+
+ private EventPublisher eventPublisher;
+
+ public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
+ new TypedEvent<>(NodeReportFromDatanode.class);
+
+  public static final TypedEvent<ContainerReportFromDatanode>
+      CONTAINER_REPORT = new TypedEvent<>(ContainerReportFromDatanode.class);
+
+ public SCMDatanodeHeartbeatDispatcher(EventPublisher eventPublisher) {
+ this.eventPublisher = eventPublisher;
+ }
+
+
+ /**
+ * Dispatches heartbeat to registered event handlers.
+ *
+ * @param heartbeat heartbeat to be dispatched.
+ */
+ public void dispatch(SCMHeartbeatRequestProto heartbeat) {
+ DatanodeDetails datanodeDetails =
+ DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails());
+
+ if (heartbeat.hasNodeReport()) {
+ eventPublisher.fireEvent(NODE_REPORT,
+ new NodeReportFromDatanode(datanodeDetails,
+ heartbeat.getNodeReport()));
+ }
+
+ if (heartbeat.hasContainerReport()) {
+ eventPublisher.fireEvent(CONTAINER_REPORT,
+ new ContainerReportFromDatanode(datanodeDetails,
+ heartbeat.getContainerReport()));
+
+ }
+ }
+
+ /**
+ * Wrapper class for events with the datanode origin.
+ */
+ public static class ReportFromDatanode<T extends GeneratedMessage> {
+
+ private final DatanodeDetails datanodeDetails;
+
+ private final T report;
+
+ public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) {
+ this.datanodeDetails = datanodeDetails;
+ this.report = report;
+ }
+
+ public DatanodeDetails getDatanodeDetails() {
+ return datanodeDetails;
+ }
+
+ public T getReport() {
+ return report;
+ }
+ }
+
+ /**
+ * Node report event payload with origin.
+ */
+ public static class NodeReportFromDatanode
+ extends ReportFromDatanode<NodeReportProto> {
+
+ public NodeReportFromDatanode(DatanodeDetails datanodeDetails,
+ NodeReportProto report) {
+ super(datanodeDetails, report);
+ }
+ }
+
+ /**
+ * Container report event payload with origin.
+ */
+ public static class ContainerReportFromDatanode
+ extends ReportFromDatanode<ContainerReportsProto> {
+
+ public ContainerReportFromDatanode(DatanodeDetails datanodeDetails,
+ ContainerReportsProto report) {
+ super(datanodeDetails, report);
+ }
+ }
+
+}
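Consumers do not call the dispatcher; they subscribe to NODE_REPORT and
CONTAINER_REPORT on the event queue backing the EventPublisher (the
StorageContainerManager diff below passes its EventQueue in). A minimal
subscription sketch, not from the patch; the addHandler/onMessage signatures
are assumed from the hadoop-hdds events framework and may differ slightly:

    // Assumed imports: org.apache.hadoop.hdds.server.events.EventQueue and
    // EventHandler, plus the dispatcher and its NodeReportFromDatanode
    // payload class from this file.
    EventQueue eventQueue = new EventQueue();

    // EventHandler is assumed to be a functional interface with
    // onMessage(payload, publisher).
    eventQueue.addHandler(SCMDatanodeHeartbeatDispatcher.NODE_REPORT,
        (EventHandler<NodeReportFromDatanode>) (payload, publisher) ->
            System.out.println("Node report from "
                + payload.getDatanodeDetails()));

    SCMDatanodeHeartbeatDispatcher dispatcher =
        new SCMDatanodeHeartbeatDispatcher(eventQueue);
    // dispatcher.dispatch(heartbeat) now fires the typed events and the
    // queue routes each payload to the handlers registered above.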
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index eb5ce1a..56b0719 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -73,7 +73,7 @@ import static org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.server.report.SCMDatanodeHeartbeatDispatcher;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
@@ -122,14 +122,19 @@ public class SCMDatanodeProtocolServer implements
private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher;
public SCMDatanodeProtocolServer(final OzoneConfiguration conf,
- StorageContainerManager scm) throws IOException {
+ StorageContainerManager scm, EventPublisher eventPublisher)
+ throws IOException {
Preconditions.checkNotNull(scm, "SCM cannot be null");
+ Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null");
+
this.scm = scm;
final int handlerCount =
conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY,
OZONE_SCM_HANDLER_COUNT_DEFAULT);
+ heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher(eventPublisher);
+
RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
ProtobufRpcEngine.class);
BlockingService dnProtoPbService =
@@ -155,10 +160,6 @@ public class SCMDatanodeProtocolServer implements
conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr,
datanodeRpcServer);
- heartbeatDispatcher = SCMDatanodeHeartbeatDispatcher.newBuilder(conf, scm)
- .addHandlerFor(NodeReportProto.class)
- .addHandlerFor(ContainerReportsProto.class)
- .build();
}
public void start() {
@@ -319,7 +320,6 @@ public class SCMDatanodeProtocolServer implements
try {
LOG.info("Stopping the RPC server for DataNodes");
datanodeRpcServer.stop();
- heartbeatDispatcher.shutdown();
} catch (Exception ex) {
LOG.error(" datanodeRpcServer stop failed.", ex);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 5725d23..568a86a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.common.StorageInfo;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
@@ -182,7 +181,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
scmAdminUsernames.add(scmUsername);
}
- datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this);
+ datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this,
+ eventQueue);
blockProtocolServer = new SCMBlockProtocolServer(conf, this);
clientProtocolServer = new SCMClientProtocolServer(conf, this);
httpServer = new StorageContainerManagerHttpServer(conf);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java
deleted file mode 100644
index 00ce94d..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeContainerReportHandler.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handler for Datanode Container Report.
- */
-public class SCMDatanodeContainerReportHandler extends
- SCMDatanodeReportHandler<ContainerReportsProto> {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- SCMDatanodeContainerReportHandler.class);
-
- @Override
- public void processReport(DatanodeDetails datanodeDetails,
- ContainerReportsProto report) throws IOException {
- LOG.trace("Processing container report from {}.", datanodeDetails);
- updateContainerReportMetrics(datanodeDetails, report);
- getSCM().getScmContainerManager()
- .processContainerReports(datanodeDetails, report);
- }
-
- /**
- * Updates container report metrics in SCM.
- *
- * @param datanodeDetails Datanode Information
- * @param reports Container Reports
- */
- private void updateContainerReportMetrics(DatanodeDetails datanodeDetails,
- ContainerReportsProto reports) {
- ContainerStat newStat = new ContainerStat();
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo info : reports
- .getReportsList()) {
- newStat.add(new ContainerStat(info.getSize(), info.getUsed(),
- info.getKeyCount(), info.getReadBytes(), info.getWriteBytes(),
- info.getReadCount(), info.getWriteCount()));
- }
- // update container metrics
- StorageContainerManager.getMetrics().setLastContainerStat(newStat);
-
- // Update container stat entry, this will trigger a removal operation if it
- // exists in cache.
- String datanodeUuid = datanodeDetails.getUuidString();
- getSCM().getContainerReportCache().put(datanodeUuid, newStat);
- // update global view container metrics
- StorageContainerManager.getMetrics().incrContainerStat(newStat);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index d50edff..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-
-/**
- * This class is responsible for dispatching heartbeat from datanode to
- * appropriate ReportHandlers at SCM.
- * Only one handler per report is supported now, it's very easy to support
- * multiple handlers for a report.
- */
-public final class SCMDatanodeHeartbeatDispatcher {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- SCMDatanodeHeartbeatDispatcher.class);
-
- /**
- * This stores Report to Handler mapping.
- */
- private final Map<Class<? extends GeneratedMessage>,
- SCMDatanodeReportHandler<? extends GeneratedMessage>> handlers;
-
- /**
- * Executor service which will be used for processing reports.
- */
- private final ExecutorService executorService;
-
- /**
- * Constructs SCMDatanodeHeartbeatDispatcher instance with the given
- * handlers.
- *
- * @param handlers report to report handler mapping
- */
- private SCMDatanodeHeartbeatDispatcher(Map<Class<? extends GeneratedMessage>,
- SCMDatanodeReportHandler<? extends GeneratedMessage>> handlers) {
- this.handlers = handlers;
- this.executorService = HadoopExecutors.newCachedThreadPool(
- new ThreadFactoryBuilder().setDaemon(true)
- .setNameFormat("SCMDatanode Heartbeat Dispatcher Thread - %d")
- .build());
- }
-
- /**
- * Dispatches heartbeat to registered handlers.
- *
- * @param heartbeat heartbeat to be dispatched.
- */
- public void dispatch(SCMHeartbeatRequestProto heartbeat) {
- DatanodeDetails datanodeDetails = DatanodeDetails
- .getFromProtoBuf(heartbeat.getDatanodeDetails());
- if (heartbeat.hasNodeReport()) {
- processReport(datanodeDetails, heartbeat.getNodeReport());
- }
- if (heartbeat.hasContainerReport()) {
- processReport(datanodeDetails, heartbeat.getContainerReport());
- }
- }
-
- /**
- * Invokes appropriate ReportHandler and submits the task to executor
- * service for processing.
- *
- * @param datanodeDetails Datanode Information
- * @param report Report to be processed
- */
- @SuppressWarnings("unchecked")
- private void processReport(DatanodeDetails datanodeDetails,
- GeneratedMessage report) {
- executorService.submit(() -> {
- try {
- SCMDatanodeReportHandler handler = handlers.get(report.getClass());
- handler.processReport(datanodeDetails, report);
- } catch (IOException ex) {
- LOG.error("Exception wile processing report {}, from {}",
- report.getClass(), datanodeDetails, ex);
- }
- });
- }
-
- /**
- * Shuts down SCMDatanodeHeartbeatDispatcher.
- */
- public void shutdown() {
- executorService.shutdown();
- }
-
- /**
- * Returns a new Builder to construct {@link SCMDatanodeHeartbeatDispatcher}.
- *
- * @param conf Configuration to be used by SCMDatanodeHeartbeatDispatcher
- * @param scm {@link StorageContainerManager} instance to be used by report
- * handlers
- *
- * @return {@link SCMDatanodeHeartbeatDispatcher.Builder} instance
- */
- public static Builder newBuilder(Configuration conf,
- StorageContainerManager scm) {
- return new Builder(conf, scm);
- }
-
- /**
- * Builder for SCMDatanodeHeartbeatDispatcher.
- */
- public static class Builder {
-
- private final SCMDatanodeReportHandlerFactory reportHandlerFactory;
- private final Map<Class<? extends GeneratedMessage>,
- SCMDatanodeReportHandler<? extends GeneratedMessage>> report2handler;
-
- /**
- * Constructs SCMDatanodeHeartbeatDispatcher.Builder instance.
- *
- * @param conf Configuration object to be used.
- * @param scm StorageContainerManager instance to be used for report
- * handler initialization.
- */
- private Builder(Configuration conf, StorageContainerManager scm) {
- this.report2handler = new HashMap<>();
- this.reportHandlerFactory =
- new SCMDatanodeReportHandlerFactory(conf, scm);
- }
-
- /**
- * Adds new report handler for the given report.
- *
- * @param report Report for which handler has to be added
- *
- * @return Builder
- */
- public Builder addHandlerFor(Class<? extends GeneratedMessage> report) {
- report2handler.put(report, reportHandlerFactory.getHandlerFor(report));
- return this;
- }
-
- /**
- * Associates the given report handler for the given report.
- *
- * @param report Report to be associated with
- * @param handler Handler to be used for the report
- *
- * @return Builder
- */
- public Builder addHandler(Class<? extends GeneratedMessage> report,
- SCMDatanodeReportHandler<? extends GeneratedMessage> handler) {
- report2handler.put(report, handler);
- return this;
- }
-
- /**
- * Builds and returns {@link SCMDatanodeHeartbeatDispatcher} instance.
- *
- * @return SCMDatanodeHeartbeatDispatcher
- */
- public SCMDatanodeHeartbeatDispatcher build() {
- return new SCMDatanodeHeartbeatDispatcher(report2handler);
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java
deleted file mode 100644
index fb89b02..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeNodeReportHandler.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Handles Datanode Node Report.
- */
-public class SCMDatanodeNodeReportHandler extends
- SCMDatanodeReportHandler<NodeReportProto> {
-
- private static final Logger LOG = LoggerFactory.getLogger(
- SCMDatanodeNodeReportHandler.class);
-
- @Override
- public void processReport(DatanodeDetails datanodeDetails,
- NodeReportProto report) throws IOException {
- LOG.debug("Processing node report from {}.", datanodeDetails);
- //TODO: add logic to process node report.
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java
deleted file mode 100644
index d338649..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandler.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-
-import java.io.IOException;
-
-/**
- * Datanode Report handlers should implement this interface in order to get
- * call back whenever the report is received from datanode.
- *
- * @param <T> Type of report the handler is interested in.
- */
-public abstract class SCMDatanodeReportHandler<T extends GeneratedMessage>
- implements Configurable {
-
- private Configuration config;
- private StorageContainerManager scm;
-
- /**
- * Initializes SCMDatanodeReportHandler and associates it with the given
- * StorageContainerManager instance.
- *
- * @param storageContainerManager StorageContainerManager instance to be
- * associated with.
- */
- public void init(StorageContainerManager storageContainerManager) {
- this.scm = storageContainerManager;
- }
-
- /**
- * Returns the associated StorageContainerManager instance. This will be
- * used by the ReportHandler implementations.
- *
- * @return {@link StorageContainerManager}
- */
- protected StorageContainerManager getSCM() {
- return scm;
- }
-
- @Override
- public void setConf(Configuration conf) {
- this.config = conf;
- }
-
- @Override
- public Configuration getConf() {
- return config;
- }
-
- /**
- * Processes the report received from datanode. Each ReportHandler
- * implementation is responsible for providing the logic to process the
- * report it's interested in.
- *
- * @param datanodeDetails Datanode Information
- * @param report Report to be processed
- *
- * @throws IOException In case of any exception
- */
- abstract void processReport(DatanodeDetails datanodeDetails, T report)
- throws IOException;
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java
deleted file mode 100644
index e88495f..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/SCMDatanodeReportHandlerFactory.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import java.util.HashMap;
-import java.util.Map;
-
-
-/**
- * Factory class to construct {@link SCMDatanodeReportHandler} given a report.
- */
-public class SCMDatanodeReportHandlerFactory {
-
- private final Configuration conf;
- private final StorageContainerManager scm;
- private final Map<Class<? extends GeneratedMessage>,
- Class<? extends SCMDatanodeReportHandler<? extends GeneratedMessage>>>
- report2handler;
-
- /**
- * Constructs {@link SCMDatanodeReportHandler} instance.
- *
- * @param conf Configuration to be passed to the
- * {@link SCMDatanodeReportHandler}
- */
- public SCMDatanodeReportHandlerFactory(Configuration conf,
- StorageContainerManager scm) {
- this.conf = conf;
- this.scm = scm;
- this.report2handler = new HashMap<>();
-
- report2handler.put(NodeReportProto.class,
- SCMDatanodeNodeReportHandler.class);
- report2handler.put(ContainerReportsProto.class,
- SCMDatanodeContainerReportHandler.class);
- }
-
- /**
- * Returns the SCMDatanodeReportHandler for the corresponding report.
- *
- * @param report report
- *
- * @return report handler
- */
- public SCMDatanodeReportHandler<? extends GeneratedMessage> getHandlerFor(
- Class<? extends GeneratedMessage> report) {
- Class<? extends SCMDatanodeReportHandler<? extends GeneratedMessage>>
- handlerClass = report2handler.get(report);
- if (handlerClass == null) {
- throw new RuntimeException("No handler found for report " + report);
- }
- SCMDatanodeReportHandler<? extends GeneratedMessage> instance =
- ReflectionUtils.newInstance(handlerClass, conf);
- instance.init(scm);
- return instance;
- }
-
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
deleted file mode 100644
index fda3993..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.server.report;
-/**
- * Handling of all the datanode reports in SCM which are received through
- * heartbeat is done here.
- *
- * SCM Datanode Report Processing State Diagram:
- *
- * SCMDatanode SCMDatanodeHeartbeat SCMDatanodeReport
- * ProtocolServer Dispatcher Handler
- * | | |
- * | | |
- * | construct | |
- * |----------------------->| |
- * | | |
- * | | register |
- * | |<-----------------------|
- * | | |
- * +------------+------------------------+------------------------+--------+
- * | loop | | | |
- * | | | | |
- * | | | | |
- * | heartbeat | | | |
- * - +----------->| | | |
- * | from | heartbeat | | |
- * | Datanode |----------------------->| | |
- * | | | report | |
- * | | |----------------------->| |
- * | | | | |
- * | DN | | | |
- * <-+------------| | | |
- * | commands | | | |
- * | | | | |
- * +------------+------------------------+------------------------+--------+
- * | | |
- * | | |
- * | shutdown | |
- * |----------------------->| |
- * | | |
- * | | |
- * - - -
- */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
new file mode 100644
index 0000000..326a34b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.server;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+ .ContainerReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+ .NodeReportFromDatanode;
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * This class tests the behavior of SCMDatanodeHeartbeatDispatcher.
+ */
+public class TestSCMDatanodeHeartbeatDispatcher {
+
+
+ @Test
+ public void testNodeReportDispatcher() throws IOException {
+
+ Configuration conf = new OzoneConfiguration();
+
+ AtomicInteger eventReceived = new AtomicInteger();
+
+ NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
+
+ SCMDatanodeHeartbeatDispatcher dispatcher =
+ new SCMDatanodeHeartbeatDispatcher(new EventPublisher() {
+ @Override
+ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
+ EVENT_TYPE event, PAYLOAD payload) {
+ Assert.assertEquals(event,
+ SCMDatanodeHeartbeatDispatcher.NODE_REPORT);
+ eventReceived.incrementAndGet();
+ Assert.assertEquals(nodeReport, ((NodeReportFromDatanode)payload).getReport());
+
+ }
+ });
+
+ DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+
+ SCMHeartbeatRequestProto heartbeat =
+ SCMHeartbeatRequestProto.newBuilder()
+ .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
+ .setNodeReport(nodeReport)
+ .build();
+ dispatcher.dispatch(heartbeat);
+ Assert.assertEquals(1, eventReceived.get());
+
+
+ }
+
+ @Test
+ public void testContainerReportDispatcher() throws IOException {
+
+ Configuration conf = new OzoneConfiguration();
+
+ AtomicInteger eventReceived = new AtomicInteger();
+
+ ContainerReportsProto containerReport =
+ ContainerReportsProto.getDefaultInstance();
+
+ SCMDatanodeHeartbeatDispatcher dispatcher =
+ new SCMDatanodeHeartbeatDispatcher(new EventPublisher() {
+ @Override
+ public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
+ EVENT_TYPE event, PAYLOAD payload) {
+ Assert.assertEquals(event,
+ SCMDatanodeHeartbeatDispatcher.CONTAINER_REPORT);
+ Assert.assertEquals(containerReport, ((ContainerReportFromDatanode)payload).getReport());
+ eventReceived.incrementAndGet();
+ }
+ });
+
+ DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+
+ SCMHeartbeatRequestProto heartbeat =
+ SCMHeartbeatRequestProto.newBuilder()
+ .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
+ .setContainerReport(containerReport)
+ .build();
+ dispatcher.dispatch(heartbeat);
+ Assert.assertEquals(1, eventReceived.get());
+
+
+ }
+
+}
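Note that the removed handler-based test further below exercised a heartbeat carrying both a node report and a container report at once, while the new test class does not. A similar case could be added against the event API — a minimal sketch, reusing only the types already imported above and assuming the dispatcher fires one event per report present in the heartbeat (hypothetical test method, not part of this patch):

  @Test
  public void testNodeAndContainerReportDispatcher() throws IOException {
    AtomicInteger eventReceived = new AtomicInteger();

    SCMDatanodeHeartbeatDispatcher dispatcher =
        new SCMDatanodeHeartbeatDispatcher(new EventPublisher() {
          @Override
          public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
              EVENT_TYPE event, PAYLOAD payload) {
            // One event is expected per report carried by the heartbeat.
            eventReceived.incrementAndGet();
          }
        });

    DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
    SCMHeartbeatRequestProto heartbeat =
        SCMHeartbeatRequestProto.newBuilder()
            .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
            .setNodeReport(NodeReportProto.getDefaultInstance())
            .setContainerReport(ContainerReportsProto.getDefaultInstance())
            .build();
    dispatcher.dispatch(heartbeat);
    // Both the node report and the container report should be dispatched.
    Assert.assertEquals(2, eventReceived.get());
  }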
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java
deleted file mode 100644
index 776ae88..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeContainerReportHandler.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases to verify SCMDatanodeContainerReportHandler's behavior.
- */
-public class TestSCMDatanodeContainerReportHandler {
-
- //TODO: add test cases to verify SCMDatanodeContainerReportHandler.
-
- @Test
- public void dummyTest() {
- Assert.assertTrue(true);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java
deleted file mode 100644
index 5d08647..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeHeartbeatDispatcher.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * This class tests the behavior of SCMDatanodeHeartbeatDispatcher.
- */
-public class TestSCMDatanodeHeartbeatDispatcher {
-
- @Test
- public void testSCMDatanodeHeartbeatDispatcherBuilder() {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeHeartbeatDispatcher dispatcher =
- SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null)
- .addHandlerFor(NodeReportProto.class)
- .addHandlerFor(ContainerReportsProto.class)
- .build();
- Assert.assertNotNull(dispatcher);
- }
-
- @Test
- public void testNodeReportDispatcher() throws IOException {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeNodeReportHandler nodeReportHandler =
- Mockito.mock(SCMDatanodeNodeReportHandler.class);
- SCMDatanodeHeartbeatDispatcher dispatcher =
- SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null)
- .addHandler(NodeReportProto.class, nodeReportHandler)
- .build();
-
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
- NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
- SCMHeartbeatRequestProto heartbeat =
- SCMHeartbeatRequestProto.newBuilder()
- .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
- .setNodeReport(nodeReport)
- .build();
- dispatcher.dispatch(heartbeat);
- verify(nodeReportHandler,
- times(1))
- .processReport(any(DatanodeDetails.class), eq(nodeReport));
- }
-
- @Test
- public void testContainerReportDispatcher() throws IOException {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeContainerReportHandler containerReportHandler =
- Mockito.mock(SCMDatanodeContainerReportHandler.class);
- SCMDatanodeHeartbeatDispatcher dispatcher =
- SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null)
- .addHandler(ContainerReportsProto.class, containerReportHandler)
- .build();
-
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
- ContainerReportsProto containerReport =
- ContainerReportsProto.getDefaultInstance();
- SCMHeartbeatRequestProto heartbeat =
- SCMHeartbeatRequestProto.newBuilder()
- .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
- .setContainerReport(containerReport)
- .build();
- dispatcher.dispatch(heartbeat);
- verify(containerReportHandler,
- times(1))
- .processReport(any(DatanodeDetails.class),
- any(ContainerReportsProto.class));
- }
-
- @Test
- public void testNodeAndContainerReportDispatcher() throws IOException {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeNodeReportHandler nodeReportHandler =
- Mockito.mock(SCMDatanodeNodeReportHandler.class);
- SCMDatanodeContainerReportHandler containerReportHandler =
- Mockito.mock(SCMDatanodeContainerReportHandler.class);
- SCMDatanodeHeartbeatDispatcher dispatcher =
- SCMDatanodeHeartbeatDispatcher.newBuilder(conf, null)
- .addHandler(NodeReportProto.class, nodeReportHandler)
- .addHandler(ContainerReportsProto.class, containerReportHandler)
- .build();
-
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
- NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
- ContainerReportsProto containerReport =
- ContainerReportsProto.getDefaultInstance();
- SCMHeartbeatRequestProto heartbeat =
- SCMHeartbeatRequestProto.newBuilder()
- .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
- .setNodeReport(nodeReport)
- .setContainerReport(containerReport)
- .build();
- dispatcher.dispatch(heartbeat);
- verify(nodeReportHandler,
- times(1))
- .processReport(any(DatanodeDetails.class), any(NodeReportProto.class));
- verify(containerReportHandler,
- times(1))
- .processReport(any(DatanodeDetails.class),
- any(ContainerReportsProto.class));
- }
-
-}
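For comparison with the new test earlier in this commit: the builder/handler API removed here required registering a dedicated handler per report type (addHandler/addHandlerFor), whereas the replacement dispatcher simply publishes a typed event (NODE_REPORT, CONTAINER_REPORT) to a shared EventPublisher for each report found in the heartbeat.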
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java
deleted file mode 100644
index 30a753c..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeNodeReportHandler.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases to verify SCMDatanodeNodeReportHandler's behavior.
- */
-public class TestSCMDatanodeNodeReportHandler {
-
-
- //TODO: add test cases to verify SCMDatanodeNodeReportHandler.
-
- @Test
- public void dummyTest() {
- Assert.assertTrue(true);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java
deleted file mode 100644
index 4b918f7..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/TestSCMDatanodeReportHandlerFactory.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases to verify the functionality of SCMDatanodeReportHandlerFactory.
- */
-public class TestSCMDatanodeReportHandlerFactory {
-
- @Test
- public void testNodeReportHandlerConstruction() {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeReportHandlerFactory factory =
- new SCMDatanodeReportHandlerFactory(conf, null);
- Assert.assertTrue(factory.getHandlerFor(NodeReportProto.class)
- instanceof SCMDatanodeNodeReportHandler);
- }
-
- @Test
- public void testContainerReportHandlerConstruction() {
- Configuration conf = new OzoneConfiguration();
- SCMDatanodeReportHandlerFactory factory =
- new SCMDatanodeReportHandlerFactory(conf, null);
- Assert.assertTrue(factory.getHandlerFor(ContainerReportsProto.class)
- instanceof SCMDatanodeContainerReportHandler);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
deleted file mode 100644
index 4a3f59f..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/report/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.report;
-/**
- * Contains test-cases to test Datanode report handlers in SCM.
- */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8752a485/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
deleted file mode 100644
index ecddf8e..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getLongGauge;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.server.report
- .SCMDatanodeContainerReportHandler;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-/**
- * This class tests the metrics of Storage Container Manager.
- */
-public class TestSCMMetrics {
- /**
- * Set the timeout for each test.
- */
- @Rule
- public Timeout testTimeout = new Timeout(90000);
-
- private static MiniOzoneCluster cluster = null;
-
- @Test
- public void testContainerMetrics() throws Exception {
- int nodeCount = 2;
- int numReport = 2;
- long size = OzoneConsts.GB * 5;
- long used = OzoneConsts.GB * 2;
- long readBytes = OzoneConsts.GB * 1;
- long writeBytes = OzoneConsts.GB * 2;
- int keyCount = 1000;
- int readCount = 100;
- int writeCount = 50;
- OzoneConfiguration conf = new OzoneConfiguration();
-
- try {
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(nodeCount).build();
- cluster.waitForClusterToBeReady();
-
- ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes,
- writeBytes, readCount, writeCount);
- StorageContainerManager scmManager = cluster.getStorageContainerManager();
- DatanodeDetails fstDatanodeDetails = TestUtils.getDatanodeDetails();
- ContainerReportsProto request = createContainerReport(numReport, stat);
- String fstDatanodeUuid = fstDatanodeDetails.getUuidString();
- SCMDatanodeContainerReportHandler containerReportHandler =
- new SCMDatanodeContainerReportHandler();
- containerReportHandler.setConf(conf);
- containerReportHandler.init(scmManager);
- containerReportHandler.processReport(
- fstDatanodeDetails, request);
-
- // verify container stat metrics
- MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
- assertEquals(size * numReport,
- getLongGauge("LastContainerReportSize", scmMetrics));
- assertEquals(used * numReport,
- getLongGauge("LastContainerReportUsed", scmMetrics));
- assertEquals(readBytes * numReport,
- getLongGauge("LastContainerReportReadBytes", scmMetrics));
- assertEquals(writeBytes * numReport,
- getLongGauge("LastContainerReportWriteBytes", scmMetrics));
-
- assertEquals(keyCount * numReport,
- getLongGauge("LastContainerReportKeyCount", scmMetrics));
- assertEquals(readCount * numReport,
- getLongGauge("LastContainerReportReadCount", scmMetrics));
- assertEquals(writeCount * numReport,
- getLongGauge("LastContainerReportWriteCount", scmMetrics));
-
- // add one new report
- DatanodeDetails sndDatanodeDetails = TestUtils.getDatanodeDetails();
- request = createContainerReport(1, stat);
- String sndDatanodeUuid = sndDatanodeDetails.getUuidString();
- containerReportHandler.processReport(
- sndDatanodeDetails, request);
-
- scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
- assertEquals(size * (numReport + 1),
- getLongCounter("ContainerReportSize", scmMetrics));
- assertEquals(used * (numReport + 1),
- getLongCounter("ContainerReportUsed", scmMetrics));
- assertEquals(readBytes * (numReport + 1),
- getLongCounter("ContainerReportReadBytes", scmMetrics));
- assertEquals(writeBytes * (numReport + 1),
- getLongCounter("ContainerReportWriteBytes", scmMetrics));
-
- assertEquals(keyCount * (numReport + 1),
- getLongCounter("ContainerReportKeyCount", scmMetrics));
- assertEquals(readCount * (numReport + 1),
- getLongCounter("ContainerReportReadCount", scmMetrics));
- assertEquals(writeCount * (numReport + 1),
- getLongCounter("ContainerReportWriteCount", scmMetrics));
-
- // Re-send reports but with different value for validating
- // the aggregation.
- stat = new ContainerStat(100, 50, 3, 50, 60, 5, 6);
- containerReportHandler.processReport(
- fstDatanodeDetails, createContainerReport(1, stat));
-
- stat = new ContainerStat(1, 1, 1, 1, 1, 1, 1);
- containerReportHandler.processReport(
- sndDatanodeDetails, createContainerReport(1, stat));
-
- // the global container metrics value should be updated
- scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
- assertEquals(101, getLongCounter("ContainerReportSize", scmMetrics));
- assertEquals(51, getLongCounter("ContainerReportUsed", scmMetrics));
- assertEquals(51, getLongCounter("ContainerReportReadBytes", scmMetrics));
- assertEquals(61, getLongCounter("ContainerReportWriteBytes", scmMetrics));
-
- assertEquals(4, getLongCounter("ContainerReportKeyCount", scmMetrics));
- assertEquals(6, getLongCounter("ContainerReportReadCount", scmMetrics));
- assertEquals(7, getLongCounter("ContainerReportWriteCount", scmMetrics));
- } finally {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
- }
-
- @Test
- public void testStaleNodeContainerReport() throws Exception {
- int nodeCount = 2;
- int numReport = 2;
- long size = OzoneConsts.GB * 5;
- long used = OzoneConsts.GB * 2;
- long readBytes = OzoneConsts.GB * 1;
- long writeBytes = OzoneConsts.GB * 2;
- int keyCount = 1000;
- int readCount = 100;
- int writeCount = 50;
- OzoneConfiguration conf = new OzoneConfiguration();
-
- try {
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(nodeCount).build();
- cluster.waitForClusterToBeReady();
-
- ContainerStat stat = new ContainerStat(size, used, keyCount, readBytes,
- writeBytes, readCount, writeCount);
- StorageContainerManager scmManager = cluster.getStorageContainerManager();
-
- DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
- .getDatanodeDetails();
- SCMDatanodeContainerReportHandler containerReportHandler =
- new SCMDatanodeContainerReportHandler();
- containerReportHandler.setConf(conf);
- containerReportHandler.init(scmManager);
- ContainerReportsProto request = createContainerReport(numReport, stat);
- containerReportHandler.processReport(
- datanodeDetails, request);
-
- MetricsRecordBuilder scmMetrics = getMetrics(SCMMetrics.SOURCE_NAME);
- assertEquals(size * numReport,
- getLongCounter("ContainerReportSize", scmMetrics));
- assertEquals(used * numReport,
- getLongCounter("ContainerReportUsed", scmMetrics));
- assertEquals(readBytes * numReport,
- getLongCounter("ContainerReportReadBytes", scmMetrics));
- assertEquals(writeBytes * numReport,
- getLongCounter("ContainerReportWriteBytes", scmMetrics));
-
- assertEquals(keyCount * numReport,
- getLongCounter("ContainerReportKeyCount", scmMetrics));
- assertEquals(readCount * numReport,
- getLongCounter("ContainerReportReadCount", scmMetrics));
- assertEquals(writeCount * numReport,
- getLongCounter("ContainerReportWriteCount", scmMetrics));
-
- // reset stale interval time to move node from healthy to stale
- SCMNodeManager nodeManager = (SCMNodeManager) cluster
- .getStorageContainerManager().getScmNodeManager();
- nodeManager.setStaleNodeIntervalMs(100);
-
- // verify the metrics when node becomes stale
- GenericTestUtils.waitFor(() -> {
- MetricsRecordBuilder metrics = getMetrics(SCMMetrics.SOURCE_NAME);
- return 0 == getLongCounter("ContainerReportSize", metrics)
- && 0 == getLongCounter("ContainerReportUsed", metrics)
- && 0 == getLongCounter("ContainerReportReadBytes", metrics)
- && 0 == getLongCounter("ContainerReportWriteBytes", metrics)
- && 0 == getLongCounter("ContainerReportKeyCount", metrics)
- && 0 == getLongCounter("ContainerReportReadCount", metrics)
- && 0 == getLongCounter("ContainerReportWriteCount", metrics);
- }, 1000, 60000);
- } finally {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
- }
-
- private ContainerReportsProto createContainerReport(int numReport,
- ContainerStat stat) {
- StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder
- reportsBuilder = StorageContainerDatanodeProtocolProtos
- .ContainerReportsProto.newBuilder();
-
- for (int i = 0; i < numReport; i++) {
- ContainerReport report = new ContainerReport(
- RandomUtils.nextLong(), DigestUtils.sha256Hex("Simulated"));
- report.setSize(stat.getSize().get());
- report.setBytesUsed(stat.getUsed().get());
- report.setReadCount(stat.getReadCount().get());
- report.setReadBytes(stat.getReadBytes().get());
- report.setKeyCount(stat.getKeyCount().get());
- report.setWriteCount(stat.getWriteCount().get());
- report.setWriteBytes(stat.getWriteBytes().get());
- reportsBuilder.addReports(report.getProtoBufMessage());
- }
- return reportsBuilder.build();
- }
-}
[04/45] hadoop git commit: YARN-8423. GPU does not get released even
though the application gets killed. (Sunil G via wangda)
Posted by xy...@apache.org.
YARN-8423. GPU does not get released even though the application gets killed. (Sunil G via wangda)
Change-Id: I570db7d60f8c6c21762dd618a9207d1107c486a0
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ada8f63d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ada8f63d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ada8f63d
Branch: refs/heads/HDDS-4
Commit: ada8f63d0b3739d245300461387b0516dc92ccf9
Parents: 62d83ca
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Jun 26 19:25:57 2018 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Jun 26 19:25:57 2018 -0700
----------------------------------------------------------------------
.../containermanager/container/Container.java | 6 ++
.../container/ContainerImpl.java | 11 ++++
.../resources/gpu/GpuResourceAllocator.java | 68 +++++++++++++++++++-
.../resources/gpu/GpuResourceHandlerImpl.java | 1 -
.../nodemanager/webapp/MockContainer.java | 3 +
5 files changed, 85 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
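In short, the fix below makes GPU assignment wait (up to roughly two minutes) for devices still held by containers that have already reached a final state, instead of failing immediately when no GPU is free at request time.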
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada8f63d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
index 5d48d84..4912d02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/Container.java
@@ -113,4 +113,10 @@ public interface Container extends EventHandler<ContainerEvent> {
ResourceMappings getResourceMappings();
void sendPauseEvent(String description);
+
+ /**
+ * Check whether the container has reached one of its final states.
+ * @return true if the container is in a final state, false otherwise
+ */
+ boolean isContainerInFinalStates();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada8f63d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 0541544..f76e682 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -2223,4 +2223,15 @@ public class ContainerImpl implements Container {
SlidingWindowRetryPolicy getRetryPolicy() {
return retryPolicy;
}
+
+ @Override
+ public boolean isContainerInFinalStates() {
+ ContainerState state = getContainerState();
+ return state == ContainerState.KILLING || state == ContainerState.DONE
+ || state == ContainerState.LOCALIZATION_FAILED
+ || state == ContainerState.CONTAINER_RESOURCES_CLEANINGUP
+ || state == ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL
+ || state == ContainerState.EXITED_WITH_FAILURE
+ || state == ContainerState.EXITED_WITH_SUCCESS;
+ }
}
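Note that KILLING and the cleanup states count as final even though the container's devices have not actually been freed yet; that is what allows the GPU allocator below to treat GPUs held by such containers as about to be released.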
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada8f63d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index 5bdffc3..81a9655 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@@ -36,10 +37,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -54,6 +53,7 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
*/
public class GpuResourceAllocator {
final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
+ private static final int WAIT_MS_PER_LOOP = 1000;
private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();
private Map<GpuDevice, ContainerId> usedDevices = new TreeMap<>();
@@ -168,7 +168,44 @@ public class GpuResourceAllocator {
* @return allocation results.
* @throws ResourceHandlerException When failed to assign GPUs.
*/
- public synchronized GpuAllocation assignGpus(Container container)
+ public GpuAllocation assignGpus(Container container)
+ throws ResourceHandlerException {
+ GpuAllocation allocation = internalAssignGpus(container);
+
+ // Wait for a maximum of 120 seconds when no GPU is currently available
+ // but some are still pending release.
+ final int timeoutMsecs = 120 * WAIT_MS_PER_LOOP;
+ int timeWaiting = 0;
+ while (allocation == null) {
+ if (timeWaiting >= timeoutMsecs) {
+ break;
+ }
+
+ // Sleep for 1 sec to give GPU devices that are being released a
+ // chance to become free.
+ try {
+ LOG.info("Container : " + container.getContainerId()
+ + " is waiting for free GPU devices.");
+ Thread.sleep(WAIT_MS_PER_LOOP);
+ timeWaiting += WAIT_MS_PER_LOOP;
+ allocation = internalAssignGpus(container);
+ } catch (InterruptedException e) {
+ // On any interrupt, break the loop and continue execution.
+ break;
+ }
+ }
+
+ if (allocation == null) {
+ String message = "Could not get a valid GPU device for container '" +
+ container.getContainerId()
+ + "' as other containers may not have released their GPUs yet.";
+ LOG.warn(message);
+ throw new ResourceHandlerException(message);
+ }
+ return allocation;
+ }
+
+ private synchronized GpuAllocation internalAssignGpus(Container container)
throws ResourceHandlerException {
Resource requestedResource = container.getResource();
ContainerId containerId = container.getContainerId();
@@ -176,6 +213,14 @@ public class GpuResourceAllocator {
// Assign Gpus to container if requested some.
if (numRequestedGpuDevices > 0) {
if (numRequestedGpuDevices > getAvailableGpus()) {
+ // If some devices are in the process of being released, wait a few
+ // seconds for them to become available.
+ if (numRequestedGpuDevices <= getReleasingGpus() + getAvailableGpus()) {
+ return null;
+ }
+ }
+
+ if (numRequestedGpuDevices > getAvailableGpus()) {
throw new ResourceHandlerException(
getResourceHandlerExceptionMessage(numRequestedGpuDevices,
containerId));
@@ -211,6 +256,23 @@ public class GpuResourceAllocator {
return new GpuAllocation(null, allowedGpuDevices);
}
+ private synchronized long getReleasingGpus() {
+ long releasingGpus = 0;
+ Iterator<Map.Entry<GpuDevice, ContainerId>> iter = usedDevices.entrySet()
+ .iterator();
+ while (iter.hasNext()) {
+ ContainerId containerId = iter.next().getValue();
+ Container container;
+ if ((container = nmContext.getContainers().get(containerId)) != null) {
+ if (container.isContainerInFinalStates()) {
+ releasingGpus = releasingGpus + container.getResource()
+ .getResourceInformation(ResourceInformation.GPU_URI).getValue();
+ }
+ }
+ }
+ return releasingGpus;
+ }
+
/**
* Clean up all Gpus assigned to containerId
* @param containerId containerId
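Stripped of the GPU specifics, the waiting logic added to assignGpus above is a bounded poll. A minimal standalone sketch of the same pattern (hypothetical BoundedPoll/tryAllocate names, not the YARN API):

import java.util.function.Supplier;

final class BoundedPoll {
  private static final int WAIT_MS_PER_LOOP = 1000;
  private static final int MAX_LOOPS = 120;  // ~120 seconds in total

  // Returns the first non-null allocation, retrying once per second;
  // gives up after MAX_LOOPS attempts or on interrupt.
  static <T> T waitFor(Supplier<T> tryAllocate) {
    T result = tryAllocate.get();
    int loops = 0;
    while (result == null && loops < MAX_LOOPS) {
      try {
        Thread.sleep(WAIT_MS_PER_LOOP);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();  // preserve the interrupt flag
        break;
      }
      loops++;
      result = tryAllocate.get();
    }
    if (result == null) {
      throw new IllegalStateException(
          "resource was not released within the timeout");
    }
    return result;
  }
}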
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada8f63d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
index 587fcb4..1184382 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceHandlerImpl.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
-import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ada8f63d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
index 325709b..67dfef2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/MockContainer.java
@@ -255,4 +255,7 @@ public class MockContainer implements Container {
public void sendPauseEvent(String description) {
}
+ @Override public boolean isContainerInFinalStates() {
+ return false;
+ }
}
[27/45] hadoop git commit: YARN-8469. [UI2] URL needs to be trimmed
to handle index.html redirection while accessing via knox. Contributed by
Sunil Govindan.
Posted by xy...@apache.org.
YARN-8469. [UI2] URL needs to be trimmed to handle index.html redirection while accessing via knox. Contributed by Sunil Govindan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a820738e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a820738e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a820738e
Branch: refs/heads/HDDS-4
Commit: a820738e347854aed7cdaa7758d9c0830df624e3
Parents: 469b29c
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Jun 29 10:21:25 2018 -0700
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Jun 29 10:21:25 2018 -0700
----------------------------------------------------------------------
.../src/main/webapp/app/initializers/loader.js | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a820738e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 6642869..96c8fcf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -205,5 +205,13 @@ export default {
const skipTrailingSlash = function(path) {
path = path.replace('ui2/', '');
- return path.replace(/\/$/, '');
+ path = path.replace(/\/$/, '');
+ console.log('base url:' + path);
+ if (path.includes("redirect")) {
+ var to = path.lastIndexOf('/');
+ to = to === -1 ? path.length : to + 1;
+ path = path.substring(0, to);
+ console.log('base url after redirect:' + path);
+ }
+ return path;
};
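Worked example of the patched logic (illustrative path, not taken from the patch): for a Knox-style base path such as /gateway/yarnui/ui2/redirect/index.html, the ui2/ segment and any trailing slash are stripped first; because the result contains "redirect", everything after the last '/' — here index.html — is then dropped, leaving /gateway/yarnui/redirect/ as the base URL.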
[16/45] hadoop git commit: HADOOP-15495. Upgrade commons-lang version
to 3.7 in hadoop-common-project and hadoop-tools. Contributed by Takanobu
Asanuma.
Posted by xy...@apache.org.
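The change in this commit is a mechanical package rename from commons-lang 2.x to commons-lang3 3.7. In essence (standalone illustration, not code from the patch; the helpers touched in these files keep the same signatures under the new package):

// Before (commons-lang 2.x):
//   import org.apache.commons.lang.StringUtils;
// After (commons-lang3 3.7):
import org.apache.commons.lang3.StringUtils;

public class LangMigration {
  public static void main(String[] args) {
    // Same behavior under the new package, so the change is import-only
    // for call sites like these.
    System.out.println(StringUtils.isBlank("   "));               // true
    System.out.println(StringUtils.equalsIgnoreCase("S3", "s3")); // true
  }
}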
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 0ab8696..116827d 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -60,7 +60,7 @@ import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 49981ed..f0ffb44 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index ac10e08..527697f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -39,7 +39,7 @@ import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
index bfac975..9e2f34d 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs.s3native;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -31,7 +31,7 @@ import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.util.Objects;
-import static org.apache.commons.lang.StringUtils.equalsIgnoreCase;
+import static org.apache.commons.lang3.StringUtils.equalsIgnoreCase;
/**
* Class to aid logging in to S3 endpoints.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
index f1799ac..aa6b5d8 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AConfiguration.java
@@ -22,8 +22,8 @@ import com.amazonaws.ClientConfiguration;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.S3ClientOptions;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.reflect.FieldUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java
index 50c9fb5..a8a78f6 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKey.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.s3a;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java
index 4c953bd..c170830 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AEncryptionSSEKMSUserDefinedKeyBlockOutputStream.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.s3a;
import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index d259bf1..869997b 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs.s3a;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
index b8610d6..90e8894 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/AbstractCommitITest.java
@@ -28,7 +28,7 @@ import org.junit.Assert;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3e149a6..79e8a69 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -42,7 +42,7 @@ import com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java
index 12c2e3f..a68e6ac 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java
+++ b/hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java
@@ -25,7 +25,7 @@ import java.net.URISyntaxException;
import com.microsoft.azure.datalake.store.oauth2.DeviceCodeTokenProvider;
import com.microsoft.azure.datalake.store.oauth2.MsiTokenProvider;
-import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider;
import org.apache.hadoop.fs.adl.oauth2.AzureADTokenProvider;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 9396a51..197ab22 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -41,7 +41,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
index 9a85308..5f051ef 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/BlockBlobAppendStream.java
@@ -44,7 +44,7 @@ import java.util.concurrent.atomic.AtomicReference;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.commons.codec.binary.Base64;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java
index aa7ac2e..850e552 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/ClientThrottlingAnalyzer.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.azure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index dfc881a..5202762 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -48,7 +48,7 @@ import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectReader;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
index 68ddcdf..6e98755 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java
@@ -36,7 +36,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index ea77510..76ced3b 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.azure.security.Constants;
import org.apache.hadoop.io.retry.RetryPolicy;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
index a0204be..f4ec172 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SecureWasbRemoteCallHelper.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs.azure;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
import org.apache.hadoop.fs.azure.security.Constants;
import org.apache.hadoop.fs.azure.security.SpnegoToken;
import org.apache.hadoop.fs.azure.security.WasbDelegationTokenIdentifier;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
index 0aa9393..f54a2e1 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestBlobDataValidation.java
@@ -29,7 +29,7 @@ import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.util.Arrays;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.azure.AzureNativeFileSystemStore.TestHookOperationContext;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
index 29611bf..4e88b45 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemAppend.java
@@ -22,7 +22,7 @@ import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
index d5f6437..1739cff 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/MockStorageInterface.java
@@ -36,7 +36,7 @@ import java.util.TimeZone;
import java.util.List;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.net.URLCodec;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
import org.apache.http.client.utils.URIBuilder;
import com.microsoft.azure.storage.AccessCondition;
@@ -339,7 +339,7 @@ public class MockStorageInterface extends StorageInterface {
@Override
public StorageUri getStorageUri() {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
}
@@ -590,20 +590,20 @@ public class MockStorageInterface extends StorageInterface {
@Override
public void create(long length, BlobRequestOptions options,
OperationContext opContext) throws StorageException {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
@Override
public void uploadPages(InputStream sourceStream, long offset, long length,
BlobRequestOptions options, OperationContext opContext)
throws StorageException, IOException {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
@Override
public ArrayList<PageRange> downloadPageRanges(BlobRequestOptions options,
OperationContext opContext) throws StorageException {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
@Override
@@ -622,7 +622,7 @@ public class MockStorageInterface extends StorageInterface {
@Override
public StorageUri getStorageUri() {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
@Override
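
Note: the message argument added in the hunks above is forced by the library, not by style. commons-lang3's NotImplementedException (which extends UnsupportedOperationException) dropped the zero-argument constructor that commons-lang 2.x offered, so every throw site in MockStorageInterface, and in FairCallQueue later in this commit, must now pass a string. A minimal sketch of the lang3 usage, assuming only commons-lang3 3.7 on the classpath; the class name is illustrative:

    import org.apache.commons.lang3.NotImplementedException;

    public class NotImplementedSketch {
      static void unsupported() {
        // lang3 provides (String), (Throwable) and (String, Throwable)
        // constructors, but no zero-argument form.
        throw new NotImplementedException("Code is not implemented");
      }

      public static void main(String[] args) {
        try {
          unsupported();
        } catch (UnsupportedOperationException e) {
          System.out.println(e.getMessage()); // Code is not implemented
        }
      }
    }
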
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java
index 820ce4f..b8cf5ba 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemBlockCompaction.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.fs.azure;
import com.microsoft.azure.storage.blob.BlockEntry;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
index 2fbbcd1..8d2a104 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/integration/AzureTestUtils.java
@@ -28,7 +28,7 @@ import org.junit.internal.AssumptionViolatedException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index ea99016..9db0eb5 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.tools;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
index 668b594..e49feb5 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
@@ -28,7 +28,7 @@ import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
index faa4aa2..c486bdb 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
@@ -22,7 +22,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
index 9568171..cf6da25 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ClusterSummarizer.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
index 8f9d434..973838a 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/ExecutionSummarizer.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.mapred.gridmix;
import java.io.IOException;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -76,7 +76,7 @@ class ExecutionSummarizer implements StatListener<JobStats> {
startTime = System.currentTimeMillis();
// flatten the args string and store it
commandLineArgs =
- org.apache.commons.lang.StringUtils.join(args, ' ');
+ org.apache.commons.lang3.StringUtils.join(args, ' ');
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
index 4271742..73662bf 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/JobFactory.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
index 877d434..494b9a1 100644
--- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
+++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/RandomTextDataGenerator.java
@@ -21,7 +21,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.Random;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java
index afafd55..2accbac 100644
--- a/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java
+++ b/hadoop-tools/hadoop-resourceestimator/src/main/java/org/apache/hadoop/resourceestimator/translator/impl/BaseLogParser.java
@@ -30,7 +30,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.commons.lang.CharSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.resourceestimator.common.api.RecurrenceId;
import org.apache.hadoop.resourceestimator.common.api.ResourceSkyline;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java
index 5856626..e6d09dc 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordListAnonymizerUtility.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.tools.rumen.anonymization;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
/**
* Utility class to handle commonly performed tasks in a
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
index 1c92caf..02fd48a 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/datatypes/util/MapReduceJobPropertiesParser.java
@@ -25,7 +25,7 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java
index bd50ae0..e1f6da5 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java
@@ -25,7 +25,7 @@ import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
---------------------------------------------------------------------
[17/45] hadoop git commit: HADOOP-15495. Upgrade commons-lang version
to 3.7 in hadoop-common-project and hadoop-tools. Contributed by Takanobu
Asanuma.
Posted by xy...@apache.org.
HADOOP-15495. Upgrade commons-lang version to 3.7 in hadoop-common-project and hadoop-tools. Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b2399d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b2399d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b2399d6
Branch: refs/heads/HDDS-4
Commit: 2b2399d623539ab68e71a38fa9fbfc9a405bddb8
Parents: 8752a48
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jun 28 14:29:40 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Jun 28 14:37:22 2018 +0900
----------------------------------------------------------------------
.../hadoop-client-minicluster/pom.xml | 8 ---
hadoop-common-project/hadoop-common/pom.xml | 5 --
.../hadoop/conf/ReconfigurationServlet.java | 42 ++++++-------
.../apache/hadoop/crypto/key/KeyProvider.java | 4 +-
.../org/apache/hadoop/crypto/key/KeyShell.java | 2 +-
.../main/java/org/apache/hadoop/fs/FsShell.java | 3 +-
.../main/java/org/apache/hadoop/fs/Path.java | 2 +-
.../java/org/apache/hadoop/fs/shell/Count.java | 2 +-
.../apache/hadoop/io/ElasticByteBufferPool.java | 2 +-
.../apache/hadoop/io/erasurecode/ECSchema.java | 4 +-
.../nativeio/SharedFileDescriptorFactory.java | 2 +-
.../org/apache/hadoop/ipc/CallerContext.java | 4 +-
.../apache/hadoop/ipc/DecayRpcScheduler.java | 4 +-
.../org/apache/hadoop/ipc/FairCallQueue.java | 4 +-
.../hadoop/metrics2/MetricsJsonBuilder.java | 2 +-
.../hadoop/metrics2/lib/MethodMetric.java | 2 +-
.../metrics2/lib/MutableMetricsFactory.java | 2 +-
.../hadoop/metrics2/lib/MutableQuantiles.java | 2 +-
.../metrics2/lib/MutableRollingAverages.java | 2 +-
.../apache/hadoop/metrics2/lib/MutableStat.java | 2 +-
.../metrics2/sink/RollingFileSystemSink.java | 2 +-
.../org/apache/hadoop/net/TableMapping.java | 2 +-
.../apache/hadoop/net/unix/DomainSocket.java | 2 +-
.../hadoop/net/unix/DomainSocketWatcher.java | 2 +-
.../security/ShellBasedUnixGroupsMapping.java | 2 +-
.../hadoop/security/alias/CredentialShell.java | 2 +-
.../hadoop/security/http/CrossOriginFilter.java | 2 +-
.../hadoop/security/token/DtFileOperations.java | 2 +-
.../org/apache/hadoop/tools/TableListing.java | 6 +-
.../org/apache/hadoop/util/StringUtils.java | 64 +++++++++++++++++++-
.../apache/hadoop/conf/TestConfiguration.java | 2 +-
.../conf/TestConfigurationFieldsBase.java | 2 +-
.../crypto/random/TestOsSecureRandom.java | 2 +-
.../org/apache/hadoop/fs/FSTestWrapper.java | 2 +-
.../apache/hadoop/fs/FileContextTestHelper.java | 1 -
.../hadoop/fs/TestDFCachingGetSpaceUsed.java | 2 +-
.../fs/TestFileSystemStorageStatistics.java | 18 +++---
.../hadoop/fs/shell/TestCopyFromLocal.java | 10 +--
.../TestSharedFileDescriptorFactory.java | 2 +-
.../org/apache/hadoop/ipc/TestProtoBufRpc.java | 2 +-
.../java/org/apache/hadoop/ipc/TestSaslRPC.java | 2 +-
.../org/apache/hadoop/net/TestNetUtils.java | 2 +-
.../hadoop/net/unix/TestDomainSocket.java | 2 +-
...tionTokenAuthenticationHandlerWithMocks.java | 2 +-
.../AbstractServiceLauncherTestBase.java | 2 +-
.../apache/hadoop/test/GenericTestUtils.java | 4 +-
.../hadoop/util/TestShutdownHookManager.java | 2 +-
.../apache/hadoop/util/TestSignalLogger.java | 2 +-
.../hdfs/server/namenode/FSNamesystemLock.java | 2 +-
hadoop-project/pom.xml | 5 --
.../aliyun/oss/AliyunCredentialsProvider.java | 2 +-
.../fs/aliyun/oss/AliyunOSSFileSystem.java | 2 +-
.../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 2 +-
.../hadoop/fs/aliyun/oss/AliyunOSSUtils.java | 2 +-
.../fs/aliyun/oss/AliyunOSSTestUtils.java | 2 +-
.../fs/s3a/AWSCredentialProviderList.java | 2 +-
.../fs/s3a/BasicAWSCredentialsProvider.java | 2 +-
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 4 +-
.../apache/hadoop/fs/s3a/S3AInputStream.java | 2 +-
.../java/org/apache/hadoop/fs/s3a/S3AUtils.java | 2 +-
.../fs/s3a/SimpleAWSCredentialsProvider.java | 2 +-
.../fs/s3a/TemporaryAWSCredentialsProvider.java | 2 +-
.../s3a/auth/AssumedRoleCredentialProvider.java | 2 +-
.../s3a/commit/files/SinglePendingCommit.java | 2 +-
.../hadoop/fs/s3a/commit/files/SuccessData.java | 2 +-
.../fs/s3a/commit/magic/MagicCommitTracker.java | 2 +-
.../hadoop/fs/s3a/commit/staging/Paths.java | 2 +-
.../fs/s3a/s3guard/DynamoDBClientFactory.java | 2 +-
.../fs/s3a/s3guard/DynamoDBMetadataStore.java | 2 +-
.../fs/s3a/s3guard/LocalMetadataStore.java | 2 +-
.../hadoop/fs/s3a/s3guard/S3GuardTool.java | 2 +-
.../hadoop/fs/s3native/S3xLoginHelper.java | 4 +-
.../hadoop/fs/s3a/ITestS3AConfiguration.java | 4 +-
.../ITestS3AEncryptionSSEKMSUserDefinedKey.java | 2 +-
...onSSEKMSUserDefinedKeyBlockOutputStream.java | 2 +-
.../org/apache/hadoop/fs/s3a/S3ATestUtils.java | 2 +-
.../fs/s3a/commit/AbstractCommitITest.java | 2 +-
.../org/apache/hadoop/fs/adl/AdlFileSystem.java | 2 +-
.../hadoop/fs/adl/TestAzureADTokenProvider.java | 2 +-
.../fs/azure/AzureNativeFileSystemStore.java | 2 +-
.../hadoop/fs/azure/BlockBlobAppendStream.java | 2 +-
.../fs/azure/ClientThrottlingAnalyzer.java | 2 +-
.../hadoop/fs/azure/NativeAzureFileSystem.java | 2 +-
.../hadoop/fs/azure/PageBlobOutputStream.java | 2 +-
.../fs/azure/RemoteWasbAuthorizerImpl.java | 2 +-
.../fs/azure/SecureWasbRemoteCallHelper.java | 2 +-
.../fs/azure/ITestBlobDataValidation.java | 2 +-
.../azure/ITestNativeAzureFileSystemAppend.java | 2 +-
.../hadoop/fs/azure/MockStorageInterface.java | 12 ++--
...estNativeAzureFileSystemBlockCompaction.java | 2 +-
.../fs/azure/integration/AzureTestUtils.java | 2 +-
.../org/apache/hadoop/tools/DistCpOptions.java | 2 +-
.../org/apache/hadoop/tools/OptionsParser.java | 2 +-
.../apache/hadoop/tools/mapred/CopyMapper.java | 2 +-
.../mapred/gridmix/ClusterSummarizer.java | 2 +-
.../mapred/gridmix/ExecutionSummarizer.java | 4 +-
.../hadoop/mapred/gridmix/JobFactory.java | 2 +-
.../mapred/gridmix/RandomTextDataGenerator.java | 2 +-
.../translator/impl/BaseLogParser.java | 1 -
.../WordListAnonymizerUtility.java | 2 +-
.../util/MapReduceJobPropertiesParser.java | 2 +-
.../hadoop/streaming/TestUnconsumedInput.java | 2 +-
102 files changed, 207 insertions(+), 168 deletions(-)
----------------------------------------------------------------------
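
Nearly every hunk in this commit is the same mechanical package rename, org.apache.commons.lang to org.apache.commons.lang3; call sites only change where lang3 renamed or removed a method. A minimal sketch of the common case, assuming commons-lang3 3.7 on the classpath: the import moves, and signatures such as the StringUtils.join(Object[], char) used by ExecutionSummarizer stay identical.

    // The only change for most files in this patch: s/commons.lang/commons.lang3/
    import org.apache.commons.lang3.StringUtils;

    public class Lang3RenameSketch {
      public static void main(String[] args) {
        // Same join(Object[], char) signature as in commons-lang 2.x.
        String commandLineArgs =
            StringUtils.join(new String[] {"-input", "/a", "-output", "/b"}, ' ');
        System.out.println(commandLineArgs); // -input /a -output /b
      }
    }
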
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index b9363de..6fa24b4 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -167,10 +167,6 @@
<artifactId>commons-io</artifactId>
</exclusion>
<exclusion>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- </exclusion>
- <exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
@@ -496,10 +492,6 @@
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- </exclusion>
- <exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 1a16dc4..67a5a54 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -157,11 +157,6 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <scope>compile</scope>
- </dependency>
- <dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<scope>compile</scope>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
index 5a616f7..c5bdf4e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.conf;
-import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.commons.lang3.StringEscapeUtils;
import java.util.Collection;
import java.util.Enumeration;
@@ -72,10 +72,10 @@ public class ReconfigurationServlet extends HttpServlet {
private void printHeader(PrintWriter out, String nodeName) {
out.print("<html><head>");
out.printf("<title>%s Reconfiguration Utility</title>%n",
- StringEscapeUtils.escapeHtml(nodeName));
+ StringEscapeUtils.escapeHtml4(nodeName));
out.print("</head><body>\n");
out.printf("<h1>%s Reconfiguration Utility</h1>%n",
- StringEscapeUtils.escapeHtml(nodeName));
+ StringEscapeUtils.escapeHtml4(nodeName));
}
private void printFooter(PrintWriter out) {
@@ -103,20 +103,20 @@ public class ReconfigurationServlet extends HttpServlet {
out.print("<tr><td>");
if (!reconf.isPropertyReconfigurable(c.prop)) {
out.print("<font color=\"red\">" +
- StringEscapeUtils.escapeHtml(c.prop) + "</font>");
+ StringEscapeUtils.escapeHtml4(c.prop) + "</font>");
changeOK = false;
} else {
- out.print(StringEscapeUtils.escapeHtml(c.prop));
+ out.print(StringEscapeUtils.escapeHtml4(c.prop));
out.print("<input type=\"hidden\" name=\"" +
- StringEscapeUtils.escapeHtml(c.prop) + "\" value=\"" +
- StringEscapeUtils.escapeHtml(c.newVal) + "\"/>");
+ StringEscapeUtils.escapeHtml4(c.prop) + "\" value=\"" +
+ StringEscapeUtils.escapeHtml4(c.newVal) + "\"/>");
}
out.print("</td><td>" +
(c.oldVal == null ? "<it>default</it>" :
- StringEscapeUtils.escapeHtml(c.oldVal)) +
+ StringEscapeUtils.escapeHtml4(c.oldVal)) +
"</td><td>" +
(c.newVal == null ? "<it>default</it>" :
- StringEscapeUtils.escapeHtml(c.newVal)) +
+ StringEscapeUtils.escapeHtml4(c.newVal)) +
"</td>");
out.print("</tr>\n");
}
@@ -147,9 +147,9 @@ public class ReconfigurationServlet extends HttpServlet {
synchronized(oldConf) {
while (params.hasMoreElements()) {
String rawParam = params.nextElement();
- String param = StringEscapeUtils.unescapeHtml(rawParam);
+ String param = StringEscapeUtils.unescapeHtml4(rawParam);
String value =
- StringEscapeUtils.unescapeHtml(req.getParameter(rawParam));
+ StringEscapeUtils.unescapeHtml4(req.getParameter(rawParam));
if (value != null) {
if (value.equals(newConf.getRaw(param)) || value.equals("default") ||
value.equals("null") || value.isEmpty()) {
@@ -157,8 +157,8 @@ public class ReconfigurationServlet extends HttpServlet {
value.isEmpty()) &&
oldConf.getRaw(param) != null) {
out.println("<p>Changed \"" +
- StringEscapeUtils.escapeHtml(param) + "\" from \"" +
- StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) +
+ StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
+ StringEscapeUtils.escapeHtml4(oldConf.getRaw(param)) +
"\" to default</p>");
reconf.reconfigureProperty(param, null);
} else if (!value.equals("default") && !value.equals("null") &&
@@ -168,16 +168,16 @@ public class ReconfigurationServlet extends HttpServlet {
// change from default or value to different value
if (oldConf.getRaw(param) == null) {
out.println("<p>Changed \"" +
- StringEscapeUtils.escapeHtml(param) +
+ StringEscapeUtils.escapeHtml4(param) +
"\" from default to \"" +
- StringEscapeUtils.escapeHtml(value) + "\"</p>");
+ StringEscapeUtils.escapeHtml4(value) + "\"</p>");
} else {
out.println("<p>Changed \"" +
- StringEscapeUtils.escapeHtml(param) + "\" from \"" +
- StringEscapeUtils.escapeHtml(oldConf.
+ StringEscapeUtils.escapeHtml4(param) + "\" from \"" +
+ StringEscapeUtils.escapeHtml4(oldConf.
getRaw(param)) +
"\" to \"" +
- StringEscapeUtils.escapeHtml(value) + "\"</p>");
+ StringEscapeUtils.escapeHtml4(value) + "\"</p>");
}
reconf.reconfigureProperty(param, value);
} else {
@@ -185,10 +185,10 @@ public class ReconfigurationServlet extends HttpServlet {
}
} else {
// parameter value != newConf value
- out.println("<p>\"" + StringEscapeUtils.escapeHtml(param) +
+ out.println("<p>\"" + StringEscapeUtils.escapeHtml4(param) +
"\" not changed because value has changed from \"" +
- StringEscapeUtils.escapeHtml(value) + "\" to \"" +
- StringEscapeUtils.escapeHtml(newConf.getRaw(param)) +
+ StringEscapeUtils.escapeHtml4(value) + "\" to \"" +
+ StringEscapeUtils.escapeHtml4(newConf.getRaw(param)) +
"\" since approval</p>");
}
}
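
The escapeHtml-to-escapeHtml4 churn above is another lang3 rename: the HTML 4.0 escaper pair gained a 4 suffix (escapeHtml4/unescapeHtml4) in org.apache.commons.lang3.StringEscapeUtils, which still ships in 3.7 although it is deprecated there in favour of commons-text. A minimal round-trip sketch:

    import org.apache.commons.lang3.StringEscapeUtils;

    public class EscapeHtml4Sketch {
      public static void main(String[] args) {
        String escaped = StringEscapeUtils.escapeHtml4("<dfs.replication> & \"3\"");
        System.out.println(escaped); // &lt;dfs.replication&gt; &amp; &quot;3&quot;
        System.out.println(StringEscapeUtils.unescapeHtml4(escaped)); // round-trips
      }
    }
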
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index 050540b..286312c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -33,8 +33,8 @@ import java.util.Map;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
index 9fdf242..fa84c47 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
@@ -27,7 +27,7 @@ import java.util.Map;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 94d3389..5be6e5f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
-import org.apache.commons.lang.WordUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -275,7 +274,7 @@ public class FsShell extends Configured implements Tool {
listing = null;
}
- for (String descLine : WordUtils.wrap(
+ for (String descLine : StringUtils.wrap(
line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
out.println(prefix + descLine);
}
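
The FsShell hunk is the one spot where a pure rename is not enough: lang3's copy of WordUtils lives in org.apache.commons.lang3.text and is deprecated in favour of commons-text, so the wrapping call is redirected to org.apache.hadoop.util.StringUtils, which this commit grows by 64 lines (see the file list below), presumably to host a wrap of the same shape. A sketch of the equivalent call against lang3's deprecated copy; the four-argument signature wrap(str, wrapLength, newLineStr, wrapLongWords) is unchanged from commons-lang 2.x, and the sample string is illustrative:

    import org.apache.commons.lang3.text.WordUtils;

    public class WrapSketch {
      private static final int MAX_LINE_WIDTH = 80;

      public static void main(String[] args) {
        String line = "an illustrative command description long enough to need "
            + "wrapping across more than one line of shell help output, just as "
            + "FsShell wraps its per-command usage text";
        for (String descLine : WordUtils.wrap(line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
          System.out.println("  " + descLine);
        }
      }
    }
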
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 252b3cc..b6244d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -27,7 +27,7 @@ import java.net.URISyntaxException;
import java.util.regex.Pattern;
import org.apache.avro.reflect.Stringable;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
index 8f6fc4d..011e489 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
@@ -23,7 +23,7 @@ import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
index 9dd7771..bbedf2a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.io;
import com.google.common.collect.ComparisonChain;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import java.nio.ByteBuffer;
import java.util.Map;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index f008e24..0f95058 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -22,8 +22,8 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
index 4126344..4d820c2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/SharedFileDescriptorFactory.java
@@ -21,7 +21,7 @@ import java.io.FileInputStream;
import java.io.IOException;
import java.io.FileDescriptor;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
index bdfa471..b156d1f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.ipc;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index 0a00ca7..f12ecb6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -39,7 +39,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AtomicDoubleArray;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.metrics2.MetricsCollector;
@@ -429,7 +429,7 @@ public class DecayRpcScheduler implements RpcScheduler,
updateAverageResponseTime(true);
} catch (Exception ex) {
LOG.error("decayCurrentCounts exception: " +
- ExceptionUtils.getFullStackTrace(ex));
+ ExceptionUtils.getStackTrace(ex));
throw ex;
}
}
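
getFullStackTrace is gone in lang3, hence the switch above. commons-lang 2.x kept it alongside getStackTrace to chase manually nested causes on pre-Java-1.4-style exceptions; since printStackTrace has long emitted the "Caused by:" chain itself, lang3 kept only getStackTrace. A minimal sketch showing the cause chain is preserved:

    import org.apache.commons.lang3.exception.ExceptionUtils;

    public class StackTraceSketch {
      public static void main(String[] args) {
        Exception ex = new RuntimeException("decayCurrentCounts exception",
            new IllegalStateException("root cause"));
        // getStackTrace delegates to printStackTrace, so the nested
        // "Caused by: java.lang.IllegalStateException" line is included.
        System.out.println(ExceptionUtils.getStackTrace(ex));
      }
    }
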
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 6d9ea3e..3a8c83d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -32,7 +32,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
import org.apache.hadoop.metrics2.util.MBeans;
@@ -286,7 +286,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
*/
@Override
public Iterator<E> iterator() {
- throw new NotImplementedException();
+ throw new NotImplementedException("Code is not implemented");
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
index ce6fbe1..1d62c0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsJsonBuilder.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
index 3d7a90e..9b54adc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.metrics2.lib;
import java.lang.reflect.Method;
import static com.google.common.base.Preconditions.*;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.metrics2.MetricsException;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
index b2042e7..a3ca98d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.metrics2.lib;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
index cc32975..6b30618 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
@@ -26,7 +26,7 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
index 053cb55..22c288a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java
@@ -32,7 +32,7 @@ import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index 92fe3d1..5ef3178 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.metrics2.lib;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.metrics2.MetricsInfo;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
index 0f6e9a9..92ac952 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java
@@ -37,7 +37,7 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.lang.time.FastDateFormat;
+import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
index ead9a74..45759df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
@@ -28,7 +28,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
index ac118c0..9693220 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
@@ -28,7 +28,7 @@ import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.ByteBuffer;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.CloseableReferenceCount;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
index c7af97f..e36399f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
@@ -32,7 +32,7 @@ import java.util.Map;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 976ddba..31f4398 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index 6085121..0a00d79 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -27,7 +27,7 @@ import java.util.List;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.CommandShell;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
index 34d9fe2..02c168f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
@@ -34,7 +34,7 @@ import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import java.util.stream.Collectors;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
index d36ad9b..f154f2d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
@@ -26,7 +26,7 @@ import java.util.ArrayList;
import java.util.Date;
import java.util.ServiceLoader;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
index bc2e2d4..85015fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
@@ -20,8 +20,7 @@ package org.apache.hadoop.tools;
import java.util.ArrayList;
import java.util.LinkedList;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.WordUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
/**
@@ -103,7 +102,8 @@ public class TableListing {
// Line-wrap if it's too long
String[] lines = new String[] {raw};
if (wrap) {
- lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n");
+ lines = org.apache.hadoop.util.StringUtils.wrap(lines[0], wrapWidth,
+ "\n", true).split("\n");
}
for (int i=0; i<lines.length; i++) {
if (justification == Justification.LEFT) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 33a2010..3db805f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -35,7 +35,7 @@ import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.commons.lang3.time.FastDateFormat;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -987,7 +987,7 @@ public class StringUtils {
String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR, '_');
for (String word : words)
- sb.append(org.apache.commons.lang.StringUtils.capitalize(word));
+ sb.append(org.apache.commons.lang3.StringUtils.capitalize(word));
return sb.toString();
}
@@ -1183,4 +1183,64 @@ public class StringUtils {
return true;
}
+ /**
+ * Same as WordUtils#wrap in commons-lang 2.6. Unlike commons-lang3, leading
+ * spaces on the first line are NOT stripped.
+ *
+ * @param str the String to be word wrapped, may be null
+ * @param wrapLength the column to wrap the words at, less than 1 is treated
+ * as 1
+ * @param newLineStr the string to insert for a new line,
+ * <code>null</code> uses the system property line separator
+ * @param wrapLongWords true if long words (such as URLs) should be wrapped
+ * @return a line with newlines inserted, <code>null</code> if null input
+ */
+ public static String wrap(String str, int wrapLength, String newLineStr,
+ boolean wrapLongWords) {
+ if(str == null) {
+ return null;
+ } else {
+ if(newLineStr == null) {
+ newLineStr = System.lineSeparator();
+ }
+
+ if(wrapLength < 1) {
+ wrapLength = 1;
+ }
+
+ int inputLineLength = str.length();
+ int offset = 0;
+ StringBuffer wrappedLine = new StringBuffer(inputLineLength + 32);
+
+ while(inputLineLength - offset > wrapLength) {
+ if(str.charAt(offset) == 32) {
+ ++offset;
+ } else {
+ int spaceToWrapAt = str.lastIndexOf(32, wrapLength + offset);
+ if(spaceToWrapAt >= offset) {
+ wrappedLine.append(str.substring(offset, spaceToWrapAt));
+ wrappedLine.append(newLineStr);
+ offset = spaceToWrapAt + 1;
+ } else if(wrapLongWords) {
+ wrappedLine.append(str.substring(offset, wrapLength + offset));
+ wrappedLine.append(newLineStr);
+ offset += wrapLength;
+ } else {
+ spaceToWrapAt = str.indexOf(32, wrapLength + offset);
+ if(spaceToWrapAt >= 0) {
+ wrappedLine.append(str.substring(offset, spaceToWrapAt));
+ wrappedLine.append(newLineStr);
+ offset = spaceToWrapAt + 1;
+ } else {
+ wrappedLine.append(str.substring(offset));
+ offset = inputLineLength;
+ }
+ }
+ }
+ }
+
+ wrappedLine.append(str.substring(offset));
+ return wrappedLine.toString();
+ }
+ }
}
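(For illustration only, not part of the commit: a minimal sketch exercising the
wrap helper added above, which TableListing now calls in place of the removed
commons-lang 2.6 WordUtils#wrap. It assumes a hadoop-common build containing
this patch is on the classpath.)

public class WrapDemo {
  public static void main(String[] args) {
    String text = "The quick brown fox jumps over the lazy dog";
    // Wrap at column 10, insert "\n" between lines, and break any word
    // longer than the wrap length (the wrapLongWords=true case).
    System.out.println(
        org.apache.hadoop.util.StringUtils.wrap(text, 10, "\n", true));
  }
}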
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index e865bf1..2361626 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -62,7 +62,7 @@ import static org.apache.hadoop.conf.StorageUnit.TB;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 7f27d7d..152159b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.conf;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
index 6c2e5b8..2ea4523 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.crypto.random;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.junit.Assume;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
index da07105..8cbca8e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.fs;
import java.io.IOException;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.test.GenericTestUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
index 1f37f74..b5307a4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import java.io.FileNotFoundException;
import java.util.EnumSet;
-import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
import org.apache.hadoop.io.IOUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
index 3def5d5..6b9a34c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFCachingGetSpaceUsed.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.fs;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
index 597eb93..fa68264 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemStorageStatistics.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs;
-import org.apache.commons.lang.math.RandomUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
import org.junit.Before;
@@ -67,15 +67,15 @@ public class TestFileSystemStorageStatistics {
@Before
public void setup() {
- statistics.incrementBytesRead(RandomUtils.nextInt(100));
- statistics.incrementBytesWritten(RandomUtils.nextInt(100));
- statistics.incrementLargeReadOps(RandomUtils.nextInt(100));
- statistics.incrementWriteOps(RandomUtils.nextInt(100));
+ statistics.incrementBytesRead(RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesWritten(RandomUtils.nextInt(0, 100));
+ statistics.incrementLargeReadOps(RandomUtils.nextInt(0, 100));
+ statistics.incrementWriteOps(RandomUtils.nextInt(0, 100));
- statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(100));
- statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(100));
- statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(100));
- statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(100));
+ statistics.incrementBytesReadByDistance(0, RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100));
+ statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100));
}
@Test
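(Context for the change above, not part of the commit: commons-lang 2.6's
RandomUtils.nextInt(int n) draws from [0, n), while commons-lang3's
RandomUtils.nextInt(int startInclusive, int endExclusive) takes explicit
bounds, so nextInt(0, 100) preserves the original range. The same translation
appears in TestCopyFromLocal below. A minimal sketch, assuming commons-lang3
is on the classpath:)

import org.apache.commons.lang3.RandomUtils;

public class RandomRangeDemo {
  public static void main(String[] args) {
    // lang 2.6: RandomUtils.nextInt(100)    -> value in [0, 100)
    // lang3:    RandomUtils.nextInt(0, 100) -> same range, bounds explicit
    int sample = RandomUtils.nextInt(0, 100);
    System.out.println("sample = " + sample);
  }
}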
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
index 8e60540..e7f36fc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyFromLocal.java
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.fs.shell;
-import org.apache.commons.lang.RandomStringUtils;
-import org.apache.commons.lang.math.RandomUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
@@ -56,11 +56,11 @@ public class TestCopyFromLocal {
fs.mkdirs(toDirPath);
int numTotalFiles = 0;
- int numDirs = RandomUtils.nextInt(5);
+ int numDirs = RandomUtils.nextInt(0, 5);
for (int dirCount = 0; dirCount < numDirs; ++dirCount) {
Path subDirPath = new Path(fromDirPath, "subdir" + dirCount);
fs.mkdirs(subDirPath);
- int numFiles = RandomUtils.nextInt(10);
+ int numFiles = RandomUtils.nextInt(0, 10);
for (int fileCount = 0; fileCount < numFiles; ++fileCount) {
numTotalFiles++;
Path subFile = new Path(subDirPath, "file" + fileCount);
@@ -115,7 +115,7 @@ public class TestCopyFromLocal {
Path dir = new Path("dir" + RandomStringUtils.randomNumeric(4));
int numFiles = TestCopyFromLocal.initialize(dir);
int maxThreads = Runtime.getRuntime().availableProcessors() * 2;
- int randThreads = RandomUtils.nextInt(maxThreads - 1) + 1;
+ int randThreads = RandomUtils.nextInt(0, maxThreads - 1) + 1;
String numThreads = Integer.toString(randThreads);
run(new TestMultiThreadedCopy(randThreads,
randThreads == 1 ? 0 : numFiles), "-t", numThreads,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
index fbe3fb8..17be587 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestSharedFileDescriptorFactory.java
@@ -26,7 +26,7 @@ import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.GenericTestUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
index 41ae910..5fbd957 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ipc;
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.metrics.RpcMetrics;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index 0b463a5..5200420 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.ipc;
import com.google.protobuf.ServiceException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index 30176f2..62bd1b1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -38,7 +38,7 @@ import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.KerberosAuthException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
index 28b3cbe..c0d204f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
@@ -43,7 +43,7 @@ import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket.DomainChannel;
import org.apache.hadoop.test.GenericTestUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
index 9357f48..0f8f1e4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
@@ -21,7 +21,7 @@ import static org.apache.hadoop.security.token.delegation.web.DelegationTokenAut
import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.commons.lang.mutable.MutableBoolean;
+import org.apache.commons.lang3.mutable.MutableBoolean;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
index 127b0b3..d7c8631 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/launcher/AbstractServiceLauncherTestBase.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.service.launcher;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceOperations;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 53eb2be..3e9da1b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -40,7 +40,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.FileUtil;
@@ -344,7 +344,7 @@ public abstract class GenericTestUtils {
throw new AssertionError(E_NULL_THROWABLE_STRING, t);
}
if (expectedText != null && !msg.contains(expectedText)) {
- String prefix = org.apache.commons.lang.StringUtils.isEmpty(message)
+ String prefix = org.apache.commons.lang3.StringUtils.isEmpty(message)
? "" : (message + ": ");
throw new AssertionError(
String.format("%s Expected to find '%s' %s: %s",
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
index 2aa5e95..d539823 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestShutdownHookManager.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.util;
-import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.slf4j.LoggerFactory;
import org.junit.Assert;
import org.junit.Test;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
index a9fa4c6..b61cebc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.util;
-import org.apache.commons.lang.SystemUtils;
+import org.apache.commons.lang3.SystemUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index f8e69e2..5992e54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -332,7 +332,7 @@ class FSNamesystemLock {
private static String getMetricName(String operationName, boolean isWrite) {
return (isWrite ? WRITE_LOCK_METRIC_PREFIX : READ_LOCK_METRIC_PREFIX) +
- org.apache.commons.lang.StringUtils.capitalize(operationName) +
+ org.apache.commons.lang3.StringUtils.capitalize(operationName) +
LOCK_METRIC_SUFFIX;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index dfd1eac..8e28afe 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1044,11 +1044,6 @@
<version>4.11</version>
</dependency>
<dependency>
- <groupId>commons-lang</groupId>
- <artifactId>commons-lang</artifactId>
- <version>2.6</version>
- </dependency>
- <dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>3.2.2</version>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
index 58c14a9..32367af 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
@@ -22,7 +22,7 @@ import com.aliyun.oss.common.auth.Credentials;
import com.aliyun.oss.common.auth.CredentialsProvider;
import com.aliyun.oss.common.auth.DefaultCredentials;
import com.aliyun.oss.common.auth.InvalidCredentialsException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 93e31d5..4fbb6fb 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -30,7 +30,7 @@ import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CreateFlag;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
index cc050c8..5e21759 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystemStore.java
@@ -45,7 +45,7 @@ import com.aliyun.oss.model.UploadPartCopyResult;
import com.aliyun.oss.model.UploadPartRequest;
import com.aliyun.oss.model.UploadPartResult;
import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
index 2fe06c1..a7536d6 100644
--- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
+++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSUtils.java
@@ -23,7 +23,7 @@ import java.io.IOException;
import com.aliyun.oss.common.auth.CredentialsProvider;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.security.ProviderUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
index 901cb2b..79e0de3 100644
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
+++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSTestUtils.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.fs.aliyun.oss;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.junit.internal.AssumptionViolatedException;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
index e0bee0f..10201f0 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
@@ -23,7 +23,7 @@ import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AnonymousAWSCredentials;
import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IOUtils;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
index b1899e2..01bcc6a 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/BasicAWSCredentialsProvider.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.fs.s3a;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.AWSCredentials;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 4b0c208..737d7da 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -121,8 +121,8 @@ import static org.apache.hadoop.fs.s3a.Constants.*;
import static org.apache.hadoop.fs.s3a.Invoker.*;
import static org.apache.hadoop.fs.s3a.S3AUtils.*;
import static org.apache.hadoop.fs.s3a.Statistic.*;
-import static org.apache.commons.lang.StringUtils.isNotBlank;
-import static org.apache.commons.lang.StringUtils.isNotEmpty;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+import static org.apache.commons.lang3.StringUtils.isNotEmpty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index c54d3e26..440739d 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -24,7 +24,7 @@ import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.amazonaws.services.s3.model.SSECustomerKey;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CanSetReadahead;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 6a01a80..a5f7d75 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -35,7 +35,7 @@ import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java
index 9939bb2..7f9e57e 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/SimpleAWSCredentialsProvider.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.fs.s3a;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java
index e959908..3b89bde 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/TemporaryAWSCredentialsProvider.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.fs.s3a;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.BasicSessionCredentials;
import com.amazonaws.auth.AWSCredentials;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import java.io.IOException;
import java.net.URI;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
index 4b6a77e..fdaf9bd 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/AssumedRoleCredentialProvider.java
@@ -32,7 +32,7 @@ import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java
index 85cc38a..596dd95 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SinglePendingCommit.java
@@ -33,7 +33,7 @@ import java.util.Map;
import com.amazonaws.services.s3.model.PartETag;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java
index 6cf1f1e..cf84cb3 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/files/SuccessData.java
@@ -28,7 +28,7 @@ import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java
index cf365c2..a619fc7 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/MagicCommitTracker.java
@@ -28,7 +28,7 @@ import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.s3a.WriteOperationHelper;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
index a4d39d7..d5d256a 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/staging/Paths.java
@@ -30,7 +30,7 @@ import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.UncheckedExecutionException;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2399d6/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
index 66ada49..91e64cd 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
@@ -26,7 +26,7 @@ import com.amazonaws.regions.Regions;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.google.common.base.Preconditions;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
[09/45] hadoop git commit: Revert "HDDS-194. Remove NodePoolManager
and node pool handling from SCM. Contributed by Elek Marton"
Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6fe5f3/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index edc0d7b..2bd43fb 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.scm.cli;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
@@ -59,11 +60,13 @@ import java.sql.Statement;
import java.util.HashSet;
import java.util.Set;
+import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
/**
@@ -108,6 +111,15 @@ public class SQLCLI extends Configured implements Tool {
private static final String INSERT_CONTAINER_MEMBERS =
"INSERT INTO containerMembers (containerName, datanodeUUID) " +
"VALUES (\"%s\", \"%s\")";
+ // for nodepool.db
+ private static final String CREATE_NODE_POOL =
+ "CREATE TABLE nodePool (" +
+ "datanodeUUID TEXT NOT NULL," +
+ "poolName TEXT NOT NULL," +
+ "PRIMARY KEY(datanodeUUID, poolName))";
+ private static final String INSERT_NODE_POOL =
+ "INSERT INTO nodePool (datanodeUUID, poolName) " +
+ "VALUES (\"%s\", \"%s\")";
// and reuse CREATE_DATANODE_INFO and INSERT_DATANODE_INFO
// for openContainer.db
private static final String CREATE_OPEN_CONTAINER =
@@ -273,6 +285,9 @@ public class SQLCLI extends Configured implements Tool {
if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
LOG.info("Converting container DB");
convertContainerDB(dbPath, outPath);
+ } else if (dbName.toString().equals(NODEPOOL_DB)) {
+ LOG.info("Converting node pool DB");
+ convertNodePoolDB(dbPath, outPath);
} else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
LOG.info("Converting open container DB");
convertOpenContainerDB(dbPath, outPath);
@@ -528,7 +543,66 @@ public class SQLCLI extends Configured implements Tool {
}
LOG.info("Insertion completed.");
}
+ /**
+ * Converts nodePool.db to SQLite. The schema of the SQL DB:
+ * two tables, nodePool and datanodeInfo (the same datanodeInfo as for
+ * container.db).
+ *
+ * nodePool
+ * ---------------------------------------------------------
+ * datanodeUUID* | poolName*
+ * ---------------------------------------------------------
+ *
+ * datanodeInfo:
+ * ---------------------------------------------------------
+ * hostname | datanodeUUid* | xferPort | ipcPort | containerPort
+ * ---------------------------------------------------------
+ *
+ * @param dbPath path to the node pool db.
+ * @param outPath path to the output SQLite file.
+ * @throws Exception if the conversion fails.
+ */
+ private void convertNodePoolDB(Path dbPath, Path outPath) throws Exception {
+ LOG.info("Create table for sql node pool db.");
+ File dbFile = dbPath.toFile();
+ try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
+ .setConf(conf).setDbFile(dbFile).build();
+ Connection conn = connectDB(outPath.toString())) {
+ executeSQL(conn, CREATE_NODE_POOL);
+ executeSQL(conn, CREATE_DATANODE_INFO);
+ dbStore.iterate(null, (key, value) -> {
+ DatanodeDetails nodeId = DatanodeDetails
+ .getFromProtoBuf(HddsProtos.DatanodeDetailsProto
+ .PARSER.parseFrom(key));
+ String blockPool = DFSUtil.bytes2String(value);
+ try {
+ insertNodePoolDB(conn, blockPool, nodeId);
+ return true;
+ } catch (SQLException e) {
+ throw new IOException(e);
+ }
+ });
+ }
+ }
+
+ private void insertNodePoolDB(Connection conn, String blockPool,
+ DatanodeDetails datanodeDetails) throws SQLException {
+ String insertNodePool = String.format(INSERT_NODE_POOL,
+ datanodeDetails.getUuidString(), blockPool);
+ executeSQL(conn, insertNodePool);
+
+ String insertDatanodeDetails = String
+ .format(INSERT_DATANODE_INFO, datanodeDetails.getHostName(),
+ datanodeDetails.getUuidString(), datanodeDetails.getIpAddress(),
+ datanodeDetails.getPort(DatanodeDetails.Port.Name.STANDALONE)
+ .getValue());
+ executeSQL(conn, insertDatanodeDetails);
+ }
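One design note on the SQL constants used above: the INSERT statements are assembled with String.format, which is adequate for trusted cluster metadata but breaks as soon as a value contains a quote. A minimal sketch of the same insert written with java.sql.PreparedStatement (a hypothetical helper, not part of this patch):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    final class NodePoolInserts {
      // Parameterized equivalent of INSERT_NODE_POOL; the JDBC driver
      // handles quoting, so odd pool names cannot corrupt the statement.
      static void insertNodePool(Connection conn, String datanodeUuid,
          String poolName) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(
            "INSERT INTO nodePool (datanodeUUID, poolName) VALUES (?, ?)")) {
          ps.setString(1, datanodeUuid);
          ps.setString(2, poolName);
          ps.executeUpdate();
        }
      }
    }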
/**
* Convert openContainer.db to sqlite db file. This is rather simple db,
[20/45] hadoop git commit: HDFS-13705: The native ISA-L library
loading failure should be made a warning rather than an error message.
Contributed by Shashikant Banerjee.
Posted by xy...@apache.org.
HDFS-13705: The native ISA-L library loading failure should be made a warning rather than an error message. Contributed by Shashikant Banerjee.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3fa83a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3fa83a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3fa83a4
Branch: refs/heads/HDDS-4
Commit: d3fa83a44b01c85f39bfb4deaf2972912ac61ca3
Parents: 85627e2
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Thu Jun 28 09:21:56 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Thu Jun 28 09:21:56 2018 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fa83a4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
index 3d6867a..ec317ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeNative.java
@@ -46,7 +46,7 @@ public final class ErasureCodeNative {
loadLibrary();
} catch (Throwable t) {
problem = "Loading ISA-L failed: " + t.getMessage();
- LOG.error("Loading ISA-L failed", t);
+ LOG.warn(problem);
}
LOADING_FAILURE_REASON = problem;
}
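Since the failure reason string is recorded either way, the lower log level loses nothing for callers: they consult the recorded reason through the class's accessors rather than the log. A sketch of the typical check (accessor names assumed from ErasureCodeNative's public surface):

    import org.apache.hadoop.io.erasurecode.ErasureCodeNative;

    // Falls back to the pure-Java coder instead of failing hard when the
    // native ISA-L library is unavailable.
    static void logCoderChoice() {
      if (!ErasureCodeNative.isNativeCodeLoaded()) {
        System.out.println("Using the Java RS coder: "
            + ErasureCodeNative.getLoadingFailureReason());
      }
    }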
[39/45] hadoop git commit: HDDS-6. Enable SCM kerberos auth.
Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40c6c19e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40c6c19e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40c6c19e
Branch: refs/heads/HDDS-4
Commit: 40c6c19e5114ebce7b49342dfd194783d8d7bbaf
Parents: ab2f834
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 2 13:19:01 2018 -0700
----------------------------------------------------------------------
.../authentication/util/KerberosUtil.java | 2 +-
.../conf/TestConfigurationFieldsBase.java | 2 +
.../java/org/apache/hadoop/hdds/HddsUtils.java | 13 +-
.../apache/hadoop/hdds/scm/ScmConfigKeys.java | 10 +-
.../scm/protocol/ScmBlockLocationProtocol.java | 3 +
.../StorageContainerLocationProtocol.java | 4 +
.../protocolPB/ScmBlockLocationProtocolPB.java | 6 +
.../StorageContainerLocationProtocolPB.java | 4 +
.../apache/hadoop/ozone/OzoneConfigKeys.java | 5 +
.../common/src/main/resources/ozone-default.xml | 44 ++++
.../StorageContainerDatanodeProtocol.java | 4 +
.../StorageContainerDatanodeProtocolPB.java | 6 +
.../scm/server/StorageContainerManager.java | 49 ++++-
.../StorageContainerManagerHttpServer.java | 5 +-
.../ozone/client/protocol/ClientProtocol.java | 3 +
hadoop-ozone/common/src/main/bin/start-ozone.sh | 7 +
hadoop-ozone/common/src/main/bin/stop-ozone.sh | 13 +-
hadoop-ozone/integration-test/pom.xml | 6 +
.../hadoop/ozone/MiniOzoneClusterImpl.java | 17 +-
.../hadoop/ozone/TestSecureOzoneCluster.java | 205 +++++++++++++++++++
.../ozone/TestStorageContainerManager.java | 4 +-
21 files changed, 379 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index c011045..4459928 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -167,7 +167,7 @@ public class KerberosUtil {
}
/* Return fqdn of the current host */
- static String getLocalHostName() throws UnknownHostException {
+ public static String getLocalHostName() throws UnknownHostException {
return InetAddress.getLocalHost().getCanonicalHostName();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 152159b..bce1cd5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,6 +436,8 @@ public abstract class TestConfigurationFieldsBase {
// Create XML key/value map
LOG_XML.debug("Reading XML property files\n");
xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+ // Remove hadoop property set in ozone-default.xml
+ xmlKeyValueMap.remove("hadoop.custom.tags");
LOG_XML.debug("\n=====\n");
// Create default configuration variable key/value map
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 48c6dce..17c99bb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -241,18 +241,7 @@ public final class HddsUtils {
}
public static boolean isHddsEnabled(Configuration conf) {
- String securityEnabled =
- conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
- "simple");
- boolean securityAuthorizationEnabled = conf.getBoolean(
- CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
-
- if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
- LOG.error("Ozone is not supported in a security enabled cluster. ");
- return false;
- } else {
- return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
- }
+ return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index df6fbf0..91e1cc2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -132,8 +132,9 @@ public final class ScmConfigKeys {
"ozone.scm.http-address";
public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
"ozone.scm.https-address";
- public static final String OZONE_SCM_KEYTAB_FILE =
- "ozone.scm.keytab.file";
+ public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
+ "ozone.scm.kerberos.keytab.file";
+ public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = "ozone.scm.kerberos.principal";
public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -253,6 +254,11 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_CONTAINER_CLOSE_THRESHOLD =
"ozone.scm.container.close.threshold";
public static final float OZONE_SCM_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
+
+ public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+ "ozone.scm.web.authentication.kerberos.principal";
+ public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+ "ozone.scm.web.authentication.kerberos.keytab";
/**
* Never constructed.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index c8d4a80..e17f1c2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdds.scm.protocol;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -31,6 +33,7 @@ import java.util.List;
* ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
* to read/write a block.
*/
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index e8d85e0..d36bdf3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.hdds.scm.protocol;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -27,11 +29,13 @@ import org.apache.hadoop.hdds.protocol.proto
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
/**
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes
* that currently host a container.
*/
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerLocationProtocol {
/**
* Asks SCM where a container should be allocated. SCM responds with the
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 837c95b..89bb066 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,9 +18,13 @@
package org.apache.hadoop.hdds.scm.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
.ScmBlockLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used from an HDFS node to StorageContainerManager. This extends the
@@ -30,6 +34,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
"org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ScmBlockLocationProtocolPB
extends ScmBlockLocationProtocolService.BlockingInterface {
}
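For readers unfamiliar with the annotation: @KerberosInfo does not carry a principal itself. Its serverPrincipal attribute names the configuration key from which the RPC layer resolves the server's Kerberos principal when setting up the secure connection. A minimal sketch of the pattern on a hypothetical protocol (names are illustrative):

    import org.apache.hadoop.ipc.ProtocolInfo;
    import org.apache.hadoop.security.KerberosInfo;

    // serverPrincipal is a config *key*; the actual principal is read from
    // the server's Configuration at connection time.
    @KerberosInfo(serverPrincipal = "ozone.scm.kerberos.principal")
    @ProtocolInfo(protocolName = "org.example.DemoProtocol", protocolVersion = 1)
    public interface DemoProtocol {
      String ping(String msg);
    }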
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index f234ad3..3bd83f9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -21,7 +21,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerLocationProtocolProtos
.StorageContainerLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used from an HDFS node to StorageContainerManager. This extends the
@@ -30,6 +32,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
@ProtocolInfo(protocolName =
"org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerLocationProtocolPB
extends StorageContainerLocationProtocolService.BlockingInterface {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 856d088..33cfa93 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -256,6 +256,11 @@ public final class OzoneConfigKeys {
"hdds.datanode.storage.utilization.critical.threshold";
public static final double
HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.75;
+
+ public static final String OZONE_SECURITY_ENABLED_KEY = "ozone.security.enabled";
+ public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
+ public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
+
/**
* There is no need to instantiate this class.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 25365c8..1b1d530 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -774,6 +774,17 @@
</description>
</property>
<property>
+ <name>ozone.scm.max.container.report.threads</name>
+ <value>100</value>
+ <tag>OZONE, PERFORMANCE</tag>
+ <description>
+ Maximum number of threads to process container reports in SCM.
+ Each container report from a datanode is processed by SCM in a worker
+ thread, fetched from a thread pool. This property is used to control the
+ maximum size of the thread pool.
+ </description>
+ </property>
+ <property>
<name>ozone.scm.max.hb.count.to.process</name>
<value>5000</value>
<tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
@@ -1039,4 +1050,37 @@
</description>
</property>
+ <property>
+ <name>ozone.security.enabled</name>
+ <value>false</value>
+ <tag> OZONE, SECURITY, FLAG</tag>
+ <description>True if security is enabled for ozone. When this property is true, hadoop.security.authentication should be Kerberos.
+ </description>
+ </property>
+
+ <property>
+ <name>ozone.scm.kerberos.keytab.file</name>
+ <value></value>
+ <tag> OZONE, SECURITY</tag>
+ <description> The keytab file used by each SCM daemon to login as its
+ service principal. The principal name is configured with
+ ozone.scm.kerberos.principal.
+ </description>
+ </property>
+ <property>
+ <name>ozone.scm.kerberos.principal</name>
+ <value></value>
+ <tag> OZONE, SECURITY</tag>
+ <description>The SCM service principal, e.g. scm/_HOST@REALM.TLD.</description>
+ </property>
+
+ <property>
+ <name>ozone.scm.web.authentication.kerberos.principal</name>
+ <value>HTTP/_HOST@EXAMPLE.COM</value>
+ </property>
+ <property>
+ <name>ozone.scm.web.authentication.kerberos.keytab</name>
+ <value>/etc/security/keytabs/HTTP.keytab</value>
+ </property>
+
</configuration>
\ No newline at end of file
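Taken together, these properties are the whole secure-SCM configuration surface added by the patch. A sketch of setting the same keys programmatically, with placeholder principal and keytab values mirroring the defaults above:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    static OzoneConfiguration secureScmConf() {
      OzoneConfiguration conf = new OzoneConfiguration();
      conf.setBoolean("ozone.security.enabled", true);
      conf.set("hadoop.security.authentication", "kerberos");
      conf.set("ozone.scm.kerberos.principal", "scm/_HOST@EXAMPLE.COM");
      conf.set("ozone.scm.kerberos.keytab.file",
          "/etc/security/keytabs/scm.keytab");
      conf.set("ozone.scm.web.authentication.kerberos.principal",
          "HTTP/_HOST@EXAMPLE.COM");
      conf.set("ozone.scm.web.authentication.kerberos.keytab",
          "/etc/security/keytabs/HTTP.keytab");
      return conf;
    }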
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index a950a31..5b04c56 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -39,11 +39,15 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import java.io.IOException;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
/**
* The protocol spoken between datanodes and SCM. For specifics please see
* the Protoc file that defines this protocol.
*/
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface StorageContainerDatanodeProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9b28b5a..9c32ef8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.protocolPB;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos
.StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used from a datanode to StorageContainerManager. This extends
@@ -29,6 +32,9 @@ import org.apache.hadoop.ipc.ProtocolInfo;
@ProtocolInfo(protocolName =
"org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
protocolVersion = 1)
+@KerberosInfo(
+ serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
public interface StorageContainerDatanodeProtocolPB extends
StorageContainerDatanodeProtocolService.BlockingInterface {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 568a86a..e15157e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -28,9 +28,11 @@ import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.protobuf.BlockingService;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
import org.apache.hadoop.hdds.scm.block.BlockManager;
import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
@@ -53,6 +55,9 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Storage.StorageState;
import org.apache.hadoop.ozone.common.StorageInfo;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
@@ -71,6 +76,10 @@ import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
/**
@@ -141,6 +150,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
* Key = DatanodeUuid, value = ContainerStat.
*/
private Cache<String, ContainerStat> containerReportCache;
+ private Configuration scmConf;
/**
* Creates a new StorageContainerManager. Configuration will be updated
@@ -149,13 +159,19 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
*
* @param conf configuration
*/
- private StorageContainerManager(OzoneConfiguration conf) throws IOException {
+ private StorageContainerManager(OzoneConfiguration conf)
+ throws IOException, AuthenticationException {
final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
+ this.scmConf = conf;
StorageContainerManager.initMetrics();
initContainerReportCache(conf);
+ // Authenticate SCM if security is enabled
+ if (this.scmConf.getBoolean(OZONE_SECURITY_ENABLED_KEY,
+ OZONE_SECURITY_ENABLED_DEFAULT)) {
+ loginAsSCMUser(this.scmConf);
+ }
scmStorage = new SCMStorage(conf);
if (scmStorage.getState() != StorageState.INITIALIZED) {
@@ -191,6 +207,33 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
}
/**
+ * Login as the configured user for SCM.
+ *
+ * @param conf
+ */
+ private void loginAsSCMUser(Configuration conf)
+ throws IOException, AuthenticationException {
+ LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
+ + "Principal: {}, keytab: {}", this.scmConf.get
+ (OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
+ this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+
+ if (SecurityUtil.getAuthenticationMethod(conf).equals
+ (AuthenticationMethod.KERBEROS)) {
+ UserGroupInformation.setConfiguration(this.scmConf);
+ InetSocketAddress socAddr = HddsServerUtil
+ .getScmBlockClientBindAddress(conf);
+ SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+ } else {
+ throw new AuthenticationException(
+ SecurityUtil.getAuthenticationMethod(conf)
+ + " authentication method not supported. SCM user login failed.");
+ }
+ LOG.info("SCM login successful.");
+ }
+
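The method above follows Hadoop's standard keytab login pattern: SecurityUtil.login reads the keytab path and principal name from the given configuration keys and substitutes _HOST in the principal with the supplied hostname, so a single configuration works across the cluster. A condensed sketch using the keys this patch introduces:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.SecurityUtil;

    // _HOST in ozone.scm.kerberos.principal is replaced with the RPC bind
    // address hostname before the Kerberos login is attempted.
    static void scmLogin(Configuration conf, InetSocketAddress bindAddr)
        throws IOException {
      SecurityUtil.login(conf,
          "ozone.scm.kerberos.keytab.file",
          "ozone.scm.kerberos.principal",
          bindAddr.getHostName());
    }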
+ /**
* Builds a message for logging startup information about an RPC server.
*
* @param description RPC server description
@@ -274,7 +317,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
public static StorageContainerManager createSCM(String[] argv,
OzoneConfiguration conf)
- throws IOException {
+ throws IOException, AuthenticationException {
if (!HddsUtils.isHddsEnabled(conf)) {
System.err.println(
"SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" +
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index 75b2036..da936ad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
import java.io.IOException;
@@ -63,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
}
@Override protected String getKeytabFile() {
- return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
+ return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
}
@Override protected String getSpnegoPrincipal() {
- return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+ return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
}
@Override protected String getEnabledKey() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 94cc257..80b0a40 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.client.protocol;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -33,6 +34,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
/**
* An implementer of this interface is capable of connecting to Ozone Cluster
@@ -42,6 +44,7 @@ import java.util.List;
* includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
* {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST.
*/
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
public interface ClientProtocol {
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index 92bc4a8..55225a4 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -75,6 +75,13 @@ if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} ==
exit 1
fi
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+# echo "Ozone is not supported in a security enabled cluster."
+# exit 1
+#fi
+
#---------------------------------------------------------
# Check if ozone is enabled
OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index be55be4..ff332f2 100644
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -47,13 +47,12 @@ else
exit 1
fi
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-
-if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
- echo "Ozone is not supported in a security enabled cluster."
- exit 1
-fi
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+# echo "Ozone is not supported in a security enabled cluster."
+# exit 1
+#fi
#---------------------------------------------------------
# Check if ozone is enabled
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/integration-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index c8a932c..4aa1aa5 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -42,6 +42,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minikdc</artifactId>
+ <scope>test</scope>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-objectstore-service</artifactId>
<scope>provided</scope>
</dependency>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index f0bfef1..fbd9565 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.test.GenericTestUtils;
import org.slf4j.Logger;
@@ -289,9 +290,16 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
public MiniOzoneCluster build() throws IOException {
DefaultMetricsSystem.setMiniClusterMode(true);
initializeConfiguration();
- StorageContainerManager scm = createSCM();
- scm.start();
- KeySpaceManager ksm = createKSM();
+ StorageContainerManager scm;
+ KeySpaceManager ksm;
+ try {
+ scm = createSCM();
+ scm.start();
+ ksm = createKSM();
+ } catch (AuthenticationException ex) {
+ throw new IOException("Unable to build MiniOzoneCluster. ", ex);
+ }
+
ksm.start();
List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
hddsDatanodes.forEach((datanode) -> datanode.start(null));
@@ -318,7 +326,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
*
* @throws IOException
*/
- private StorageContainerManager createSCM() throws IOException {
+ private StorageContainerManager createSCM()
+ throws IOException, AuthenticationException {
configureSCM();
SCMStorage scmStore = new SCMStorage(conf);
scmStore.setClusterId(clusterId);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
new file mode 100644
index 0000000..9c430ad
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.KerberosAuthException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class for a security-enabled Ozone cluster.
+ */
+@InterfaceAudience.Private
+public final class TestSecureOzoneCluster {
+
+ private Logger LOGGER = LoggerFactory
+ .getLogger(TestSecureOzoneCluster.class);
+
+ private MiniKdc miniKdc;
+ private OzoneConfiguration conf;
+ private File workDir;
+ private static Properties securityProperties;
+ private File scmKeytab;
+ private File spnegoKeytab;
+ private String curUser;
+
+ @Before
+ public void init() {
+ try {
+ conf = new OzoneConfiguration();
+ startMiniKdc();
+ setSecureConfig(conf);
+ createCredentialsInKDC(conf, miniKdc);
+ } catch (Exception e) {
+ LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
+ }
+ }
+
+ private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
+ throws Exception {
+ createPrincipal(scmKeytab,
+ conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
+ createPrincipal(spnegoKeytab,
+ conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+ }
+
+ private void createPrincipal(File keytab, String... principal)
+ throws Exception {
+ miniKdc.createPrincipal(keytab, principal);
+ }
+
+ private void startMiniKdc() throws Exception {
+ workDir = GenericTestUtils
+ .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
+ securityProperties = MiniKdc.createConf();
+ miniKdc = new MiniKdc(securityProperties, workDir);
+ miniKdc.start();
+ }
+
+ private void setSecureConfig(Configuration conf) throws IOException {
+ conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+ String host = KerberosUtil.getLocalHostName();
+ String realm = miniKdc.getRealm();
+ curUser = UserGroupInformation.getCurrentUser()
+ .getUserName();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+ conf.set(OZONE_ADMINISTRATORS, curUser);
+
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm/" + host + "@" + realm);
+ conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ "HTTP_SCM/" + host + "@" + realm);
+
+ scmKeytab = new File(workDir, "scm.keytab");
+ spnegoKeytab = new File(workDir, "http.keytab");
+
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ scmKeytab.getAbsolutePath());
+ conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+ spnegoKeytab.getAbsolutePath());
+
+ }
+
+ @Test
+ public void testSecureScmStartupSuccess() throws Exception {
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ Path scmPath = Paths.get(path, "scm-meta");
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+ SCMStorage scmStore = new SCMStorage(conf);
+ String clusterId = UUID.randomUUID().toString();
+ String scmId = UUID.randomUUID().toString();
+ scmStore.setClusterId(clusterId);
+ scmStore.setScmId(scmId);
+ // writes the version file properties
+ scmStore.initialize();
+ StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
+ //Reads the SCM Info from SCM instance
+ ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+ Assert.assertEquals(clusterId, scmInfo.getClusterId());
+ Assert.assertEquals(scmId, scmInfo.getScmId());
+ }
+
+ @Test
+ public void testSecureScmStartupFailure() throws Exception {
+ final String path = GenericTestUtils
+ .getTempPath(UUID.randomUUID().toString());
+ Path scmPath = Paths.get(path, "scm-meta");
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+ conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm@" + miniKdc.getRealm());
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "kerberos");
+
+ SCMStorage scmStore = new SCMStorage(conf);
+ String clusterId = UUID.randomUUID().toString();
+ String scmId = UUID.randomUUID().toString();
+ scmStore.setClusterId(clusterId);
+ scmStore.setScmId(scmId);
+ // writes the version file properties
+ scmStore.initialize();
+ LambdaTestUtils.intercept(IOException.class,
+ "Running in secure mode, but config doesn't have a keytab",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+ "scm/_HOST@EXAMPLE.com");
+ conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+ "/etc/security/keytabs/scm.keytab");
+
+ LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
+ + "to login: for principal:",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "OAuth2");
+
+ LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+ + " attribute value for hadoop.security.authentication of OAuth2",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+ "KERBEROS_SSL");
+ LambdaTestUtils.intercept(AuthenticationException.class,
+ "KERBEROS_SSL authentication method not support.",
+ () -> {
+ StorageContainerManager.createSCM(null, conf);
+ });
+
+ }
+
+}
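Stripped of the test scaffolding, the KDC setup in this class boils down to a short MiniKdc lifecycle. A condensed sketch with a caller-supplied scratch directory:

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.minikdc.MiniKdc;

    static void runWithKdc(File workDir) throws Exception {
      Properties kdcConf = MiniKdc.createConf();
      MiniKdc kdc = new MiniKdc(kdcConf, workDir);
      kdc.start();
      try {
        // The realm (e.g. EXAMPLE.COM) is appended automatically.
        kdc.createPrincipal(new File(workDir, "scm.keytab"), "scm/localhost");
        String principal = "scm/localhost@" + kdc.getRealm();
        System.out.println("provisioned " + principal);
        // ... exercise the secured code under test here ...
      } finally {
        kdc.stop();
      }
    }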
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c6c19e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0c1d8f2..17cfc22 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.Rule;
import org.junit.Assert;
import org.junit.Test;
@@ -407,7 +408,8 @@ public class TestStorageContainerManager {
}
@Test
- public void testSCMInitializationFailure() throws IOException {
+ public void testSCMInitializationFailure()
+ throws IOException, AuthenticationException {
OzoneConfiguration conf = new OzoneConfiguration();
final String path =
GenericTestUtils.getTempPath(UUID.randomUUID().toString());
[29/45] hadoop git commit: HADOOP-15548: Randomize local dirs.
Contributed by Jim Brennan.
Posted by xy...@apache.org.
HADOOP-15548: Randomize local dirs. Contributed by Jim Brennan.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d36f6b9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d36f6b9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d36f6b9e
Branch: refs/heads/HDDS-4
Commit: d36f6b9e93e4c30d24d0e837cb00bd24ffa8f274
Parents: 1004701
Author: Eric E Payne <er...@oath.com>
Authored: Fri Jun 29 18:18:32 2018 +0000
Committer: Eric E Payne <er...@oath.com>
Committed: Fri Jun 29 18:18:32 2018 +0000
----------------------------------------------------------------------
.../org/apache/hadoop/fs/LocalDirAllocator.java | 7 ++-
.../apache/hadoop/fs/TestLocalDirAllocator.java | 59 ++++++++++++++++++++
2 files changed, 65 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36f6b9e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
index c1e9d21..1c216f4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
@@ -418,7 +418,12 @@ public class LocalDirAllocator {
}
}
} else {
- int dirNum = ctx.getAndIncrDirNumLastAccessed();
+ // Start linear search with random increment if possible
+ int randomInc = 1;
+ if (numDirs > 2) {
+ randomInc += dirIndexRandomizer.nextInt(numDirs - 1);
+ }
+ int dirNum = ctx.getAndIncrDirNumLastAccessed(randomInc);
while (numDirsSearched < numDirs) {
long capacity = ctx.dirDF[dirNum].getAvailable();
if (capacity > size) {
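The replaced line used a plain unit increment; the new code advances by a random step drawn from [1, numDirs - 1]. Modulo numDirs, a step in that range can never map an index back onto itself, so consecutive sized requests still land on distinct dirs (for numDirs >= 2) while the starting dir is no longer predictable. A self-contained sketch of the selection rule (names are illustrative):

    import java.util.Random;

    final class NextDir {
      private static final Random RAND = new Random();

      // Returns an index in [0, numDirs) that is guaranteed != prev for
      // numDirs >= 2, because the increment is in [1, numDirs - 1].
      static int next(int prev, int numDirs) {
        int inc = 1;
        if (numDirs > 2) {
          inc += RAND.nextInt(numDirs - 1);
        }
        return (prev + inc) % numDirs;
      }
    }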
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36f6b9e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
index 825efe0..acda898 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
@@ -265,6 +265,65 @@ public class TestLocalDirAllocator {
}
}
+ /**
+ * Five buffer dirs, on read-write disk.
+ *
+ * Try to create a whole bunch of files.
+ * Verify that each successive creation uses a different disk
+ * than the previous one (for sized requests).
+ *
+ * Ideally we would check statistical properties of the distribution, but
+ * we don't want to risk false positives here.
+ *
+ * @throws Exception
+ */
+ @Test (timeout = 30000)
+ public void testCreateManyFilesRandom() throws Exception {
+ assumeNotWindows();
+ final int numDirs = 5;
+ final int numTries = 100;
+ String[] dirs = new String[numDirs];
+ for (int d = 0; d < numDirs; ++d) {
+ dirs[d] = buildBufferDir(ROOT, d);
+ }
+ boolean nextDirNotSelectedAtLeastOnce = false;
+ try {
+ conf.set(CONTEXT, dirs[0] + "," + dirs[1] + "," + dirs[2] + ","
+ + dirs[3] + "," + dirs[4]);
+ Path[] paths = new Path[5];
+ for (int d = 0; d < numDirs; ++d) {
+ paths[d] = new Path(dirs[d]);
+ assertTrue(localFs.mkdirs(paths[d]));
+ }
+
+ int inDir = 0;
+ int prevDir = -1;
+ int[] counts = new int[5];
+ for (int i = 0; i < numTries; ++i) {
+ File result = createTempFile(SMALL_FILE_SIZE);
+ for (int d = 0; d < numDirs; ++d) {
+ if (result.getPath().startsWith(paths[d].toUri().getPath())) {
+ inDir = d;
+ break;
+ }
+ }
+ // Verify we always select a different dir
+ assertNotEquals(prevDir, inDir);
+ // Verify we are not always selecting the next dir - that was the old
+ // algorithm.
+ if ((prevDir != -1) && (inDir != ((prevDir + 1) % numDirs))) {
+ nextDirNotSelectedAtLeastOnce = true;
+ }
+ prevDir = inDir;
+ counts[inDir]++;
+ result.delete();
+ }
+ } finally {
+ rmBufferDirs();
+ }
+ assertTrue(nextDirNotSelectedAtLeastOnce);
+ }
+
/** Two buffer dirs. The first dir does not exist & is on a read-only disk;
* The second dir exists & is RW
* getLocalPathForWrite with checkAccess set to false should create a parent
[13/45] hadoop git commit: HDDS-170. Fix
TestBlockDeletingService#testBlockDeletionTimeout. Contributed by Lokesh
Jain.
Posted by xy...@apache.org.
HDDS-170. Fix TestBlockDeletingService#testBlockDeletionTimeout. Contributed by Lokesh Jain.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e305476
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e305476
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e305476
Branch: refs/heads/HDDS-4
Commit: 1e30547642c7c6c014745862dd06f90f091f90b6
Parents: e9ec3d7
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jun 27 13:56:45 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jun 27 13:56:45 2018 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/utils/BackgroundService.java | 2 +-
.../statemachine/background/BlockDeletingService.java | 8 ++++----
.../hadoop/ozone/container/ozoneimpl/OzoneContainer.java | 2 +-
.../testutils/BlockDeletingServiceTestImpl.java | 4 ++--
.../ozone/container/common/TestBlockDeletingService.java | 11 +++++++----
5 files changed, 15 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e305476/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
index 431da64..5718008 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java
@@ -126,7 +126,7 @@ public abstract class BackgroundService {
try {
// Collect task results
BackgroundTaskResult result = serviceTimeout > 0
- ? taskResultFuture.get(serviceTimeout, TimeUnit.MILLISECONDS)
+ ? taskResultFuture.get(serviceTimeout, unit)
: taskResultFuture.get();
if (LOG.isDebugEnabled()) {
LOG.debug("task execution result size {}", result.getSize());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e305476/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
index 63f57b4..bff5913 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
@@ -83,10 +83,10 @@ public class BlockDeletingService extends BackgroundService{
private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10;
public BlockDeletingService(ContainerManager containerManager,
- long serviceInterval, long serviceTimeout, Configuration conf) {
- super("BlockDeletingService", serviceInterval,
- TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
- serviceTimeout);
+ long serviceInterval, long serviceTimeout, TimeUnit unit,
+ Configuration conf) {
+ super("BlockDeletingService", serviceInterval, unit,
+ BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
this.containerManager = containerManager;
this.conf = conf;
this.blockLimitPerTask = conf.getInt(
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e305476/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 4156f5a..7931f6f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -122,7 +122,7 @@ public class OzoneContainer {
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
this.blockDeletingService = new BlockDeletingService(manager,
- svcInterval, serviceTimeout, ozoneConfig);
+ svcInterval, serviceTimeout, TimeUnit.MILLISECONDS, ozoneConfig);
this.dispatcher = new Dispatcher(manager, this.ozoneConfig);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e305476/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
index 86888aa..7c12945 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -44,8 +44,8 @@ public class BlockDeletingServiceTestImpl
public BlockDeletingServiceTestImpl(ContainerManager containerManager,
int serviceInterval, Configuration conf) {
- super(containerManager, serviceInterval,
- SERVICE_TIMEOUT_IN_MILLISECONDS, conf);
+ super(containerManager, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
+ TimeUnit.MILLISECONDS, conf);
}
@VisibleForTesting
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e305476/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 0686e4e..8d01c80 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -280,10 +280,11 @@ public class TestBlockDeletingService {
ContainerManager containerManager = createContainerManager(conf);
createToDeleteBlocks(containerManager, conf, 1, 3, 1, chunksDir);
- // set timeout value as 1ms to trigger timeout behavior
+ // set timeout value as 1ns to trigger timeout behavior
long timeout = 1;
- BlockDeletingService svc =
- new BlockDeletingService(containerManager, 1000, timeout, conf);
+ BlockDeletingService svc = new BlockDeletingService(containerManager,
+ TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
+ conf);
svc.start();
LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG);
@@ -303,7 +304,9 @@ public class TestBlockDeletingService {
// test for normal case that doesn't have timeout limitation
timeout = 0;
createToDeleteBlocks(containerManager, conf, 1, 3, 1, chunksDir);
- svc = new BlockDeletingService(containerManager, 1000, timeout, conf);
+ svc = new BlockDeletingService(containerManager,
+ TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
+ conf);
svc.start();
// get container meta data
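
A note on the new signature: serviceInterval and serviceTimeout are now both
interpreted in the TimeUnit argument, so callers must express them in the same
unit. A minimal sketch of the two call styles seen above (containerManager,
conf and the interval/timeout values are illustrative placeholders, not part
of this commit):

    // Production style: interval and timeout both in milliseconds.
    BlockDeletingService svc = new BlockDeletingService(containerManager,
        60_000L, 300_000L, TimeUnit.MILLISECONDS, conf);

    // Test style: a 1 ns timeout forces the timeout path, so the 1 s
    // interval must be converted into the same nanosecond unit.
    BlockDeletingService testSvc = new BlockDeletingService(containerManager,
        TimeUnit.MILLISECONDS.toNanos(1000), 1L, TimeUnit.NANOSECONDS, conf);
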
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[12/45] hadoop git commit: HDDS-186. Create under replicated queue.
Contributed by Ajay Kumar.
Posted by xy...@apache.org.
HDDS-186. Create under replicated queue. Contributed by Ajay Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9ec3d78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9ec3d78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9ec3d78
Branch: refs/heads/HDDS-4
Commit: e9ec3d78f520a8543dc77d763d4b358aa608bae8
Parents: 56a4cdb
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed Jun 27 13:35:30 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Wed Jun 27 13:35:30 2018 -0700
----------------------------------------------------------------------
.../container/replication/ReplicationQueue.java | 76 +++++++++++
.../replication/ReplicationRequest.java | 106 +++++++++++++++
.../container/replication/package-info.java | 23 ++++
.../replication/TestReplicationQueue.java | 134 +++++++++++++++++++
.../container/replication/package-info.java | 23 ++++
5 files changed, 362 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9ec3d78/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
new file mode 100644
index 0000000..e0a2351
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationQueue.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.List;
+import java.util.PriorityQueue;
+import java.util.Queue;
+
+/**
+ * Priority queue to handle under-replicated and over-replicated containers
+ * in Ozone. ReplicationManager will consume these messages and act
+ * accordingly.
+ */
+public class ReplicationQueue {
+
+ private final Queue<ReplicationRequest> queue;
+
+ ReplicationQueue() {
+ queue = new PriorityQueue<>();
+ }
+
+ public synchronized boolean add(ReplicationRequest repObj) {
+ if (this.queue.contains(repObj)) {
+ // Remove the earlier message and insert this one
+ this.queue.remove(repObj);
+ }
+ return this.queue.add(repObj);
+ }
+
+ public synchronized boolean remove(ReplicationRequest repObj) {
+ return queue.remove(repObj);
+ }
+
+ /**
+ * Retrieves, but does not remove, the head of this queue,
+ * or returns {@code null} if this queue is empty.
+ *
+ * @return the head of this queue, or {@code null} if this queue is empty
+ */
+ public synchronized ReplicationRequest peek() {
+ return queue.peek();
+ }
+
+ /**
+ * Retrieves and removes the head of this queue,
+ * or returns {@code null} if this queue is empty.
+ *
+ * @return the head of this queue, or {@code null} if this queue is empty
+ */
+ public synchronized ReplicationRequest poll() {
+ return queue.poll();
+ }
+
+ public synchronized boolean removeAll(List<ReplicationRequest> repObjs) {
+ return queue.removeAll(repObjs);
+ }
+
+ public int size() {
+ return queue.size();
+ }
+}
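
As a usage illustration (a same-package fragment like the tests, since the
constructor is package-private; not part of the commit): adding a second
request for a container already in the queue replaces the stale entry,
because equals() and hashCode() are keyed on containerId alone.

    ReplicationQueue queue = new ReplicationQueue();
    long now = Time.monotonicNow();
    queue.add(new ReplicationRequest(1L, (short) 2, now, (short) 3));
    // Same container id: add() removes the stale entry first, so the
    // newer snapshot of the container's state wins.
    queue.add(new ReplicationRequest(1L, (short) 1, now + 5, (short) 3));
    assert queue.size() == 1;
    assert queue.poll().getReplicationCount() == 1;
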
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9ec3d78/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
new file mode 100644
index 0000000..a6ccce1
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationRequest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.io.Serializable;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+
+/**
+ * Wrapper class for the HDDS replication queue. Implements its natural
+ * ordering for use in a priority queue.
+ */
+public class ReplicationRequest implements Comparable<ReplicationRequest>,
+ Serializable {
+ private final long containerId;
+ private final short replicationCount;
+ private final short expecReplicationCount;
+ private final long timestamp;
+
+ public ReplicationRequest(long containerId, short replicationCount,
+ long timestamp, short expecReplicationCount) {
+ this.containerId = containerId;
+ this.replicationCount = replicationCount;
+ this.timestamp = timestamp;
+ this.expecReplicationCount = expecReplicationCount;
+ }
+
+ /**
+ * Compares this object with the specified object for order. Returns a
+ * negative integer, zero, or a positive integer as this object is less
+ * than, equal to, or greater than the specified object.
+ * @param o the object to be compared.
+ * @return a negative integer, zero, or a positive integer as this object
+ * is less than, equal to, or greater than the specified object.
+ * @throws NullPointerException if the specified object is null
+ * @throws ClassCastException if the specified object's type prevents it
+ * from being compared to this object.
+ */
+ @Override
+ public int compareTo(ReplicationRequest o) {
+ if (o == null) {
+ return 1;
+ }
+ if (this == o) {
+ return 0;
+ }
+ int retVal = Integer
+ .compare(getReplicationCount() - getExpecReplicationCount(),
+ o.getReplicationCount() - o.getExpecReplicationCount());
+ if (retVal != 0) {
+ return retVal;
+ }
+ return Long.compare(getTimestamp(), o.getTimestamp());
+ }
+
+ @Override
+ public int hashCode() {
+ return new HashCodeBuilder(91, 1011)
+ .append(getContainerId())
+ .toHashCode();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ ReplicationRequest that = (ReplicationRequest) o;
+ return new EqualsBuilder().append(getContainerId(), that.getContainerId())
+ .isEquals();
+ }
+
+ public long getContainerId() {
+ return containerId;
+ }
+
+ public short getReplicationCount() {
+ return replicationCount;
+ }
+
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ public short getExpecReplicationCount() {
+ return expecReplicationCount;
+ }
+}
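
To make the ordering concrete: compareTo sorts by replication deficit
(replicationCount minus expecReplicationCount) and breaks ties with the
earlier timestamp, so the most under-replicated container surfaces first.
A small illustrative check (arbitrary values, not part of the commit):

    long t = Time.monotonicNow();
    ReplicationRequest worst = new ReplicationRequest(1L, (short) 0, t, (short) 3);
    ReplicationRequest mild = new ReplicationRequest(2L, (short) 2, t, (short) 3);
    ReplicationRequest later = new ReplicationRequest(3L, (short) 2, t + 1, (short) 3);
    assert worst.compareTo(mild) < 0;  // deficit -3 sorts before deficit -1
    assert mild.compareTo(later) < 0;  // equal deficit: earlier timestamp first
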
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9ec3d78/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000..7f335e3
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.replication;
+
+/**
+ * Ozone Container replication related classes.
+ */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9ec3d78/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
new file mode 100644
index 0000000..6d74c68
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationQueue.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.Random;
+import java.util.UUID;
+import org.apache.hadoop.util.Time;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for ReplicationQueue.
+ */
+public class TestReplicationQueue {
+
+ private ReplicationQueue replicationQueue;
+ private Random random;
+
+ @Before
+ public void setUp() {
+ replicationQueue = new ReplicationQueue();
+ random = new Random();
+ }
+
+ @Test
+ public void testDuplicateAddOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationRequest obj1, obj2, obj3;
+ long time = Time.monotonicNow();
+ obj1 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
+ obj2 = new ReplicationRequest(contId, (short) 2, time + 1, (short) 3);
+ obj3 = new ReplicationRequest(contId, (short) 1, time + 2, (short) 3);
+
+ replicationQueue.add(obj1);
+ replicationQueue.add(obj2);
+ replicationQueue.add(obj3);
+ Assert.assertEquals("Should add only 1 msg as second one is duplicate",
+ 1, replicationQueue.size());
+ ReplicationRequest temp = replicationQueue.poll();
+ Assert.assertEquals(obj3, temp);
+ }
+
+ @Test
+ public void testPollOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationRequest msg1, msg2, msg3, msg4, msg5;
+ msg1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
+ (short) 3);
+ long time = Time.monotonicNow();
+ msg2 = new ReplicationRequest(contId + 1, (short) 4, time, (short) 3);
+ msg3 = new ReplicationRequest(contId + 2, (short) 0, time, (short) 3);
+ msg4 = new ReplicationRequest(contId, (short) 2, time, (short) 3);
+ // Another request for the same container as msg2; add() will replace it
+ msg5 = new ReplicationRequest(contId + 1, (short) 2, time, (short) 3);
+
+ replicationQueue.add(msg1);
+ replicationQueue.add(msg2);
+ replicationQueue.add(msg3);
+ replicationQueue.add(msg4);
+ replicationQueue.add(msg5);
+ Assert.assertEquals("Should have 3 objects",
+ 3, replicationQueue.size());
+
+ // Since the priority queue orders messages by replication deficit,
+ // the message with the lowest replication count should come first
+ ReplicationRequest temp;
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 2 objects",
+ 2, replicationQueue.size());
+ Assert.assertEquals(msg3, temp);
+
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 1 objects",
+ 1, replicationQueue.size());
+ Assert.assertEquals(msg5, temp);
+
+ // msg5 replaced msg2 (same container id), so the only remaining request
+ // is msg4, which in turn superseded msg1 for the first container.
+ temp = replicationQueue.poll();
+ Assert.assertEquals("Should have 0 objects",
+ 0, replicationQueue.size());
+ Assert.assertEquals(msg4, temp);
+ }
+
+ @Test
+ public void testRemoveOp() {
+ long contId = random.nextLong();
+ String nodeId = UUID.randomUUID().toString();
+ ReplicationRequest obj1, obj2, obj3;
+ obj1 = new ReplicationRequest(contId, (short) 1, Time.monotonicNow(),
+ (short) 3);
+ obj2 = new ReplicationRequest(contId + 1, (short) 2, Time.monotonicNow(),
+ (short) 3);
+ obj3 = new ReplicationRequest(contId + 2, (short) 3, Time.monotonicNow(),
+ (short) 3);
+
+ replicationQueue.add(obj1);
+ replicationQueue.add(obj2);
+ replicationQueue.add(obj3);
+ Assert.assertEquals("Should have 3 objects",
+ 3, replicationQueue.size());
+
+ replicationQueue.remove(obj3);
+ Assert.assertEquals("Should have 2 objects",
+ 2, replicationQueue.size());
+
+ replicationQueue.remove(obj2);
+ Assert.assertEquals("Should have 1 objects",
+ 1, replicationQueue.size());
+
+ replicationQueue.remove(obj1);
+ Assert.assertEquals("Should have 0 objects",
+ 0, replicationQueue.size());
+ }
+
+}
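
One subtlety these tests lean on: equality is keyed on containerId only,
while ordering uses deficit and timestamp, so two requests can be equal for
deduplication purposes yet compare unequal in the queue. A compact
illustration (arbitrary values, not part of the commit):

    ReplicationRequest oldReq = new ReplicationRequest(7L, (short) 2, 100L, (short) 3);
    ReplicationRequest newReq = new ReplicationRequest(7L, (short) 1, 200L, (short) 3);
    assert oldReq.equals(newReq);        // same container id => equal
    assert oldReq.compareTo(newReq) > 0; // yet newReq is more under-replicated
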
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9ec3d78/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000..5b1fd0f
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Test classes for container replication functionality.
+ */
+package org.apache.hadoop.ozone.container.replication;
+// Test classes for Replication functionality.
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
[19/45] hadoop git commit: HDDS-195. Create generic CommandWatcher
utility. Contributed by Elek, Marton.
Posted by xy...@apache.org.
HDDS-195. Create generic CommandWatcher utility.
Contributed by Elek, Marton.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85627e2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85627e2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85627e2c
Branch: refs/heads/HDDS-4
Commit: 85627e2cba91a61d675d20cdb35e188c6c81e3f2
Parents: ddbff7c
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Jun 27 23:31:32 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Jun 27 23:39:33 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdds/server/events/EventWatcher.java | 157 +++++++++++++
.../server/events/IdentifiableEventPayload.java | 30 +++
.../hdds/server/events/EventHandlerStub.java | 38 ++++
.../hdds/server/events/TestEventWatcher.java | 220 +++++++++++++++++++
4 files changed, 445 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85627e2c/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
new file mode 100644
index 0000000..19fddde
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.ozone.lease.Lease;
+import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException;
+import org.apache.hadoop.ozone.lease.LeaseExpiredException;
+import org.apache.hadoop.ozone.lease.LeaseManager;
+import org.apache.hadoop.ozone.lease.LeaseNotFoundException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Event watcher that (re)sends a message after a timeout.
+ * <p>
+ * The event watcher will resend the tracked payload/event after a timeout
+ * period unless a confirmation (completion event) arrives first.
+ *
+ * @param <TIMEOUT_PAYLOAD> The type of the events which are tracked.
+ * @param <COMPLETION_PAYLOAD> The type of event which could cancel the
+ * tracking.
+ */
+@SuppressWarnings("CheckStyle")
+public abstract class EventWatcher<TIMEOUT_PAYLOAD extends
+ IdentifiableEventPayload,
+ COMPLETION_PAYLOAD extends IdentifiableEventPayload> {
+
+ private static final Logger LOG = LoggerFactory.getLogger(EventWatcher.class);
+
+ private final Event<TIMEOUT_PAYLOAD> startEvent;
+
+ private final Event<COMPLETION_PAYLOAD> completionEvent;
+
+ private final LeaseManager<UUID> leaseManager;
+
+ protected final Map<UUID, TIMEOUT_PAYLOAD> trackedEventsByUUID =
+ new ConcurrentHashMap<>();
+
+ protected final Set<TIMEOUT_PAYLOAD> trackedEvents = new HashSet<>();
+
+ public EventWatcher(Event<TIMEOUT_PAYLOAD> startEvent,
+ Event<COMPLETION_PAYLOAD> completionEvent,
+ LeaseManager<UUID> leaseManager) {
+ this.startEvent = startEvent;
+ this.completionEvent = completionEvent;
+ this.leaseManager = leaseManager;
+
+ }
+
+ public void start(EventQueue queue) {
+
+ queue.addHandler(startEvent, this::handleStartEvent);
+
+ queue.addHandler(completionEvent, (completionPayload, publisher) -> {
+ UUID uuid = completionPayload.getUUID();
+ try {
+ handleCompletion(uuid, publisher);
+ } catch (LeaseNotFoundException e) {
+ // It's already done. Too late, we already retried it.
+ // Not a real problem.
+ LOG.warn("Completion event without active lease. UUID={}", uuid);
+ }
+ });
+
+ }
+
+ private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload,
+ EventPublisher publisher) {
+ UUID identifier = payload.getUUID();
+ trackedEventsByUUID.put(identifier, payload);
+ trackedEvents.add(payload);
+ try {
+ Lease<UUID> lease = leaseManager.acquire(identifier);
+ try {
+ lease.registerCallBack(() -> {
+ handleTimeout(publisher, identifier);
+ return null;
+ });
+
+ } catch (LeaseExpiredException e) {
+ handleTimeout(publisher, identifier);
+ }
+ } catch (LeaseAlreadyExistException e) {
+ // No problem at all, but the timer is not reset.
+ }
+ }
+
+ private synchronized void handleCompletion(UUID uuid,
+ EventPublisher publisher) throws LeaseNotFoundException {
+ leaseManager.release(uuid);
+ TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(uuid);
+ trackedEvents.remove(payload);
+ onFinished(publisher, payload);
+ }
+
+ private synchronized void handleTimeout(EventPublisher publisher,
+ UUID identifier) {
+ TIMEOUT_PAYLOAD payload = trackedEventsByUUID.remove(identifier);
+ trackedEvents.remove(payload);
+ onTimeout(publisher, payload);
+ }
+
+
+ /**
+ * Check if a specific payload is in-progress.
+ */
+ public synchronized boolean contains(TIMEOUT_PAYLOAD payload) {
+ return trackedEvents.contains(payload);
+ }
+
+ public synchronized boolean remove(TIMEOUT_PAYLOAD payload) {
+ try {
+ leaseManager.release(payload.getUUID());
+ } catch (LeaseNotFoundException e) {
+ LOG.warn("Completion event without active lease. UUID={}",
+ payload.getUUID());
+ }
+ trackedEventsByUUID.remove(payload.getUUID());
+ return trackedEvents.remove(payload);
+
+ }
+
+ abstract void onTimeout(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+
+ abstract void onFinished(EventPublisher publisher, TIMEOUT_PAYLOAD payload);
+
+ public List<TIMEOUT_PAYLOAD> getTimeoutEvents(
+ Predicate<? super TIMEOUT_PAYLOAD> predicate) {
+ return trackedEventsByUUID.values().stream().filter(predicate)
+ .collect(Collectors.toList());
+ }
+}
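
The timeout machinery delegates to the ozone lease package: handleStartEvent
acquires a lease keyed by the payload UUID and registers the retry as its
expiry callback, while handleCompletion releases the lease to cancel the
retry. A condensed, illustrative sketch of that interaction, using only the
lease calls that appear in this patch and its tests (the class name and
println are placeholders):

    import java.util.UUID;
    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    public final class LeaseTimeoutSketch {
      public static void main(String[] args) throws Exception {
        LeaseManager<UUID> leases = new LeaseManager<>(2000L); // 2 s leases
        leases.start();
        UUID id = UUID.randomUUID();

        // Start event: acquire a lease for the payload and arm the
        // timeout callback (what handleStartEvent does above).
        Lease<UUID> lease = leases.acquire(id);
        lease.registerCallBack(() -> {
          System.out.println("timeout: would re-fire event " + id);
          return null;
        });

        // A completion event inside the lease period cancels the retry
        // (what handleCompletion does); otherwise the callback fires.
        Thread.sleep(500);
        leases.release(id);

        leases.shutdown();
      }
    }
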
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85627e2c/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
new file mode 100644
index 0000000..e73e30f
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.UUID;
+
+/**
+ * Event with an additional unique identifier.
+ *
+ */
+public interface IdentifiableEventPayload {
+
+ UUID getUUID();
+
+}
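
Any payload the watcher tracks only has to expose a stable UUID. A minimal,
hypothetical implementation (mirroring the UnderreplicatedEvent test class
further down):

    // Hypothetical payload type, shown only for illustration.
    final class ContainerLostEvent implements IdentifiableEventPayload {
      private final UUID uuid = UUID.randomUUID();
      private final String containerId;

      ContainerLostEvent(String containerId) {
        this.containerId = containerId;
      }

      @Override
      public UUID getUUID() {
        return uuid;
      }
    }
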
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85627e2c/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
new file mode 100644
index 0000000..3f34a70
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Dummy class for testing that collects all received events.
+ */
+public class EventHandlerStub<PAYLOAD> implements EventHandler<PAYLOAD> {
+
+ private List<PAYLOAD> receivedEvents = new ArrayList<>();
+
+ @Override
+ public void onMessage(PAYLOAD payload, EventPublisher publisher) {
+ receivedEvents.add(payload);
+ }
+
+ public List<PAYLOAD> getReceivedEvents() {
+ return receivedEvents;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85627e2c/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
new file mode 100644
index 0000000..1731350
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.server.events;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.UUID;
+
+import org.apache.hadoop.ozone.lease.LeaseManager;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test the basic functionality of event watcher.
+ */
+public class TestEventWatcher {
+
+ private static final TypedEvent<UnderreplicatedEvent> WATCH_UNDER_REPLICATED =
+ new TypedEvent<>(UnderreplicatedEvent.class);
+
+ private static final TypedEvent<UnderreplicatedEvent> UNDER_REPLICATED =
+ new TypedEvent<>(UnderreplicatedEvent.class);
+
+ private static final TypedEvent<ReplicationCompletedEvent>
+ REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class);
+
+ LeaseManager<UUID> leaseManager;
+
+ @Before
+ public void startLeaseManager() {
+ leaseManager = new LeaseManager<>(2000L);
+ leaseManager.start();
+ }
+
+ @After
+ public void stopLeaseManager() {
+ leaseManager.shutdown();
+ }
+
+
+ @Test
+ public void testEventHandling() throws InterruptedException {
+
+ EventQueue queue = new EventQueue();
+
+ EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
+ replicationWatcher = createEventWatcher();
+
+ EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
+ new EventHandlerStub<>();
+
+ queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
+
+ replicationWatcher.start(queue);
+
+ UUID uuid1 = UUID.randomUUID();
+ UUID uuid2 = UUID.randomUUID();
+
+ queue.fireEvent(WATCH_UNDER_REPLICATED,
+ new UnderreplicatedEvent(uuid1, "C1"));
+
+ queue.fireEvent(WATCH_UNDER_REPLICATED,
+ new UnderreplicatedEvent(uuid2, "C2"));
+
+ Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
+
+ Thread.sleep(1000);
+
+ queue.fireEvent(REPLICATION_COMPLETED,
+ new ReplicationCompletedEvent(uuid1, "C2", "D1"));
+
+ Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size());
+
+ Thread.sleep(1500);
+
+ queue.processAll(1000L);
+
+ Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size());
+ Assert.assertEquals(uuid2,
+ underReplicatedEvents.getReceivedEvents().get(0).UUID);
+
+ }
+
+ @Test
+ public void testInprogressFilter() throws InterruptedException {
+
+ EventQueue queue = new EventQueue();
+
+ EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
+ replicationWatcher = createEventWatcher();
+
+ EventHandlerStub<UnderreplicatedEvent> underReplicatedEvents =
+ new EventHandlerStub<>();
+
+ queue.addHandler(UNDER_REPLICATED, underReplicatedEvents);
+
+ replicationWatcher.start(queue);
+
+ UnderreplicatedEvent event1 =
+ new UnderreplicatedEvent(UUID.randomUUID(), "C1");
+
+ queue.fireEvent(WATCH_UNDER_REPLICATED, event1);
+
+ queue.fireEvent(WATCH_UNDER_REPLICATED,
+ new UnderreplicatedEvent(UUID.randomUUID(), "C2"));
+
+ queue.fireEvent(WATCH_UNDER_REPLICATED,
+ new UnderreplicatedEvent(UUID.randomUUID(), "C1"));
+
+ queue.processAll(1000L);
+ Thread.sleep(1000L);
+ List<UnderreplicatedEvent> c1todo = replicationWatcher
+ .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1"));
+
+ Assert.assertEquals(2, c1todo.size());
+ Assert.assertTrue(replicationWatcher.contains(event1));
+ Thread.sleep(1500L);
+
+ c1todo = replicationWatcher
+ .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1"));
+ Assert.assertEquals(0, c1todo.size());
+ Assert.assertFalse(replicationWatcher.contains(event1));
+
+
+ }
+
+ private EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>
+ createEventWatcher() {
+ return new EventWatcher<UnderreplicatedEvent, ReplicationCompletedEvent>(
+ WATCH_UNDER_REPLICATED, REPLICATION_COMPLETED, leaseManager) {
+
+ @Override
+ void onTimeout(EventPublisher publisher, UnderreplicatedEvent payload) {
+ publisher.fireEvent(UNDER_REPLICATED, payload);
+ }
+
+ @Override
+ void onFinished(EventPublisher publisher, UnderreplicatedEvent payload) {
+ //Good job. We did it.
+ }
+ };
+ }
+
+ private static class ReplicationCompletedEvent
+ implements IdentifiableEventPayload {
+
+ private final UUID UUID;
+
+ private final String containerId;
+
+ private final String datanodeId;
+
+ public ReplicationCompletedEvent(UUID UUID, String containerId,
+ String datanodeId) {
+ this.UUID = UUID;
+ this.containerId = containerId;
+ this.datanodeId = datanodeId;
+ }
+
+ public UUID getUUID() {
+ return UUID;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ ReplicationCompletedEvent that = (ReplicationCompletedEvent) o;
+ return Objects.equals(containerId, that.containerId) && Objects
+ .equals(datanodeId, that.datanodeId);
+ }
+
+ @Override
+ public int hashCode() {
+
+ return Objects.hash(containerId, datanodeId);
+ }
+ }
+
+ private static class UnderreplicatedEvent
+ implements IdentifiableEventPayload {
+
+ private final UUID UUID;
+
+ private final String containerId;
+
+ public UnderreplicatedEvent(UUID UUID, String containerId) {
+ this.containerId = containerId;
+ this.UUID = UUID;
+ }
+
+ public UUID getUUID() {
+ return UUID;
+ }
+ }
+
+}
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org