You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by bh...@apache.org on 2018/09/24 17:10:45 UTC
[2/4] hadoop git commit: HDDS-447. Separate ozone-dist and
hadoop-dist projects with real classpath separation. Contributed by Elek
Marton.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md b/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md
new file mode 100644
index 0000000..527ff41
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md
@@ -0,0 +1,73 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+# Compose files for local performance tests
+
+This directory contains docker-compose definition for an ozone cluster where
+all the metrics are saved to a prometheus instance.
+
+ Prometheus follows a pull-based approach where the metrics are published
+ on an HTTP endpoint.
+
+ Our current approach:
+
+ 1. A Java agent activates a prometheus metrics endpoint in every JVM instance
+ (use `init.sh` to download the agent)
+
+ 2. The Java agent publishes all the JMX parameters in Prometheus format AND
+ registers the endpoint address with Consul.
+
+ 3. Prometheus polls all the endpoints which are registered to consul.
+
+
+
+## How to use
+
+First of all download the required Java agent with running `./init.sh`
+
+After that you can start the cluster with docker-compose:
+
+```
+docker-compose up -d
+```
+
+After a while the cluster will be started. You can check the Ozone web UIs:
+
+https://localhost:9874
+https://localhost:9876
+
+You can also scale up the datanodes:
+
+```
+docker-compose scale datanode=3
+```
+
+Freon (Ozone test generator tool) is not part of docker-compose by default,
+you can activate it using `compose-all.sh` instead of `docker-compose`:
+
+```
+compose-all.sh up -d
+```
+
+Now Freon is running. Let's try to check the metrics from the local Prometheus:
+
+http://localhost:9090/graph
+
+Example queries:
+
+```
+Hadoop_OzoneManager_NumKeyCommits
+rate(Hadoop_OzoneManager_NumKeyCommits[10m])
+rate(Hadoop_Ozone_BYTES_WRITTEN[10m])
+```
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh b/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh
new file mode 100755
index 0000000..82ab8b3
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/compose-all.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+docker-compose -f docker-compose.yaml -f docker-compose-freon.yaml "$@"
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml
new file mode 100644
index 0000000..60bdc4a
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose-freon.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+ freon:
+ image: apache/hadoop-runner
+ volumes:
+ - ../../ozone:/opt/hadoop
+ - ./jmxpromo.jar:/opt/jmxpromo.jar
+ env_file:
+ - ./docker-config
+ command: ["/opt/hadoop/bin/ozone","freon"]
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml
new file mode 100644
index 0000000..12b28bb
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml
@@ -0,0 +1,64 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+ datanode:
+ image: apache/hadoop-runner
+ volumes:
+ - ../..:/opt/hadoop
+ - ./jmxpromo.jar:/opt/jmxpromo.jar
+ ports:
+ - 9864
+ command: ["/opt/hadoop/bin/ozone","datanode"]
+ env_file:
+ - ./docker-config
+ ozoneManager:
+ image: apache/hadoop-runner
+ volumes:
+ - ../..:/opt/hadoop
+ - ./jmxpromo.jar:/opt/jmxpromo.jar
+ ports:
+ - 9874:9874
+ environment:
+ ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
+ env_file:
+ - ./docker-config
+ command: ["/opt/hadoop/bin/ozone","om"]
+ scm:
+ image: apache/hadoop-runner
+ volumes:
+ - ../..:/opt/hadoop
+ - ./jmxpromo.jar:/opt/jmxpromo.jar
+ ports:
+ - 9876:9876
+ env_file:
+ - ./docker-config
+ environment:
+ ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+ command: ["/opt/hadoop/bin/ozone","scm"]
+ consul:
+ image: consul
+ command: ["agent", "-dev", "-ui", "-client", "0.0.0.0"]
+ ports:
+ - 8500:8500
+ prometheus:
+ image: prom/prometheus
+ volumes:
+ - "./prometheus.yml:/etc/prometheus.yml"
+ command: ["--config.file","/etc/prometheus.yml"]
+ ports:
+ - 9090:9090
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
new file mode 100644
index 0000000..1ed1169
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONE-SITE.XML_ozone.om.address=ozoneManager
+OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_ozone.scm.client.address=scm
+HDFS-SITE.XML_rpc.metrics.quantile.enable=true
+HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+HADOOP_OPTS=-javaagent:/opt/jmxpromo.jar=port=0:consulHost=consul:consulMode=node
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh b/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh
new file mode 100755
index 0000000..cf25398
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/init.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+EXPORTER_FILE="$DIR/jmxpromo.jar"
+if [ ! -f "$EXPORTER_FILE" ]; then
+ wget https://github.com/flokkr/jmxpromo/releases/download/0.11/jmx_prometheus_javaagent-0.11.jar -O $EXPORTER_FILE
+fi
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml b/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml
new file mode 100644
index 0000000..80aa520
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozoneperf/prometheus.yml
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+global:
+ scrape_interval: 15s # By default, scrape targets every 15 seconds.
+
+scrape_configs:
+ - job_name: jmxexporter
+ consul_sd_configs:
+ - server: consul:8500
+ services:
+ - jmxexporter
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys
new file mode 100644
index 0000000..ae39052
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config
new file mode 100644
index 0000000..6506916
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+Host *
+ UserKnownHostsFile /dev/null
+ StrictHostKeyChecking no
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
new file mode 100644
index 0000000..5685453
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa
new file mode 100644
index 0000000..6632ce5
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEA4BJi6WJuAa1ratShvYYWVwmYBqxE57btHjU6NtVN1SnPZx/f
+6LezOpQGsLBXE/bl7uG+fD05Z378B/0wE5QhYwvJ9Ge0jsfhVOi90p/FEYfR2l+C
+9LRfLvO6AgA+HQa1BoYQd+norh/XQQVpcukSn32Cb642rW3OxsQTv3uTklDLapPw
+0lSpLJgYWcbrayorwjYw4rIfVYhiH/G+ckvzJRBpmlSgKYpNXyeYjpZaDPxoKQet
+GFf4W6nwnnG1oHojNYCvdoHTnAP1sdQu34/AUiOYg0hOnmXhfNIDN+NtZb8jw1sl
+YRWhm5GuK1wV/32/UEkQQPUU6oUSgBJgCH+PRwIDAQABAoIBAQDI1TH6ZNKchkck
+9XgSWsBjOqIcOQN5fCeDT8nho8WjLVpL3/Hcr+ngsxRcAXHK3xyvw33r9SQic1qJ
+/pC8u6RBFivo95qJ7vU0GXcp9TG4yLd6tui1U4WMm784U+dYNM7EDh1snSaECt3v
+1V3yNJ0QfnoOh2NShn0zAkOA+M4H8Nx2SudMCsjcbK9+fYxzW3hX+sJpMKdjG1HW
+DUz+I7cW7t0EGaVrgVSV+eR58LiXu+14YDNMrySiejB4nD2sKrx93XgiCBECCsBN
+GLQGJCztaXoAY+5Kf/aJ9EEf2wBF3GecRk+XIAd87PeDmeahLQAVkAJ/rD1vsKFs
+8kWi6CrhAoGBAP7leG/dMBhlfvROpBddIfWm2i8B+oZiAlSjdYGz+/ZhUaByXk18
+pupMGiiMQR1ziPnEg0gNgR2ZkH54qrXPn5WcQa4rlSEtUsZEp5v5WblhfX2QwKzY
+G/uhA+mB7wXpQkSmXo0LclfPF2teROQrG1OyfWkWbxFH4i3+em7sL95jAoGBAOEK
+v+wscqkMLW7Q8ONbWMCCBlmMHr6baB3VDCYZx25lr+GIF5zmJJFTmF2rq2VSAlts
+qx1AGmaUSo78kC5FuJvSNTL6a1Us5ucdthQZM3N8pAz+OAE+QEU+BsdA27yAh3tO
+yKDsMFNHKtXcgy5LeB5gzENLlNyw2jgkRv2Ef77NAoGAVH8DHqoHEH9Mx3XuRWR1
+JnaqKx0PzE5fEWmiQV3Fr5XxNivTgQJKXq7dmQVtbHLpPErdbhwz6fkHAjXD+UMb
+VsAWscL2y6m3n8wQd87/5EkiDWbXyDRXimGE53pQHviFJDa2bzEVNXCMBeaZFb4I
+cAViN1zdcrAOlUqfkXewIpsCgYB8wsXl/DpRB+RENGfn0+OfTjaQ/IKq72NIbq1+
+jfondQ6N/TICFQEe5HZrL9okoNOXteYjoD9CsWGoZdLVJGgVUvOVYImSvgMBDFK+
+T75bfzU/0sxfvBBLkviVDJsFpUf3D5VgybB86s6Po+HCD6r3RHjZshRESXOhflMx
+B3z+3QKBgE2Lwo0DuwUGkm6k8psyn3x8EiXNsiNw12cojicFTyKUYLHxMBeVbCLW
+3j3pxSggJgRuBLLzixUHbHp91r2ARTy28naK7R/la8yKVqK6ojcikN2mQsCHYtwB
+nuFwXr42ytn6G+9Wn4xT64tGjRCqyZn0/v0XsPjVCyrZ6G7EtNHP
+-----END RSA PRIVATE KEY-----
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub
new file mode 100644
index 0000000..ae39052
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub
@@ -0,0 +1,16 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
new file mode 100644
index 0000000..3bdcb0c
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+FROM apache/hadoop-runner
+RUN sudo apt-get update && sudo apt-get install -y openssh-server
+
+RUN sudo mkdir -p /run/sshd
+RUN sudo sed -i "s/.*UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" /etc/ssh/sshd_config
+RUN sudo sed -i "s/.*PermitUserEnvironment.*/PermitUserEnvironment yes/g" /etc/ssh/sshd_config
+RUN sudo sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+#/opt/hadoop is mounted, we can't use it as a home
+RUN sudo usermod -d /opt hadoop
+ADD .ssh /opt/.ssh
+RUN sudo chown -R hadoop /opt/.ssh
+RUN sudo chown hadoop /opt
+RUN sudo chmod 600 /opt/.ssh/*
+RUN sudo chmod 700 /opt/.ssh
+
+RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/" >> /etc/profile'
+CMD ["sudo","/usr/sbin/sshd","-D"]
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md b/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md
new file mode 100644
index 0000000..2531fa4
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md
@@ -0,0 +1,38 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+# start-ozone environment
+
+This is an example environment to use/test `./sbin/start-ozone.sh` and `./sbin/stop-ozone.sh` scripts.
+
+There are ssh connections between the containers and the start/stop scripts could handle the start/stop process
+similar to a real cluster.
+
+To use it, first start the cluster:
+
+```
+docker-compose up -d
+```
+
+After a successful startup (which starts only the SSH daemons) you can start Ozone:
+
+```
+./start.sh
+```
+
+Check if the Java processes are started:
+
+```
+./ps.sh
+```
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
new file mode 100644
index 0000000..62f1163
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+ datanode:
+ build: .
+ volumes:
+ - ../..:/opt/hadoop
+ ports:
+ - 9864
+ env_file:
+ - ./docker-config
+ om:
+ build: .
+ volumes:
+ - ../..:/opt/hadoop
+ ports:
+ - 9874:9874
+ env_file:
+ - ./docker-config
+ scm:
+ build: .
+ volumes:
+ - ../..:/opt/hadoop
+ ports:
+ - 9876:9876
+ env_file:
+ - ./docker-config
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
new file mode 100644
index 0000000..1afec73
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config
@@ -0,0 +1,38 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64/
+CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000
+OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=true
+OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_ozone.om.address=om
+OZONE-SITE.XML_ozone.om.http-address=om:9874
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
+HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
+HDFS-SITE.XML_rpc.metrics.quantile.enable=true
+HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh
new file mode 100755
index 0000000..d5e2c38
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+docker-compose ps -q | xargs -n1 -I CONTAINER docker exec CONTAINER ps xa
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
new file mode 100755
index 0000000..3358b07
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+docker-compose ps | grep datanode | awk '{print $1}' | xargs -n1 docker inspect --format '{{ .Config.Hostname }}' > ../../etc/hadoop/workers
+docker-compose exec scm /opt/hadoop/bin/ozone scm -init
+docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh
+#We need a running SCM for om objectstore creation
+#TODO create a utility to wait for the startup
+sleep 10
+docker-compose exec om /opt/hadoop/bin/ozone om -createObjectStore
+docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh
new file mode 100755
index 0000000..a3ce08a
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+docker-compose exec scm /opt/hadoop/sbin/stop-ozone.sh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/ozone/README.txt
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/ozone/README.txt b/hadoop-ozone/dist/src/main/ozone/README.txt
new file mode 100644
index 0000000..6bbd83f
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/ozone/README.txt
@@ -0,0 +1,51 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+This is the distribution of Apache Hadoop Ozone.
+
+Ozone is a submodule of Hadoop with a separate release cycle. For more information, check
+
+ http://ozone.hadoop.apache.org
+
+ and
+
+ https://cwiki.apache.org/confluence/display/HADOOP/Ozone+Contributor+Guide
+
+For more information about Hadoop, check:
+
+ http://hadoop.apache.org
+
+This distribution includes cryptographic software. The country in
+which you currently reside may have restrictions on the import,
+possession, use, and/or re-export to another country, of
+encryption software. BEFORE using any encryption software, please
+check your country's laws, regulations and policies concerning the
+import, possession, or use, and re-export of encryption software, to
+see if this is permitted. See <http://www.wassenaar.org/> for more
+information.
+
+The U.S. Government Department of Commerce, Bureau of Industry and
+Security (BIS), has classified this software as Export Commodity
+Control Number (ECCN) 5D002.C.1, which includes information security
+software using or performing cryptographic functions with asymmetric
+algorithms. The form and manner of this Apache Software Foundation
+distribution makes it eligible for export under the License Exception
+ENC Technology Software Unrestricted (TSU) exception (see the BIS
+Export Administration Regulations, Section 740.13) for both object
+code and source code.
+
+The following provides more details on the included cryptographic
+software:
+ Hadoop Core uses the SSL libraries from the Jetty project written
+by mortbay.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/smoketest/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/README.md b/hadoop-ozone/dist/src/main/smoketest/README.md
new file mode 100644
index 0000000..c521a54
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/README.md
@@ -0,0 +1,30 @@
+<!---
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+## Ozone Acceptance Tests
+
+This directory contains a [robotframework](http://robotframework.org/) based test suite for Ozone to make it easier to check the current state of the package.
+
+You can run it in any environment after [installing](https://github.com/robotframework/robotframework/blob/master/INSTALL.rst)
+
+```
+cd $DIRECTORY_OF_OZONE
+robot smoketest/basic
+```
+
+The argument of the `robot` command could be any robot file or directory.
+
+The current configuration in the robot files (hostnames, ports) is adjusted for the docker-based setup but you can easily modify it for any environment.
+
+The `./test.sh` in this directory can start multiple types of clusters (ozone standalone or ozone + hdfs) and execute the test framework with all of the clusters.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
new file mode 100644
index 0000000..a69450d
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation Smoketest ozone cluster startup
+Library OperatingSystem
+Resource ../commonlib.robot
+
+*** Variables ***
+${COMMON_REST_HEADER} -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
+${DATANODE_HOST} localhost
+
+
+*** Test Cases ***
+
+Test rest interface
+ ${result} = Execute curl -i -X POST ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1"
+ Should contain ${result} 201 Created
+ ${result} = Execute curl -i -X POST ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1/bucket1"
+ Should contain ${result} 201 Created
+ ${result} = Execute curl -i -X DELETE ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1/bucket1"
+ Should contain ${result} 200 OK
+ ${result} = Execute curl -i -X DELETE ${COMMON_RESTHEADER} "http://${DATANODE_HOST}:9880/volume1"
+ Should contain ${result} 200 OK
+
+Check webui static resources
+ ${result} = Execute curl -s -I http://scm:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
+ Should contain ${result} 200
+ ${result} = Execute curl -s -I http://ozoneManager:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
+ Should contain ${result} 200
+
+Start freon testing
+ ${result} = Execute ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 10
+ Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125
+ Should Not Contain ${result} ERROR
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
new file mode 100644
index 0000000..14a5761
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
@@ -0,0 +1,82 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation Test ozone shell CLI usage
+Library OperatingSystem
+Resource ../commonlib.robot
+Test Timeout 2 minute
+
+*** Variables ***
+
+*** Test Cases ***
+RestClient without http port
+ Test ozone shell http:// ozoneManager restwoport
+
+RestClient with http port
+ Test ozone shell http:// ozoneManager:9874 restwport
+
+RestClient without host name
+ Test ozone shell http:// ${EMPTY} restwohost
+
+RpcClient with port
+ Test ozone shell o3:// ozoneManager:9862 rpcwoport
+
+RpcClient without host
+ Test ozone shell o3:// ${EMPTY} rpcwport
+
+RpcClient without scheme
+ Test ozone shell ${EMPTY} ${EMPTY} rpcwoscheme
+
+
+*** Keywords ***
+Test ozone shell
+ [arguments] ${protocol} ${server} ${volume}
+ ${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --user bilbo --quota 100TB --root
+ Should not contain ${result} Failed
+ Should contain ${result} Creating Volume: ${volume}
+ ${result} = Execute ozone sh volume list ${protocol}${server}/ --user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
+ Should contain ${result} createdOn
+ ${result} = Execute ozone sh volume list --user bilbo | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")'
+ Should contain ${result} createdOn
+ Execute ozone sh volume update ${protocol}${server}/${volume} --user bill --quota 10TB
+ ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
+ Should Be Equal ${result} bill
+ ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size'
+ Should Be Equal ${result} 10
+ Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1
+ ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+ Should Be Equal ${result} DISK
+ ${result} = Execute ozone sh bucket update ${protocol}${server}/${volume}/bb1 --addAcl user:frodo:rw,group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="samwise") | .type'
+ Should Be Equal ${result} GROUP
+ ${result} = Execute ozone sh bucket update ${protocol}${server}/${volume}/bb1 --removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
+ Should Be Equal ${result} USER
+ ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+ Should Be Equal ${result} ${volume}
+ Run Keyword Test key handling ${protocol} ${server} ${volume}
+ Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1
+ Execute ozone sh volume delete ${protocol}${server}/${volume} --user bilbo
+
+Test key handling
+ [arguments] ${protocol} ${server} ${volume}
+ Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt
+ Execute rm -f NOTICE.txt.1
+ Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1
+ Execute ls -l NOTICE.txt.1
+ ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
+ Should contain ${result} createdOn
+ ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+ Should Be Equal ${result} key1
+ Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key1
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
new file mode 100644
index 0000000..e2620fa
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Keywords ***
+
+
+Execute
+ [arguments] ${command}
+ ${rc} ${output} = Run And Return Rc And Output ${command}
+ Log ${output}
+ Should Be Equal As Integers ${rc} 0
+ [return] ${output}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
new file mode 100644
index 0000000..fb7b98c
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation Ozonefs test
+Library OperatingSystem
+Resource ../commonlib.robot
+
+*** Variables ***
+
+
+*** Test Cases ***
+Create volume and bucket
+ Execute ozone sh volume create http://ozoneManager/fstest --user bilbo --quota 100TB --root
+ Execute ozone sh bucket create http://ozoneManager/fstest/bucket1
+
+Check volume from ozonefs
+ ${result} = Execute ozone fs -ls o3://bucket1.fstest/
+
+Create directory from ozonefs
+ Execute ozone fs -mkdir -p o3://bucket1.fstest/testdir/deep
+ ${result} = Execute ozone sh key list o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+ Should contain ${result} testdir/deep
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/dist/src/main/smoketest/test.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh b/hadoop-ozone/dist/src/main/smoketest/test.sh
new file mode 100755
index 0000000..534bbb7
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/test.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+
+execute_tests(){
+ COMPOSE_FILE=$DIR/../compose/$1/docker-compose.yaml
+ TESTS=$2
+ echo "Executing test ${TESTS[*]} with $COMPOSE_FILE"
+ docker-compose -f "$COMPOSE_FILE" down
+ docker-compose -f "$COMPOSE_FILE" up -d
+ docker-compose -f "$COMPOSE_FILE" exec datanode sudo apt-get update
+ docker-compose -f "$COMPOSE_FILE" exec datanode sudo apt-get install -y python-pip
+ docker-compose -f "$COMPOSE_FILE" exec datanode sudo pip install robotframework
+ for TEST in "${TESTS[@]}"; do
+ set +e
+ docker-compose -f "$COMPOSE_FILE" exec datanode python -m robot "smoketest/$TEST"
+ set -e
+ done
+ if [ "$KEEP_RUNNING" = false ]; then
+ docker-compose -f "$COMPOSE_FILE" down
+ fi
+}
+RUN_ALL=true
+KEEP_RUNNING=false
+POSITIONAL=()
+while [[ $# -gt 0 ]]
+do
+key="$1"
+
+case $key in
+ --env)
+ DOCKERENV="$2"
+ RUN_ALL=false
+ shift # past argument
+ shift # past value
+ ;;
+ --keep)
+ KEEP_RUNNING=true
+ shift # past argument
+ ;;
+ --help|-h|-help)
+ cat << EOF
+
+ Acceptance test executor for ozone.
+
+ This is a lightweight test executor for ozone.
+
+ You can run it with
+
+ ./test.sh
+
+ Which executes all the tests in all the available environments.
+
+ Or you can run manually one test with
+
+ ./test.sh --keep --env ozone-hdfs basic
+
+ --keep means that the docker cluster won't be stopped after the test (optional)
+ --env defines the subdirectory under the compose dir
+ The remaining parameters define the test suites under smoketest dir.
+ Could be any directory or robot file relative to the smoketest dir.
+EOF
+ exit 0
+ ;;
+ *)
+ POSITIONAL+=("$1") # save it in an array for later
+ shift # past argument
+ ;;
+esac
+done
+
+if [ "$RUN_ALL" = true ]; then
+#
+# This is the definition of the ozone acceptance test suite
+#
+# We select the test suites and execute them on multiple type of clusters
+#
+ DEFAULT_TESTS=("basic")
+ execute_tests ozone "${DEFAULT_TESTS[@]}"
+ TESTS=("ozonefs")
+ execute_tests ozonefs "${TESTS[@]}"
+
+else
+ execute_tests "$DOCKERENV" "${POSITIONAL[@]}"
+fi
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/docs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/pom.xml b/hadoop-ozone/docs/pom.xml
index 64d0ec8..d8edd15 100644
--- a/hadoop-ozone/docs/pom.xml
+++ b/hadoop-ozone/docs/pom.xml
@@ -28,11 +28,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<name>Apache Hadoop Ozone Documentation</name>
<packaging>jar</packaging>
- <properties>
- <hadoop.component>ozone</hadoop.component>
- <is.hadoop.component>true</is.hadoop.component>
- </properties>
-
<dependencies>
</dependencies>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/integration-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 993e91f..bed2fce 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -28,37 +28,23 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<name>Apache Hadoop Ozone Integration Tests</name>
<packaging>jar</packaging>
- <properties>
- <hadoop.component>ozone</hadoop.component>
- <is.hadoop.component>true</is.hadoop.component>
- </properties>
-
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-common</artifactId>
- <scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-ozone-manager</artifactId>
- <scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-objectstore-service</artifactId>
- <scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-ozone-client</artifactId>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-ozone-tools</artifactId>
- <scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
deleted file mode 100644
index a2df50d..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests Freon, with MiniOzoneCluster and validate data.
- */
-public class TestDataValidate {
-
- private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
-
- /**
- * Create a MiniDFSCluster for testing.
- * <p>
- * Ozone is made active by setting OZONE_ENABLED = true
- *
- */
- @BeforeClass
- public static void init() throws Exception {
- conf = new OzoneConfiguration();
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5).build();
- cluster.waitForClusterToBeReady();
- }
-
- /**
- * Shutdown MiniDFSCluster.
- */
- @AfterClass
- public static void shutdown() {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
-
- @Test
- public void ratisTestLargeKey() throws Exception {
- RandomKeyGenerator randomKeyGenerator =
- new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
- randomKeyGenerator.setNumOfVolumes(1);
- randomKeyGenerator.setNumOfBuckets(1);
- randomKeyGenerator.setNumOfKeys(1);
- randomKeyGenerator.setType(ReplicationType.RATIS);
- randomKeyGenerator.setFactor(ReplicationFactor.THREE);
- randomKeyGenerator.setKeySize(20971520);
- randomKeyGenerator.setValidateWrites(true);
- randomKeyGenerator.call();
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
- }
-
- @Test
- public void standaloneTestLargeKey() throws Exception {
- RandomKeyGenerator randomKeyGenerator =
- new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
- randomKeyGenerator.setNumOfVolumes(1);
- randomKeyGenerator.setNumOfBuckets(1);
- randomKeyGenerator.setNumOfKeys(1);
- randomKeyGenerator.setKeySize(20971520);
- randomKeyGenerator.setValidateWrites(true);
- randomKeyGenerator.call();
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
- }
-
- @Test
- public void validateWriteTest() throws Exception {
- RandomKeyGenerator randomKeyGenerator =
- new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
- randomKeyGenerator.setNumOfVolumes(2);
- randomKeyGenerator.setNumOfBuckets(5);
- randomKeyGenerator.setNumOfKeys(10);
- randomKeyGenerator.setValidateWrites(true);
- randomKeyGenerator.call();
- Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertTrue(randomKeyGenerator.getValidateWrites());
- Assert.assertNotEquals(0, randomKeyGenerator.getTotalKeysValidated());
- Assert.assertNotEquals(0, randomKeyGenerator
- .getSuccessfulValidationCount());
- Assert.assertEquals(0, randomKeyGenerator
- .getUnsuccessfulValidationCount());
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
deleted file mode 100644
index d21d399..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.freon;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests Freon, with MiniOzoneCluster.
- */
-public class TestRandomKeyGenerator {
-
- private static MiniOzoneCluster cluster;
- private static OzoneConfiguration conf;
-
- /**
- * Create a MiniDFSCluster for testing.
- * <p>
- * Ozone is made active by setting OZONE_ENABLED = true
- *
- */
- @BeforeClass
- public static void init() throws Exception {
- conf = new OzoneConfiguration();
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build();
- cluster.waitForClusterToBeReady();
- }
-
- /**
- * Shutdown MiniDFSCluster.
- */
- @AfterClass
- public static void shutdown() {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
-
- @Test
- public void defaultTest() throws Exception {
- RandomKeyGenerator randomKeyGenerator =
- new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
- randomKeyGenerator.setNumOfVolumes(2);
- randomKeyGenerator.setNumOfBuckets(5);
- randomKeyGenerator.setNumOfKeys(10);
- randomKeyGenerator.call();
- Assert.assertEquals(2, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
- Assert.assertEquals(10240 - 36, randomKeyGenerator.getKeyValueLength());
- }
-
- @Test
- public void multiThread() throws Exception {
- RandomKeyGenerator randomKeyGenerator =
- new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
- randomKeyGenerator.setNumOfVolumes(10);
- randomKeyGenerator.setNumOfBuckets(1);
- randomKeyGenerator.setNumOfKeys(10);
- randomKeyGenerator.setNumOfThreads(10);
- randomKeyGenerator.setKeySize(10240);
- randomKeyGenerator.call();
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
- }
-
- @Test
- public void ratisTest3() throws Exception {
- RandomKeyGenerator randomKeyGenerator =
- new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
- randomKeyGenerator.setNumOfVolumes(10);
- randomKeyGenerator.setNumOfBuckets(1);
- randomKeyGenerator.setNumOfKeys(10);
- randomKeyGenerator.setNumOfThreads(10);
- randomKeyGenerator.setKeySize(10240);
- randomKeyGenerator.setFactor(ReplicationFactor.THREE);
- randomKeyGenerator.setType(ReplicationType.RATIS);
- randomKeyGenerator.call();
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfVolumesCreated());
- Assert.assertEquals(10, randomKeyGenerator.getNumberOfBucketsCreated());
- Assert.assertEquals(100, randomKeyGenerator.getNumberOfKeysAdded());
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
deleted file mode 100644
index a3ff6c8..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
+++ /dev/null
@@ -1,285 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class tests the CLI that transforms om.db into SQLite DB files.
- */
-@RunWith(Parameterized.class)
-public class TestOmSQLCli {
- private MiniOzoneCluster cluster = null;
- private StorageHandler storageHandler;
- private UserArgs userArgs;
- private OzoneConfiguration conf;
- private SQLCLI cli;
-
- private String userName = "userTest";
- private String adminName = "adminTest";
- private String volumeName0 = "volumeTest0";
- private String volumeName1 = "volumeTest1";
- private String bucketName0 = "bucketTest0";
- private String bucketName1 = "bucketTest1";
- private String bucketName2 = "bucketTest2";
- private String keyName0 = "key0";
- private String keyName1 = "key1";
- private String keyName2 = "key2";
- private String keyName3 = "key3";
-
- @Parameterized.Parameters
- public static Collection<Object[]> data() {
- return Arrays.asList(new Object[][] {
- // Uncomment the below line if we support leveldb in future.
- //{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
- {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
- });
- }
-
- private String metaStoreType;
-
- public TestOmSQLCli(String type) {
- metaStoreType = type;
- }
-
- /**
- * Create a MiniDFSCluster for testing.
- * <p>
- * Ozone is made active by setting OZONE_ENABLED = true
- *
- * @throws IOException
- */
- @Before
- public void setup() throws Exception {
- conf = new OzoneConfiguration();
- cluster = MiniOzoneCluster.newBuilder(conf).build();
- cluster.waitForClusterToBeReady();
- storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
- userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
- null, null, null, null);
- cluster.waitForClusterToBeReady();
-
- VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs);
- createVolumeArgs0.setUserName(userName);
- createVolumeArgs0.setAdminName(adminName);
- storageHandler.createVolume(createVolumeArgs0);
- VolumeArgs createVolumeArgs1 = new VolumeArgs(volumeName1, userArgs);
- createVolumeArgs1.setUserName(userName);
- createVolumeArgs1.setAdminName(adminName);
- storageHandler.createVolume(createVolumeArgs1);
-
- BucketArgs bucketArgs0 = new BucketArgs(volumeName0, bucketName0, userArgs);
- storageHandler.createBucket(bucketArgs0);
- BucketArgs bucketArgs1 = new BucketArgs(volumeName1, bucketName1, userArgs);
- storageHandler.createBucket(bucketArgs1);
- BucketArgs bucketArgs2 = new BucketArgs(volumeName0, bucketName2, userArgs);
- storageHandler.createBucket(bucketArgs2);
-
- KeyArgs keyArgs0 =
- new KeyArgs(volumeName0, bucketName0, keyName0, userArgs);
- keyArgs0.setSize(100);
- KeyArgs keyArgs1 =
- new KeyArgs(volumeName1, bucketName1, keyName1, userArgs);
- keyArgs1.setSize(200);
- KeyArgs keyArgs2 =
- new KeyArgs(volumeName0, bucketName2, keyName2, userArgs);
- keyArgs2.setSize(300);
- KeyArgs keyArgs3 =
- new KeyArgs(volumeName0, bucketName2, keyName3, userArgs);
- keyArgs3.setSize(400);
-
- OutputStream stream = storageHandler.newKeyWriter(keyArgs0);
- stream.close();
- stream = storageHandler.newKeyWriter(keyArgs1);
- stream.close();
- stream = storageHandler.newKeyWriter(keyArgs2);
- stream.close();
- stream = storageHandler.newKeyWriter(keyArgs3);
- stream.close();
-
- cluster.getOzoneManager().stop();
- cluster.getStorageContainerManager().stop();
- conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
- cli = new SQLCLI(conf);
- }
-
- @After
- public void shutdown() {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
-
- // After HDDS-357, we have to fix SQLCli.
- // TODO: fix SQLCli
- @Ignore
- @Test
- public void testOmDB() throws Exception {
- String dbOutPath = GenericTestUtils.getTempPath(
- UUID.randomUUID() + "/out_sql.db");
-
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
- String dbPath = dbRootPath + "/" + OM_DB_NAME;
- String[] args = {"-p", dbPath, "-o", dbOutPath};
-
- cli.run(args);
-
- Connection conn = connectDB(dbOutPath);
- String sql = "SELECT * FROM volumeList";
- ResultSet rs = executeQuery(conn, sql);
- List<String> expectedValues =
- new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
- while (rs.next()) {
- String userNameRs = rs.getString("userName");
- String volumeNameRs = rs.getString("volumeName");
- assertEquals(userName, userNameRs.substring(1));
- assertTrue(expectedValues.remove(volumeNameRs));
- }
- assertEquals(0, expectedValues.size());
-
- sql = "SELECT * FROM volumeInfo";
- rs = executeQuery(conn, sql);
- expectedValues =
- new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
- while (rs.next()) {
- String adName = rs.getString("adminName");
- String ownerName = rs.getString("ownerName");
- String volumeName = rs.getString("volumeName");
- assertEquals(adminName, adName);
- assertEquals(userName, ownerName);
- assertTrue(expectedValues.remove(volumeName));
- }
- assertEquals(0, expectedValues.size());
-
- sql = "SELECT * FROM aclInfo";
- rs = executeQuery(conn, sql);
- expectedValues =
- new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
- while (rs.next()) {
- String adName = rs.getString("adminName");
- String ownerName = rs.getString("ownerName");
- String volumeName = rs.getString("volumeName");
- String type = rs.getString("type");
- String uName = rs.getString("userName");
- String rights = rs.getString("rights");
- assertEquals(adminName, adName);
- assertEquals(userName, ownerName);
- assertEquals("USER", type);
- assertEquals(userName, uName);
- assertEquals("READ_WRITE", rights);
- assertTrue(expectedValues.remove(volumeName));
- }
- assertEquals(0, expectedValues.size());
-
- sql = "SELECT * FROM bucketInfo";
- rs = executeQuery(conn, sql);
- HashMap<String, String> expectedMap = new HashMap<>();
- expectedMap.put(bucketName0, volumeName0);
- expectedMap.put(bucketName2, volumeName0);
- expectedMap.put(bucketName1, volumeName1);
- while (rs.next()) {
- String volumeName = rs.getString("volumeName");
- String bucketName = rs.getString("bucketName");
- boolean versionEnabled = rs.getBoolean("versionEnabled");
- String storegeType = rs.getString("storageType");
- assertEquals(volumeName, expectedMap.remove(bucketName));
- assertFalse(versionEnabled);
- assertEquals("DISK", storegeType);
- }
- assertEquals(0, expectedMap.size());
-
- sql = "SELECT * FROM keyInfo";
- rs = executeQuery(conn, sql);
- HashMap<String, List<String>> expectedMap2 = new HashMap<>();
- // no data written, data size will be 0
- expectedMap2.put(keyName0,
- Arrays.asList(volumeName0, bucketName0, "0"));
- expectedMap2.put(keyName1,
- Arrays.asList(volumeName1, bucketName1, "0"));
- expectedMap2.put(keyName2,
- Arrays.asList(volumeName0, bucketName2, "0"));
- expectedMap2.put(keyName3,
- Arrays.asList(volumeName0, bucketName2, "0"));
- while (rs.next()) {
- String volumeName = rs.getString("volumeName");
- String bucketName = rs.getString("bucketName");
- String keyName = rs.getString("keyName");
- int dataSize = rs.getInt("dataSize");
- List<String> vals = expectedMap2.remove(keyName);
- assertNotNull(vals);
- assertEquals(vals.get(0), volumeName);
- assertEquals(vals.get(1), bucketName);
- assertEquals(vals.get(2), Integer.toString(dataSize));
- }
- assertEquals(0, expectedMap2.size());
-
- conn.close();
- Files.delete(Paths.get(dbOutPath));
- }
-
- private ResultSet executeQuery(Connection conn, String sql)
- throws SQLException {
- Statement stmt = conn.createStatement();
- return stmt.executeQuery(sql);
- }
-
- private Connection connectDB(String dbPath) throws Exception {
- Class.forName("org.sqlite.JDBC");
- String connectPath =
- String.format("jdbc:sqlite:%s", dbPath);
- return DriverManager.getConnection(connectPath);
- }
-}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f817d3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
deleted file mode 100644
index 4026348..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright containerOwnership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
-import static org.apache.hadoop.ozone.OzoneConsts.KB;
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests the CLI that transforms container into SQLite DB files.
- */
-@RunWith(Parameterized.class)
-public class TestContainerSQLCli {
-
- private EventQueue eventQueue;
-
- @Parameterized.Parameters
- public static Collection<Object[]> data() {
- return Arrays.asList(new Object[][] {
- {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
- {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
- });
- }
-
- private static String metaStoreType;
-
- public TestContainerSQLCli(String type) {
- metaStoreType = type;
- }
-
- private static SQLCLI cli;
-
- private MiniOzoneCluster cluster;
- private OzoneConfiguration conf;
- private String datanodeIpAddress;
-
- private ContainerMapping mapping;
- private NodeManager nodeManager;
- private BlockManagerImpl blockManager;
-
- private HashMap<Long, Long> blockContainerMap;
-
- private final static long DEFAULT_BLOCK_SIZE = 4 * KB;
- private static HddsProtos.ReplicationFactor factor;
- private static HddsProtos.ReplicationType type;
- private static final String CONTAINER_OWNER = "OZONE";
-
-
- @Before
- public void setup() throws Exception {
- blockContainerMap = new HashMap<>();
-
- conf = new OzoneConfiguration();
- conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 2);
- conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
- SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
- if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){
- factor = HddsProtos.ReplicationFactor.THREE;
- type = HddsProtos.ReplicationType.RATIS;
- } else {
- factor = HddsProtos.ReplicationFactor.ONE;
- type = HddsProtos.ReplicationType.STAND_ALONE;
- }
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2).build();
- cluster.waitForClusterToBeReady();
- datanodeIpAddress = cluster.getHddsDatanodes().get(0)
- .getDatanodeDetails().getIpAddress();
- cluster.getOzoneManager().stop();
- cluster.getStorageContainerManager().stop();
- eventQueue = new EventQueue();
- nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
- mapping = new ContainerMapping(conf, nodeManager, 128,
- eventQueue);
- blockManager = new BlockManagerImpl(conf, nodeManager, mapping, eventQueue);
- eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
- eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
- GenericTestUtils.waitFor(() -> {
- return !blockManager.isScmInChillMode();
- }, 10, 1000 * 15);
- // blockManager.allocateBlock() will create containers if there is none
- // stored in levelDB. The number of containers to create is the value of
- // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE which we set to 2.
- // so the first allocateBlock() will create two containers. A random one
- // is assigned for the block.
-
- // loop until both the two datanodes are up, try up to about 4 seconds.
- for (int c = 0; c < 40; c++) {
- if (nodeManager.getAllNodes().size() == 2) {
- break;
- }
- Thread.sleep(100);
- }
- assertEquals(2, nodeManager.getAllNodes().size());
- AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, type,
- factor, CONTAINER_OWNER);
- blockContainerMap.put(ab1.getBlockID().getLocalID(),
- ab1.getBlockID().getContainerID());
-
- AllocatedBlock ab2;
- // we want the two blocks on the two provisioned containers respectively,
- // however blockManager picks containers randomly, keep retry until we
- // assign the second block to the other container. This seems to be the only
- // way to get the two containers.
- // although each retry will create a block and assign to a container. So
- // the size of blockContainerMap will vary each time the test is run.
- while (true) {
- ab2 = blockManager
- .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, CONTAINER_OWNER);
- blockContainerMap.put(ab2.getBlockID().getLocalID(),
- ab2.getBlockID().getContainerID());
- if (ab1.getBlockID().getContainerID() !=
- ab2.getBlockID().getContainerID()) {
- break;
- }
- }
-
- blockManager.close();
- mapping.close();
- nodeManager.close();
-
- conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
- cli = new SQLCLI(conf);
-
- }
-
- @After
- public void shutdown() throws InterruptedException {
- if (cluster != null) {
- cluster.shutdown();
- }
- }
-
- @Test
- public void testConvertContainerDB() throws Exception {
- String dbOutPath = GenericTestUtils.getTempPath(
- UUID.randomUUID() + "/out_sql.db");
- // TODO : the following will fail due to empty Datanode list, need to fix.
- //String dnUUID = cluster.getDataNodes().get(0).getUuid();
- String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
- String dbPath = dbRootPath + "/" + SCM_CONTAINER_DB;
- String[] args = {"-p", dbPath, "-o", dbOutPath};
- Connection conn;
- String sql;
- ResultSet rs;
-
- cli.run(args);
-
- //verify the sqlite db
- // only checks the container names are as expected. Because other fields
- // such as datanode UUID are generated randomly each time
- conn = connectDB(dbOutPath);
- sql = "SELECT * FROM containerInfo";
- rs = executeQuery(conn, sql);
- ArrayList<Long> containerIDs = new ArrayList<>();
- while (rs.next()) {
- containerIDs.add(rs.getLong("containerID"));
- //assertEquals(dnUUID, rs.getString("leaderUUID"));
- }
- /* TODO: fix this later when the SQLCLI is fixed.
- assertTrue(containerIDs.size() == 2 &&
- containerIDs.contains(pipeline1.getContainerName()) &&
- containerIDs.contains(pipeline2.getContainerName()));
-
- sql = "SELECT * FROM containerMembers";
- rs = executeQuery(conn, sql);
- containerIDs = new ArrayList<>();
- while (rs.next()) {
- containerIDs.add(rs.getLong("containerID"));
- //assertEquals(dnUUID, rs.getString("datanodeUUID"));
- }
- assertTrue(containerIDs.size() == 2 &&
- containerIDs.contains(pipeline1.getContainerName()) &&
- containerIDs.contains(pipeline2.getContainerName()));
-
- sql = "SELECT * FROM datanodeInfo";
- rs = executeQuery(conn, sql);
- int count = 0;
- while (rs.next()) {
- assertEquals(datanodeIpAddress, rs.getString("ipAddress"));
- //assertEquals(dnUUID, rs.getString("datanodeUUID"));
- count += 1;
- }
- // the two containers maybe on the same datanode, maybe not.
- int expected = pipeline1.getLeader().getUuid().equals(
- pipeline2.getLeader().getUuid())? 1 : 2;
- assertEquals(expected, count);
- */
- Files.delete(Paths.get(dbOutPath));
- }
-
- private ResultSet executeQuery(Connection conn, String sql)
- throws SQLException {
- Statement stmt = conn.createStatement();
- return stmt.executeQuery(sql);
- }
-
- private Connection connectDB(String dbPath) throws Exception {
- Class.forName("org.sqlite.JDBC");
- String connectPath =
- String.format("jdbc:sqlite:%s", dbPath);
- return DriverManager.getConnection(connectPath);
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org