You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by el...@apache.org on 2019/02/26 15:52:14 UTC
[hadoop] branch docker-hadoop-runner updated: HDDS-1019. Use
apache/hadoop-runner image to test ozone secure cluster. Contributed by
Xiaoyu Yao.
This is an automated email from the ASF dual-hosted git repository.
elek pushed a commit to branch docker-hadoop-runner
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/docker-hadoop-runner by this push:
new 6d097eb HDDS-1019. Use apache/hadoop-runner image to test ozone secure cluster. Contributed by Xiaoyu Yao.
6d097eb is described below
commit 6d097eb48825d60ebb4bf0ba78974d59a81e0907
Author: Xiaoyu Yao <xy...@apache.org>
AuthorDate: Tue Feb 26 16:41:09 2019 +0100
HDDS-1019. Use apache/hadoop-runner image to test ozone secure cluster. Contributed by Xiaoyu Yao.
---
Dockerfile | 2 +
scripts/envtoconf.py | 155 +++++++++++++++++++-----------------
scripts/krb5.conf | 23 +++---
scripts/starter.sh | 124 ++++++++++++++---------------
scripts/transformation.py | 197 +++++++++++++++++++++++++---------------------
5 files changed, 260 insertions(+), 241 deletions(-)
diff --git a/Dockerfile b/Dockerfile
index ec96d80..f50bc7b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -34,6 +34,8 @@ RUN useradd --uid 1000 hadoop --gid 100 --home /opt/hadoop
RUN echo "hadoop ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN chown hadoop /opt
ADD scripts /opt/
+ADD scripts/krb5.conf /etc/
+RUN yum install -y krb5-workstation
WORKDIR /opt/hadoop
diff --git a/scripts/envtoconf.py b/scripts/envtoconf.py
index 18f22b4..ad2e176 100755
--- a/scripts/envtoconf.py
+++ b/scripts/envtoconf.py
@@ -16,89 +16,100 @@
# limitations under the License.
#
+"""convert environment variables to config"""
+
import os
import re
-from shutil import copyfile
import argparse
import sys
import transformation
-
-class Simple:
- def __init__(self, args):
- parser = argparse.ArgumentParser()
- parser.add_argument("--destination", help="Destination directory", required=True)
- self.args = parser.parse_args(args=args)
- # copy the default files to file.raw in desitnation directory
-
- self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", "cfg", 'conf']
- self.output_dir = self.args.destination
-
- self.configurables = {}
-
- def destination_file_path(self, name, extension):
- return os.path.join(self.output_dir, "{}.{}".format(name, extension))
-
- def write_env_var(self, name, extension, key, value):
- with open(self.destination_file_path(name, extension) + ".raw", "a") as myfile:
- myfile.write("{}: {}\n".format(key, value))
-
- def process_envs(self):
- for key in os.environ.keys():
- p = re.compile("[_\\.]")
- parts = p.split(key)
- extension = None
- name = parts[0].lower()
- if len(parts) > 1:
- extension = parts[1].lower()
- config_key = key[len(name) + len(extension) + 2:].strip()
- if extension and "!" in extension:
- splitted = extension.split("!")
- extension = splitted[0]
- format = splitted[1]
- config_key = key[len(name) + len(extension) + len(format) + 3:].strip()
- else:
- format = extension
-
- if extension and extension in self.known_formats:
- if name not in self.configurables.keys():
- with open(self.destination_file_path(name, extension) + ".raw", "w") as myfile:
- myfile.write("")
- self.configurables[name] = (extension, format)
- self.write_env_var(name, extension, config_key, os.environ[key])
- else:
- for configurable_name in self.configurables.keys():
- if key.lower().startswith(configurable_name.lower()):
- self.write_env_var(configurable_name, self.configurables[configurable_name], key[len(configurable_name) + 1:], os.environ[key])
-
- def transform(self):
- for configurable_name in self.configurables.keys():
- name = configurable_name
- extension, format = self.configurables[name]
-
- destination_path = self.destination_file_path(name, extension)
-
- with open(destination_path + ".raw", "r") as myfile:
- content = myfile.read()
- transformer_func = getattr(transformation, "to_" + format)
- content = transformer_func(content)
- with open(destination_path, "w") as myfile:
- myfile.write(content)
-
- def main(self):
-
- # add the
- self.process_envs()
-
- # copy file.ext.raw to file.ext in the destination directory, and transform to the right format (eg. key: value ===> XML)
- self.transform()
+class Simple(object):
+ """Simple conversion"""
+ def __init__(self, args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--destination", help="Destination directory", required=True)
+ self.args = parser.parse_args(args=args)
+ # copy the default files to file.raw in destination directory
+
+ self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", "cfg", 'conf']
+ self.output_dir = self.args.destination
+
+ self.configurables = {}
+
+ def destination_file_path(self, name, extension):
+ """destination file path"""
+ return os.path.join(self.output_dir, "{}.{}".format(name, extension))
+
+ def write_env_var(self, name, extension, key, value):
+ """Write environment variables"""
+ with open(self.destination_file_path(name, extension) + ".raw", "a") as myfile:
+ myfile.write("{}: {}\n".format(key, value))
+
+ def process_envs(self):
+ """Process environment variables"""
+ for key in os.environ.keys():
+ pattern = re.compile("[_\\.]")
+ parts = pattern.split(key)
+ extension = None
+ name = parts[0].lower()
+ if len(parts) > 1:
+ extension = parts[1].lower()
+ config_key = key[len(name) + len(extension) + 2:].strip()
+ if extension and "!" in extension:
+ splitted = extension.split("!")
+ extension = splitted[0]
+ fmt = splitted[1]
+ config_key = key[len(name) + len(extension) + len(fmt) + 3:].strip()
+ else:
+ fmt = extension
+
+ if extension and extension in self.known_formats:
+ if name not in self.configurables.keys():
+ with open(self.destination_file_path(name, extension) + ".raw", "w") as myfile:
+ myfile.write("")
+ self.configurables[name] = (extension, fmt)
+ self.write_env_var(name, extension, config_key, os.environ[key])
+ else:
+ for configurable_name in self.configurables:
+ if key.lower().startswith(configurable_name.lower()):
+ self.write_env_var(configurable_name,
+ self.configurables[configurable_name][0],
+ key[len(configurable_name) + 1:],
+ os.environ[key])
+
+ def transform(self):
+ """transform"""
+ for configurable_name in self.configurables:
+ name = configurable_name
+ extension, fmt = self.configurables[name]
+
+ destination_path = self.destination_file_path(name, extension)
+
+ with open(destination_path + ".raw", "r") as myfile:
+ content = myfile.read()
+ transformer_func = getattr(transformation, "to_" + fmt)
+ content = transformer_func(content)
+ with open(destination_path, "w") as myfile:
+ myfile.write(content)
+
+ def main(self):
+ """main"""
+
+ # add the
+ self.process_envs()
+
+ # copy file.ext.raw to file.ext in the destination directory, and
+ # transform to the right format (eg. key: value ===> XML)
+ self.transform()
def main():
- Simple(sys.argv[1:]).main()
+ """main"""
+ Simple(sys.argv[1:]).main()
if __name__ == '__main__':
- Simple(sys.argv[1:]).main()
+ Simple(sys.argv[1:]).main()
diff --git a/scripts/krb5.conf b/scripts/krb5.conf
index 0e03a6f..82ae73a 100644
--- a/scripts/krb5.conf
+++ b/scripts/krb5.conf
@@ -1,11 +1,9 @@
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
@@ -14,12 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-##
[logging]
-default = FILE:/var/log/krb5libs.log
-kdc = FILE:/var/log/krb5kdc.log
-admin_server = FILE:/var/log/kadmind.log
+ default = FILE:/var/log/krb5libs.log
+ kdc = FILE:/var/log/krb5kdc.log
+ admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_canonicalize_hostname = false
@@ -38,4 +35,4 @@ admin_server = FILE:/var/log/kadmind.log
[domain_realm]
.example.com = EXAMPLE.COM
- example.com = EXAMPLE.COM
+ example.com = EXAMPLE.COM
\ No newline at end of file
diff --git a/scripts/starter.sh b/scripts/starter.sh
index 0b07bba..2f9894f 100755
--- a/scripts/starter.sh
+++ b/scripts/starter.sh
@@ -20,8 +20,6 @@ set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-$DIR/envtoconf.py --destination /opt/hadoop/etc/hadoop
-
if [ -n "$SLEEP_SECONDS" ]; then
echo "Sleeping for $SLEEP_SECONDS seconds"
sleep $SLEEP_SECONDS
@@ -34,7 +32,7 @@ fi
#
# export WAITFOR=localhost:9878
#
-# With an optional parameter, you can also set the maximum
+# With an optional parameter, you can also set the maximum
# time of waiting with (in seconds) with WAITFOR_TIMEOUT.
# (The default is 300 seconds / 5 minutes.)
if [ ! -z "$WAITFOR" ]; then
@@ -58,84 +56,81 @@ if [ ! -z "$WAITFOR" ]; then
fi
fi
-
if [ -n "$KERBEROS_ENABLED" ]; then
- echo "Setting up kerberos!!"
- KERBEROS_SERVER=${KERBEROS_SERVER:-krb5}
- ISSUER_SERVER=${ISSUER_SERVER:-$KERBEROS_SERVER\:8081}
- echo "KDC ISSUER_SERVER => $ISSUER_SERVER"
-
- while true
- do
- STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://$ISSUER_SERVER/keytab/test/test)
- if [ $STATUS -eq 200 ]; then
- echo "Got 200, KDC service ready!!"
- break
- else
- echo "Got $STATUS :( KDC service not ready yet..."
- fi
- sleep 5
- done
-
- export HOST_NAME=`hostname -f`
- for NAME in ${KERBEROS_KEYTABS}; do
- echo "Download $NAME/$HOSTNAME@EXAMPLE.COM keytab file to $CONF_DIR/$NAME.keytab"
- wget http://$ISSUER_SERVER/keytab/$HOST_NAME/$NAME -O $CONF_DIR/$NAME.keytab
- KERBEROS_ENABLED=true
- done
-
- cat $DIR/krb5.conf | sed "s/SERVER/$KERBEROS_SERVER/g" | sudo tee /etc/krb5.conf
+ echo "Setting up kerberos!!"
+ KERBEROS_SERVER=${KERBEROS_SERVER:-krb5}
+ ISSUER_SERVER=${ISSUER_SERVER:-$KERBEROS_SERVER\:8081}
+ echo "KDC ISSUER_SERVER => $ISSUER_SERVER"
+
+ if [ -n "$SLEEP_SECONDS" ]; then
+ echo "Sleeping for ${SLEEP_SECONDS} seconds"
+ sleep "$SLEEP_SECONDS"
+ fi
+
+ if [ -z "$KEYTAB_DIR" ]; then
+ KEYTAB_DIR=/etc/security/keytabs
+ fi
+ while true
+ do
+ set +e
+ STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://"$ISSUER_SERVER"/keytab/test/test)
+ set -e
+ if [ "$STATUS" -eq 200 ]; then
+ echo "Got 200, KDC service ready!!"
+ break
+ else
+ echo "Got $STATUS :( KDC service not ready yet..."
+ fi
+ sleep 5
+ done
+
+ HOST_NAME=$(hostname -f)
+ export HOST_NAME
+ for NAME in ${KERBEROS_KEYTABS}; do
+ echo "Download $NAME/$HOSTNAME@EXAMPLE.COM keytab file to $KEYTAB_DIR/$NAME.keytab"
+ wget "http://$ISSUER_SERVER/keytab/$HOST_NAME/$NAME" -O "$KEYTAB_DIR/$NAME.keytab"
+ klist -kt "$KEYTAB_DIR/$NAME.keytab"
+ KERBEROS_ENABLED=true
+ done
+
+ sed "s/SERVER/$KERBEROS_SERVER/g" "$DIR"/krb5.conf | sudo tee /etc/krb5.conf
fi
#To avoid docker volume permission problems
sudo chmod o+rwx /data
+"$DIR"/envtoconf.py --destination /opt/hadoop/etc/hadoop
+
if [ -n "$ENSURE_NAMENODE_DIR" ]; then
- CLUSTERID_OPTS=""
- if [ -n "$ENSURE_NAMENODE_CLUSTERID" ]; then
- CLUSTERID_OPTS="-clusterid $ENSURE_NAMENODE_CLUSTERID"
- fi
- if [ ! -d "$ENSURE_NAMENODE_DIR" ]; then
- /opt/hadoop/bin/hdfs namenode -format -force $CLUSTERID_OPTS
- fi
+ CLUSTERID_OPTS=""
+ if [ -n "$ENSURE_NAMENODE_CLUSTERID" ]; then
+ CLUSTERID_OPTS="-clusterid $ENSURE_NAMENODE_CLUSTERID"
+ fi
+ if [ ! -d "$ENSURE_NAMENODE_DIR" ]; then
+ /opt/hadoop/bin/hdfs namenode -format -force $CLUSTERID_OPTS
+ fi
fi
if [ -n "$ENSURE_STANDBY_NAMENODE_DIR" ]; then
- if [ ! -d "$ENSURE_STANDBY_NAMENODE_DIR" ]; then
- /opt/hadoop/bin/hdfs namenode -bootstrapStandby
- fi
+ if [ ! -d "$ENSURE_STANDBY_NAMENODE_DIR" ]; then
+ /opt/hadoop/bin/hdfs namenode -bootstrapStandby
+ fi
fi
if [ -n "$ENSURE_SCM_INITIALIZED" ]; then
- if [ ! -f "$ENSURE_SCM_INITIALIZED" ]; then
- # Improve om and scm start up options
- /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init
- fi
+ if [ ! -f "$ENSURE_SCM_INITIALIZED" ]; then
+ # Improve om and scm start up options
+ /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init
+ fi
fi
-
if [ -n "$ENSURE_OM_INITIALIZED" ]; then
- if [ ! -f "$ENSURE_OM_INITIALIZED" ]; then
- # Improve om and scm start up options
- /opt/hadoop/bin/ozone om --init || /opt/hadoop/bin/ozone om -createObjectStore
- fi
-fi
-
-
-# The KSM initialization block will go away eventually once
-# we have completed renaming KSM to OzoneManager (OM).
-#
-if [ -n "$ENSURE_KSM_INITIALIZED" ]; then
- if [ ! -f "$ENSURE_KSM_INITIALIZED" ]; then
- # To make sure SCM is running in dockerized environment we will sleep
- # Could be removed after HDFS-13203
- echo "Waiting 15 seconds for SCM startup"
- sleep 15
- /opt/hadoop/bin/ozone ksm -createObjectStore
- fi
+ if [ ! -f "$ENSURE_OM_INITIALIZED" ]; then
+ # Improve om and scm start up options
+ /opt/hadoop/bin/ozone om --init || /opt/hadoop/bin/ozone om -createObjectStore
+ fi
fi
-
# Supports byteman script to instrument hadoop process with byteman script
#
#
@@ -158,5 +153,4 @@ if [ -n "$BYTEMAN_SCRIPT" ] || [ -n "$BYTEMAN_SCRIPT_URL" ]; then
echo "Process is instrumented with adding $AGENT_STRING to HADOOP_OPTS"
fi
-
-$@
+"$@"
diff --git a/scripts/transformation.py b/scripts/transformation.py
index 8838eb9..5e708ce 100755
--- a/scripts/transformation.py
+++ b/scripts/transformation.py
@@ -16,120 +16,135 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
+"""This module transform properties into different format"""
def render_yaml(yaml_root, prefix=""):
- result = ""
- if isinstance(yaml_root, dict):
- if len(prefix)>0:
- result +="\n"
- for key in yaml_root:
- result += "{}{}: {}".format(prefix, key, render_yaml(yaml_root[key], prefix + " "))
- elif isinstance(yaml_root, list):
- result += "\n"
- for item in yaml_root:
- result += prefix + " - " + render_yaml(item, prefix + " ")
- else:
- result += "{}\n".format(yaml_root)
- return result
+ """render yaml"""
+ result = ""
+ if isinstance(yaml_root, dict):
+ if prefix:
+ result += "\n"
+ for key in yaml_root:
+ result += "{}{}: {}".format(prefix, key, render_yaml(
+ yaml_root[key], prefix + " "))
+ elif isinstance(yaml_root, list):
+ result += "\n"
+ for item in yaml_root:
+ result += prefix + " - " + render_yaml(item, prefix + " ")
+ else:
+ result += "{}\n".format(yaml_root)
+ return result
def to_yaml(content):
- props = process_properties(content)
-
- keys = props.keys()
- yaml_props = {}
- for key in keys:
- parts = key.split(".")
- node = yaml_props
- prev_part = None
- parent_node = None
- for part in parts[:-1]:
- if part.isdigit():
- if isinstance(node, dict):
- parent_node[prev_part] = []
- node = parent_node[prev_part]
- while len(node) <= int(part):
- node.append({})
- parent_node = node
- node = node[int(node)]
- else:
- if part not in node:
- node[part] = {}
- parent_node = node
- node = node[part]
- prev_part = part
- if parts[-1].isdigit():
- if isinstance(node, dict):
- parent_node[prev_part] = []
- node = parent_node[prev_part]
- node.append(props[key])
- else:
- node[parts[-1]] = props[key]
-
- return render_yaml(yaml_props)
+ """transform to yaml"""
+ props = process_properties(content)
+
+ keys = props.keys()
+ yaml_props = {}
+ for key in keys:
+ parts = key.split(".")
+ node = yaml_props
+ prev_part = None
+ parent_node = {}
+ for part in parts[:-1]:
+ if part.isdigit():
+ if isinstance(node, dict):
+ parent_node[prev_part] = []
+ node = parent_node[prev_part]
+ while len(node) <= int(part):
+ node.append({})
+ parent_node = node
+ node = node[int(part)]
+ else:
+ if part not in node:
+ node[part] = {}
+ parent_node = node
+ node = node[part]
+ prev_part = part
+ if parts[-1].isdigit():
+ if isinstance(node, dict):
+ parent_node[prev_part] = []
+ node = parent_node[prev_part]
+ node.append(props[key])
+ else:
+ node[parts[-1]] = props[key]
+
+ return render_yaml(yaml_props)
+
def to_yml(content):
- return to_yaml(content)
+ """transform to yml"""
+ return to_yaml(content)
+
def to_properties(content):
- result = ""
- props = process_properties(content)
- for key in props.keys():
- result += "{}: {}\n".format(key, props[key])
- return result
+ """transform to properties"""
+ result = ""
+ props = process_properties(content)
+ for key, val in props.items():
+ result += "{}: {}\n".format(key, val)
+ return result
def to_env(content):
- result = ""
- props = process_properties(content)
- for key in props.keys():
- result += "{}={}\n".format(key, props[key])
- return result
+ """transform to environment variables"""
+ result = ""
+ props = process_properties(content)
+ for key, val in props.items():
+ result += "{}={}\n".format(key, val)
+ return result
def to_sh(content):
- result = ""
- props = process_properties(content)
- for key in props.keys():
- result += "export {}=\"{}\"\n".format(key, props[key])
- return result
+ """transform to shell"""
+ result = ""
+ props = process_properties(content)
+ for key, val in props.items():
+ result += "export {}=\"{}\"\n".format(key, val)
+ return result
def to_cfg(content):
- result = ""
- props = process_properties(content)
- for key in props.keys():
- result += "{}={}\n".format(key, props[key])
- return result
+ """transform to config"""
+ result = ""
+ props = process_properties(content)
+ for key, val in props.items():
+ result += "{}={}\n".format(key, val)
+ return result
def to_conf(content):
- result = ""
- props = process_properties(content)
- for key in props.keys():
- result += "export {}={}\n".format(key, props[key])
- return result
+ """transform to configuration"""
+ result = ""
+ props = process_properties(content)
+ for key, val in props.items():
+ result += "export {}={}\n".format(key, val)
+ return result
def to_xml(content):
- result = "<configuration>\n"
- props = process_properties(content)
- for key in props.keys():
- result += "<property><name>{0}</name><value>{1}</value></property>\n".format(key, props[key])
- result += "</configuration>"
- return result
+ """transform to xml"""
+ result = "<configuration>\n"
+ props = process_properties(content)
+ for key in props:
+ result += "<property><name>{0}</name><value>{1}</value></property>\n". \
+ format(key, props[key])
+ result += "</configuration>"
+ return result
def process_properties(content, sep=': ', comment_char='#'):
- """
- Read the file passed as parameter as a properties file.
- """
- props = {}
- for line in content.split("\n"):
- l = line.strip()
- if l and not l.startswith(comment_char):
- key_value = l.split(sep)
- key = key_value[0].strip()
- value = sep.join(key_value[1:]).strip().strip('"')
- props[key] = value
-
- return props
+ """
+ Read the file passed as parameter as a properties file.
+ """
+ props = {}
+ for line in content.split("\n"):
+ sline = line.strip()
+ if sline and not sline.startswith(comment_char):
+ key_value = sline.split(sep)
+ key = key_value[0].strip()
+ value = sep.join(key_value[1:]).strip().strip('"')
+ props[key] = value
+
+ return props
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org