You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ranger.apache.org by ma...@apache.org on 2020/08/26 20:03:33 UTC

[ranger] branch master updated: RANGER-2969: Docker setup to run Ranger enabled HDFS

This is an automated email from the ASF dual-hosted git repository.

madhan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ranger.git


The following commit(s) were added to refs/heads/master by this push:
     new 95dfaf8  RANGER-2969: Docker setup to run Ranger enabled HDFS
95dfaf8 is described below

commit 95dfaf80c3f7a67e827580f99934dcbef6f06dc4
Author: Madhan Neethiraj <ma...@apache.org>
AuthorDate: Sun Aug 23 22:43:35 2020 -0700

    RANGER-2969: Docker setup to run Ranger enabled HDFS
---
 dev-support/ranger-docker/.dockerignore            |  2 +
 dev-support/ranger-docker/Dockerfile.ranger        |  7 +-
 dev-support/ranger-docker/Dockerfile.ranger-hadoop | 80 ++++++++++++++++++++++
 dev-support/ranger-docker/README.md                | 13 ++++
 .../ranger-docker/docker-compose.ranger-hadoop.yml | 20 ++++++
 .../ranger-docker/scripts/ranger-hadoop-setup.sh   | 61 +++++++++++++++++
 dev-support/ranger-docker/scripts/ranger-hadoop.sh | 54 +++++++++++++++
 .../scripts/ranger-hdfs-plugin-install.properties  | 76 ++++++++++++++++++++
 .../scripts/ranger-hdfs-service-dev_hdfs.py        |  8 +++
 .../scripts/ranger-hive-service-dev_hive.py        |  8 +++
 dev-support/ranger-docker/scripts/ranger.sh        |  9 +++
 11 files changed, 337 insertions(+), 1 deletion(-)

diff --git a/dev-support/ranger-docker/.dockerignore b/dev-support/ranger-docker/.dockerignore
index cedefaa..5a236e9 100644
--- a/dev-support/ranger-docker/.dockerignore
+++ b/dev-support/ranger-docker/.dockerignore
@@ -2,4 +2,6 @@
 !config
 !dist/version
 !dist/ranger-*-admin.tar.gz
+!dist/ranger-*-hdfs-plugin.tar.gz
+!dist/ranger-*-hive-plugin.tar.gz
 !scripts/*
diff --git a/dev-support/ranger-docker/Dockerfile.ranger b/dev-support/ranger-docker/Dockerfile.ranger
index 3e54a30..2711b35 100644
--- a/dev-support/ranger-docker/Dockerfile.ranger
+++ b/dev-support/ranger-docker/Dockerfile.ranger
@@ -16,12 +16,15 @@
 
 FROM ubuntu:20.04
 
+ENV RANGER_VERSION 3.0.0-SNAPSHOT
+
 # Install curl, wget, tzdata, Python, Java, python-requests
 RUN apt-get update && \
     DEBIAN_FRONTEND="noninteractive" apt-get -y install curl wget tzdata python python3 python3-pip openjdk-8-jdk bc iputils-ping && \
     curl https://bootstrap.pypa.io/get-pip.py --output /tmp/get-pip.py && \
     python2 /tmp/get-pip.py && \
     pip3 install requests && \
+    pip3 install apache-ranger && \
     pip install requests
 
 # Set environment variables
@@ -44,8 +47,10 @@ RUN groupadd ranger && \
 	chown -R ranger:ranger /opt/ranger
 
 COPY ./dist/version /home/ranger/dist/
-COPY ./dist/ranger-*-admin.tar.gz /home/ranger/dist/
+COPY ./dist/ranger-${RANGER_VERSION}-admin.tar.gz /home/ranger/dist/
 COPY ./scripts/ranger.sh /home/ranger/scripts/
 COPY ./scripts/ranger-admin-install.properties /home/ranger/scripts/
+COPY ./scripts/ranger-hdfs-service-dev_hdfs.py /home/ranger/scripts/
+COPY ./scripts/ranger-hive-service-dev_hive.py /home/ranger/scripts/
 
 ENTRYPOINT [ "/home/ranger/scripts/ranger.sh" ]
diff --git a/dev-support/ranger-docker/Dockerfile.ranger-hadoop b/dev-support/ranger-docker/Dockerfile.ranger-hadoop
new file mode 100644
index 0000000..27af2b7
--- /dev/null
+++ b/dev-support/ranger-docker/Dockerfile.ranger-hadoop
@@ -0,0 +1,80 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:20.04
+
+ENV RANGER_VERSION 3.0.0-SNAPSHOT
+ENV HADOOP_VERSION 3.1.1
+ENV HIVE_VERSION   3.1.2
+
+# Install curl, wget, tzdata, Python, Java, python-requests
+RUN apt-get update && \
+    DEBIAN_FRONTEND="noninteractive" apt-get -y install vim sudo curl wget tzdata python python3 python3-pip openjdk-8-jdk bc iputils-ping ssh pdsh && \
+    curl https://bootstrap.pypa.io/get-pip.py --output /tmp/get-pip.py && \
+    python2 /tmp/get-pip.py && \
+    pip3 install requests && \
+    pip install requests
+
+RUN groupadd hadoop && \
+    useradd -g hadoop -ms /bin/bash hdfs && \
+    useradd -g hadoop -ms /bin/bash hive && \
+    mkdir -p /opt/ranger && \
+    mkdir -p /home/ranger/dist && \
+    mkdir -p /home/ranger/scripts
+
+
+COPY ./dist/version                                     /home/ranger/dist/
+COPY ./dist/ranger-${RANGER_VERSION}-hdfs-plugin.tar.gz /home/ranger/dist/
+COPY ./scripts/ranger-hadoop-setup.sh                   /home/ranger/scripts/
+COPY ./scripts/ranger-hadoop.sh                         /home/ranger/scripts/
+COPY ./scripts/ranger-hdfs-plugin-install.properties    /home/ranger/scripts/
+
+RUN curl https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz  --output /tmp/hadoop-${HADOOP_VERSION}.tar.gz && \
+    tar xvfz /tmp/hadoop-${HADOOP_VERSION}.tar.gz --directory=/opt/ && \
+    ln -s /opt/hadoop-${HADOOP_VERSION} /opt/hadoop && \
+    rm -f /tmp/hadoop-${HADOOP_VERSION}.tar.gz && \
+    tar xvfz /home/ranger/dist/ranger-${RANGER_VERSION}-hdfs-plugin.tar.gz --directory=/opt/ranger && \
+    ln -s /opt/ranger/ranger-${RANGER_VERSION}-hdfs-plugin /opt/ranger/ranger-hdfs-plugin && \
+    rm -f /home/ranger/dist/ranger-${RANGER_VERSION}-hdfs-plugin.tar.gz && \
+    cp -f /home/ranger/scripts/ranger-hdfs-plugin-install.properties /opt/ranger/ranger-hdfs-plugin/install.properties
+
+ENV JAVA_HOME      /usr/lib/jvm/java-8-openjdk-amd64
+ENV RANGER_DIST    /home/ranger/dist
+ENV RANGER_SCRIPTS /home/ranger/scripts
+ENV RANGER_HOME    /opt/ranger
+
+ENV HADOOP_HOME        /opt/hadoop
+ENV HADOOP_CONF_DIR    /opt/hadoop/etc/hadoop
+ENV HADOOP_HDFS_HOME   /opt/hadoop
+ENV HADOOP_MAPRED_HOME /opt/hadoop
+ENV HADOOP_COMMON_HOME /opt/hadoop
+ENV YARN_HOME          /opt/hadoop
+ENV PATH /usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/hadoop/bin
+
+# COPY ./dist/ranger-${RANGER_VERSION}-hive-plugin.tar.gz /home/ranger/dist/
+#
+# RUN curl https://archive.apache.org/dist/hive/hive-${HIVE_VERSION}/apache-hive-${HIVE_VERSION}-bin.tar.gz --output /tmp/apache-hive-${HIVE_VERSION}-bin.tar.gz && \
+#     tar xvfz /tmp/apache-hive-${HIVE_VERSION}-bin.tar.gz --directory=/opt/ && \
+#     ln -s /opt/apache-hive-${HIVE_VERSION}-bin /opt/hive && \
+#     rm -f /tmp/apache-hive-${HIVE_VERSION}-bin.tar.gz && \
+#     tar xvfz /home/ranger/dist/ranger-${RANGER_VERSION}-hive-plugin.tar.gz --directory=/opt/ranger && \
+#     ln -s /opt/ranger/ranger-${RANGER_VERSION}-hive-plugin /opt/ranger/ranger-hive-plugin && \
+#     rm -f /home/ranger/dist/ranger-${RANGER_VERSION}-hive-plugin.tar.gz
+# ENV HIVE_HOME        /opt/hive
+# ENV HIVE_CONF_DIR    /opt/hive/conf
+# ENV PATH /usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/hadoop/bin:/opt/hive/bin
+
+ENTRYPOINT [ "/home/ranger/scripts/ranger-hadoop.sh" ]
diff --git a/dev-support/ranger-docker/README.md b/dev-support/ranger-docker/README.md
index c042f80..42c866a 100644
--- a/dev-support/ranger-docker/README.md
+++ b/dev-support/ranger-docker/README.md
@@ -42,6 +42,9 @@ deploy Apache Ranger and its dependent services in containers.
    3.2. Execute following command to start Ranger and dependent services in containers:
         docker-compose -f docker-compose.ranger.yml up -d
 
+   3.3. Execute following command to start Ranger enabled Hadoop services (only HDFS for now) in a container:
+        docker-compose -f docker-compose.ranger.yml -f docker-compose.ranger-hadoop.yml up -d
+
 
 4. Alternatively docker command can be used to build and deploy Apache Ranger.
    4.1. Execute following command to build Docker image **ranger-build**:
@@ -73,4 +76,14 @@ deploy Apache Ranger and its dependent services in containers.
 
         This might take few minutes to complete.
 
+   4.8. Execute following command to build Docker image **ranger-hadoop**:
+        docker build -f Dockerfile.ranger-hadoop -t ranger-hadoop .
+
+        This step includes downloading Hadoop tarballs, and can take a while to complete.
+
+   4.9. Execute following command to install and run Ranger enabled Hadoop services (only HDFS for now) in a container:
+        docker run -it -d --name ranger-hadoop --hostname ranger-hadoop.example.com -p 9000:9000 --link ranger:ranger --link ranger-solr:ranger-solr ranger-hadoop
+
+        This might take a few minutes to complete.
+
 5. Ranger Admin can be accessed at http://localhost:6080 (admin/rangerR0cks!)
diff --git a/dev-support/ranger-docker/docker-compose.ranger-hadoop.yml b/dev-support/ranger-docker/docker-compose.ranger-hadoop.yml
new file mode 100644
index 0000000..a92f3d2
--- /dev/null
+++ b/dev-support/ranger-docker/docker-compose.ranger-hadoop.yml
@@ -0,0 +1,20 @@
+version: '3'
+services:
+  ranger-hadoop:
+    build:
+      context: .
+      dockerfile: Dockerfile.ranger-hadoop
+    image: ranger-hadoop
+    container_name: ranger-hadoop
+    hostname: ranger-hadoop.example.com
+    stdin_open: true
+    tty: true
+    networks:
+      - ranger
+    ports:
+      - "9000:9000"
+    depends_on:
+      - ranger
+
+networks:
+  ranger:
diff --git a/dev-support/ranger-docker/scripts/ranger-hadoop-setup.sh b/dev-support/ranger-docker/scripts/ranger-hadoop-setup.sh
new file mode 100755
index 0000000..ebf25ce
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ranger-hadoop-setup.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "export JAVA_HOME=${JAVA_HOME}" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
+
+cat <<EOF > /etc/ssh/ssh_config
+Host *
+   StrictHostKeyChecking no
+   UserKnownHostsFile=/dev/null
+EOF
+
+cat <<EOF > ${HADOOP_HOME}/etc/hadoop/core-site.xml
+<configuration>
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://ranger-hadoop:9000</value>
+  </property>
+</configuration>
+EOF
+
+cat <<EOF > ${HADOOP_HOME}/etc/hadoop/hdfs-site.xml
+<configuration>
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+</configuration>
+EOF
+
+cat <<EOF > ${HADOOP_HOME}/etc/hadoop/yarn-site.xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.env-whitelist</name>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
+  </property>
+</configuration>
+EOF
+
+chown -R hdfs:hadoop /opt/hadoop/
+
+cd ${RANGER_HOME}/ranger-hdfs-plugin
+./enable-hdfs-plugin.sh
diff --git a/dev-support/ranger-docker/scripts/ranger-hadoop.sh b/dev-support/ranger-docker/scripts/ranger-hadoop.sh
new file mode 100755
index 0000000..9d7ebf0
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ranger-hadoop.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service ssh start
+
+if [ ! -e ${HADOOP_HOME}/.setupDone ]
+then
+  su -c "ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa" hdfs
+  su -c "cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys" hdfs
+  su -c "chmod 0600 ~/.ssh/authorized_keys" hdfs
+
+  echo "ssh" > /etc/pdsh/rcmd_default
+
+  ${RANGER_SCRIPTS}/ranger-hadoop-setup.sh
+
+  su -c "${HADOOP_HOME}/bin/hdfs namenode -format" hdfs
+
+  touch ${HADOOP_HOME}/.setupDone
+fi
+
+su -c "${HADOOP_HOME}/sbin/start-dfs.sh" hdfs
+su -c "${HADOOP_HOME}/sbin/start-yarn.sh" hdfs
+
+# if [ ! -e ${HIVE_HOME}/.setupDone ]
+# then
+#   su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir /tmp" hdfs
+#   su -c "${HADOOP_HOME}/bin/hdfs dfs -mkdir /user/hive/warehouse" hdfs
+#   su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod g+w /tmp" hdfs
+#   su -c "${HADOOP_HOME}/bin/hdfs dfs -chmod g+w /user/hive/warehouse" hdfs
+#
+#   su -c "${HIVE_HOME}/bin/schematool -dbType postgres -initSchema" hive
+#
+#   touch ${HIVE_HOME}/.setupDone
+# fi
+#
+# su -c "${HIVE_HOME}/bin/hiveserver2" hive
+
+# prevent the container from exiting
+/bin/bash
diff --git a/dev-support/ranger-docker/scripts/ranger-hdfs-plugin-install.properties b/dev-support/ranger-docker/scripts/ranger-hdfs-plugin-install.properties
new file mode 100644
index 0000000..686cda0
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ranger-hdfs-plugin-install.properties
@@ -0,0 +1,76 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+POLICY_MGR_URL=http://ranger:6080
+REPOSITORY_NAME=dev_hdfs
+COMPONENT_INSTALL_DIR_NAME=/opt/hadoop
+
+CUSTOM_USER=hdfs
+CUSTOM_GROUP=hadoop
+
+XAAUDIT.SOLR.IS_ENABLED=true
+XAAUDIT.SOLR.MAX_QUEUE_SIZE=1
+XAAUDIT.SOLR.MAX_FLUSH_INTERVAL_MS=1000
+XAAUDIT.SOLR.SOLR_URL=http://ranger-solr:8983/solr/ranger_audits
+
+# Following properties are needed to get past installation script! Please don't remove
+XAAUDIT.HDFS.IS_ENABLED=false
+XAAUDIT.HDFS.DESTINATION_DIRECTORY=/ranger/audit
+XAAUDIT.HDFS.DESTINTATION_FILE=hadoop
+XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS=900
+XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS=86400
+XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS=60
+XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY=/var/log/hadoop/hdfs/audit
+XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY=/var/log/hadoop/hdfs/audit/archive
+XAAUDIT.HDFS.LOCAL_BUFFER_FILE=%time:yyyyMMdd-HHmm.ss%.log
+XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS=60
+XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS=600
+XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT=10
+
+XAAUDIT.SOLR.ENABLE=true
+XAAUDIT.SOLR.URL=http://ranger-solr:8983/solr/ranger_audits
+XAAUDIT.SOLR.USER=NONE
+XAAUDIT.SOLR.PASSWORD=NONE
+XAAUDIT.SOLR.ZOOKEEPER=NONE
+XAAUDIT.SOLR.FILE_SPOOL_DIR=/var/log/hadoop/hdfs/audit/solr/spool
+
+XAAUDIT.ELASTICSEARCH.ENABLE=false
+XAAUDIT.ELASTICSEARCH.URL=NONE
+XAAUDIT.ELASTICSEARCH.USER=NONE
+XAAUDIT.ELASTICSEARCH.PASSWORD=NONE
+XAAUDIT.ELASTICSEARCH.INDEX=NONE
+XAAUDIT.ELASTICSEARCH.PORT=NONE
+XAAUDIT.ELASTICSEARCH.PROTOCOL=NONE
+
+XAAUDIT.HDFS.ENABLE=false
+XAAUDIT.HDFS.HDFS_DIR=hdfs://localhost:9000/ranger/audit
+XAAUDIT.HDFS.FILE_SPOOL_DIR=/var/log/hadoop/hdfs/audit/hdfs/spool
+
+XAAUDIT.HDFS.AZURE_ACCOUNTNAME=__REPLACE_AZURE_ACCOUNT_NAME
+XAAUDIT.HDFS.AZURE_ACCOUNTKEY=__REPLACE_AZURE_ACCOUNT_KEY
+XAAUDIT.HDFS.AZURE_SHELL_KEY_PROVIDER=__REPLACE_AZURE_SHELL_KEY_PROVIDER
+XAAUDIT.HDFS.AZURE_ACCOUNTKEY_PROVIDER=__REPLACE_AZURE_ACCOUNT_KEY_PROVIDER
+
+XAAUDIT.LOG4J.ENABLE=false
+XAAUDIT.LOG4J.IS_ASYNC=false
+XAAUDIT.LOG4J.ASYNC.MAX.QUEUE.SIZE=10240
+XAAUDIT.LOG4J.ASYNC.MAX.FLUSH.INTERVAL.MS=30000
+XAAUDIT.LOG4J.DESTINATION.LOG4J=true
+XAAUDIT.LOG4J.DESTINATION.LOG4J.LOGGER=xaaudit
+
+SSL_KEYSTORE_FILE_PATH=/etc/hadoop/conf/ranger-plugin-keystore.jks
+SSL_KEYSTORE_PASSWORD=myKeyFilePassword
+SSL_TRUSTSTORE_FILE_PATH=/etc/hadoop/conf/ranger-plugin-truststore.jks
+SSL_TRUSTSTORE_PASSWORD=changeit
diff --git a/dev-support/ranger-docker/scripts/ranger-hdfs-service-dev_hdfs.py b/dev-support/ranger-docker/scripts/ranger-hdfs-service-dev_hdfs.py
new file mode 100644
index 0000000..27d20b7
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ranger-hdfs-service-dev_hdfs.py
@@ -0,0 +1,8 @@
+from apache_ranger.model.ranger_service     import RangerService
+from apache_ranger.client.ranger_client     import RangerClient
+
+ranger_client = RangerClient('http://ranger:6080', 'admin', 'rangerR0cks!')
+
+service = RangerService(name='dev_hdfs', type='hdfs', configs={'username':'hdfs', 'password':'hdfs', 'fs.default.name': 'hdfs://ranger-hadoop:9000', 'hadoop.security.authentication': 'simple', 'hadoop.security.authorization': 'true'})
+
+ranger_client.create_service(service)
diff --git a/dev-support/ranger-docker/scripts/ranger-hive-service-dev_hive.py b/dev-support/ranger-docker/scripts/ranger-hive-service-dev_hive.py
new file mode 100644
index 0000000..36a871c
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ranger-hive-service-dev_hive.py
@@ -0,0 +1,8 @@
+from apache_ranger.model.ranger_service     import RangerService
+from apache_ranger.client.ranger_client     import RangerClient
+
+ranger_client = RangerClient('http://ranger:6080', 'admin', 'rangerR0cks!')
+
+service = RangerService(name='dev_hive', type='hive', configs={'username':'hive', 'password':'hive', 'jdbc.driverClassName': 'org.apache.hive.jdbc.HiveDriver', 'jdbc.url': 'jdbc:hive2://ranger-hadoop:10000', 'hadoop.security.authorization': 'true'})
+
+ranger_client.create_service(service)
diff --git a/dev-support/ranger-docker/scripts/ranger.sh b/dev-support/ranger-docker/scripts/ranger.sh
index 47543d8..bf61968 100755
--- a/dev-support/ranger-docker/scripts/ranger.sh
+++ b/dev-support/ranger-docker/scripts/ranger.sh
@@ -43,5 +43,14 @@ fi
 cd ${RANGER_HOME}/admin
 ./ews/ranger-admin-services.sh start
 
+if [ "${SETUP_RANGER}" == "true" ]
+then
+  # Wait for Ranger Admin to become ready
+  sleep 30
+
+  python3 ${RANGER_SCRIPTS}/ranger-hdfs-service-dev_hdfs.py
+  python3 ${RANGER_SCRIPTS}/ranger-hive-service-dev_hive.py
+fi
+
 # prevent the container from exiting
 /bin/bash