Posted to commits@ranger.apache.org by sn...@apache.org on 2020/08/24 03:25:23 UTC

[ranger] branch master updated: RANGER-2961: Docker setup to run Ranger admin

This is an automated email from the ASF dual-hosted git repository.

sneethir pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ranger.git


The following commit(s) were added to refs/heads/master by this push:
     new d656288  RANGER-2961: Docker setup to run Ranger admin
d656288 is described below

commit d65628827bbaeb6b32c1bb52e93532ead56b7878
Author: Madhan Neethiraj <ma...@apache.org>
AuthorDate: Sat Aug 22 11:44:26 2020 -0700

    RANGER-2961: Docker setup to run Ranger admin
    
    Signed-off-by: Selvamohan Neethiraj <sn...@apache.org>
---
 dev-support/ranger-docker/.dockerignore            |    4 +
 dev-support/ranger-docker/.gitignore               |    1 -
 ...erfile.ranger-base-ubuntu => Dockerfile.ranger} |   23 +-
 ....ranger-base-ubuntu => Dockerfile.ranger-build} |   29 +-
 ....ranger-build-ubuntu => Dockerfile.ranger-solr} |   27 +-
 dev-support/ranger-docker/README.md                |   56 +-
 .../config/solr-ranger_audits/managed-schema       |   95 ++
 .../config/solr-ranger_audits/solrconfig.xml       | 1148 ++++++++++++++++++++
 .../{.dockerignore => dist/.gitignore}             |    0
 .../ranger-docker/docker-compose.ranger-build.yml  |   20 +
 .../ranger-docker/docker-compose.ranger.yml        |   48 +
 .../scripts/ranger-admin-install.properties        |   78 ++
 dev-support/ranger-docker/scripts/ranger-build.sh  |   31 +-
 .../scripts/{ranger-build.sh => ranger.sh}         |   41 +-
 14 files changed, 1516 insertions(+), 85 deletions(-)
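
To review the full change locally, the commit can be fetched from the repository
named above; a minimal sketch, assuming git is installed:

    # clone the repository referenced in this notification (skip if already cloned)
    git clone https://gitbox.apache.org/repos/asf/ranger.git
    cd ranger

    # show the complete patch for this push, scoped to the docker support folder
    git show d65628827bbaeb6b32c1bb52e93532ead56b7878 -- dev-support/ranger-docker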

diff --git a/dev-support/ranger-docker/.dockerignore b/dev-support/ranger-docker/.dockerignore
index 72e8ffc..cedefaa 100644
--- a/dev-support/ranger-docker/.dockerignore
+++ b/dev-support/ranger-docker/.dockerignore
@@ -1 +1,5 @@
 *
+!config
+!dist/version
+!dist/ranger-*-admin.tar.gz
+!scripts/*
diff --git a/dev-support/ranger-docker/.gitignore b/dev-support/ranger-docker/.gitignore
index a375c4c..12e97ed 100644
--- a/dev-support/ranger-docker/.gitignore
+++ b/dev-support/ranger-docker/.gitignore
@@ -4,7 +4,6 @@
 .metadata
 .classpath
 .project
-/dist/
 /target/
 .DS_Store
 .idea
diff --git a/dev-support/ranger-docker/Dockerfile.ranger-base-ubuntu b/dev-support/ranger-docker/Dockerfile.ranger
similarity index 67%
copy from dev-support/ranger-docker/Dockerfile.ranger-base-ubuntu
copy to dev-support/ranger-docker/Dockerfile.ranger
index 19cf4f0..3e54a30 100644
--- a/dev-support/ranger-docker/Dockerfile.ranger-base-ubuntu
+++ b/dev-support/ranger-docker/Dockerfile.ranger
@@ -18,29 +18,34 @@ FROM ubuntu:20.04
 
 # Install curl, wget, tzdata, Python, Java, python-requests
 RUN apt-get update && \
-    DEBIAN_FRONTEND="noninteractive" apt-get -y install curl wget tzdata python python3 python3-pip openjdk-8-jdk && \
+    DEBIAN_FRONTEND="noninteractive" apt-get -y install curl wget tzdata python python3 python3-pip openjdk-8-jdk bc iputils-ping && \
     curl https://bootstrap.pypa.io/get-pip.py --output /tmp/get-pip.py && \
     python2 /tmp/get-pip.py && \
     pip3 install requests && \
     pip install requests
 
 # Set environment variables
-ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
-ENV PATH /usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV JAVA_HOME      /usr/lib/jvm/java-8-openjdk-amd64
+ENV PATH           /usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV RANGER_DIST    /home/ranger/dist
+ENV RANGER_SCRIPTS /home/ranger/scripts
+ENV RANGER_HOME    /opt/ranger
 
 # setup ranger group, and users
 RUN groupadd ranger && \
     useradd -g ranger -ms /bin/bash ranger && \
     useradd -g ranger -ms /bin/bash rangeradmin && \
-    useradd -g ranger -ms /bin/bash rangerugsync && \
+    useradd -g ranger -ms /bin/bash rangerusersync && \
     useradd -g ranger -ms /bin/bash rangertagsync && \
     useradd -g ranger -ms /bin/bash rangerkms && \
     mkdir -p /home/ranger/dist && \
     mkdir -p /home/ranger/scripts && \
-	mkdir -p /home/ranger/bin/ && \
-	chown -R ranger:ranger /home/ranger
+	mkdir -p /opt/ranger && \
+	chown -R ranger:ranger /opt/ranger
 
-VOLUME /home/ranger/dist
-VOLUME /home/ranger/scripts
+COPY ./dist/version /home/ranger/dist/
+COPY ./dist/ranger-*-admin.tar.gz /home/ranger/dist/
+COPY ./scripts/ranger.sh /home/ranger/scripts/
+COPY ./scripts/ranger-admin-install.properties /home/ranger/scripts/
 
-ENTRYPOINT [ "/bin/bash" ]
+ENTRYPOINT [ "/home/ranger/scripts/ranger.sh" ]
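
Note: the COPY directives above require ./dist/version and a ranger-*-admin.tar.gz
to already exist in the build context, i.e. the build image (README step 3.1 or 4.2
below) must have produced the admin tarball first. A minimal sketch of checking the
context and building this image:

    # the Docker build fails unless the dist/ artifacts referenced by COPY are present
    ls dist/version dist/ranger-*-admin.tar.gz

    # build the runtime image from the new Dockerfile.ranger
    docker build -f Dockerfile.ranger -t ranger .
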
diff --git a/dev-support/ranger-docker/Dockerfile.ranger-base-ubuntu b/dev-support/ranger-docker/Dockerfile.ranger-build
similarity index 67%
rename from dev-support/ranger-docker/Dockerfile.ranger-base-ubuntu
rename to dev-support/ranger-docker/Dockerfile.ranger-build
index 19cf4f0..c5a11a0 100644
--- a/dev-support/ranger-docker/Dockerfile.ranger-base-ubuntu
+++ b/dev-support/ranger-docker/Dockerfile.ranger-build
@@ -18,29 +18,38 @@ FROM ubuntu:20.04
 
 # Install curl, wget, tzdata, Python, Java, python-requests
 RUN apt-get update && \
-    DEBIAN_FRONTEND="noninteractive" apt-get -y install curl wget tzdata python python3 python3-pip openjdk-8-jdk && \
+    DEBIAN_FRONTEND="noninteractive" apt-get -y install curl wget tzdata \
+	python python3 python3-pip openjdk-8-jdk bc iputils-ping git maven build-essential && \
     curl https://bootstrap.pypa.io/get-pip.py --output /tmp/get-pip.py && \
     python2 /tmp/get-pip.py && \
     pip3 install requests && \
     pip install requests
 
 # Set environment variables
-ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
-ENV PATH /usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV MAVEN_HOME     /usr/share/maven
+ENV JAVA_HOME      /usr/lib/jvm/java-8-openjdk-amd64
+ENV PATH           /usr/java/bin:/usr/local/apache-maven/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV RANGER_DIST    /home/ranger/dist
+ENV RANGER_SCRIPTS /home/ranger/scripts
 
 # setup ranger group, and users
 RUN groupadd ranger && \
     useradd -g ranger -ms /bin/bash ranger && \
-    useradd -g ranger -ms /bin/bash rangeradmin && \
-    useradd -g ranger -ms /bin/bash rangerugsync && \
-    useradd -g ranger -ms /bin/bash rangertagsync && \
-    useradd -g ranger -ms /bin/bash rangerkms && \
     mkdir -p /home/ranger/dist && \
     mkdir -p /home/ranger/scripts && \
-	mkdir -p /home/ranger/bin/ && \
+    mkdir -p /home/ranger/git && \
+    mkdir -p /home/ranger/.m2 && \
 	chown -R ranger:ranger /home/ranger
 
+COPY ./scripts/ranger-build.sh /home/ranger/scripts/
+
 VOLUME /home/ranger/dist
-VOLUME /home/ranger/scripts
+VOLUME /home/ranger/.m2
+
+USER ranger
+
+WORKDIR /home/ranger/git
+
+RUN git clone https://github.com/apache/ranger.git
 
-ENTRYPOINT [ "/bin/bash" ]
+ENTRYPOINT [ "/home/ranger/scripts/ranger-build.sh" ]
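
The build image clones apache/ranger at image-build time and its entrypoint runs
ranger-build.sh, with /home/ranger/.m2 and /home/ranger/dist declared as volumes so
the Maven cache and the generated tarballs can live on the host. A sketch of driving
it directly, mirroring the docker run command in the updated README below:

    # build the builder image (this step clones the Ranger git repository)
    docker build -f Dockerfile.ranger-build -t ranger-build .

    # run the build; mounting ~/.m2 reuses the host Maven cache across runs,
    # and ./dist receives the generated ranger-<version>-admin.tar.gz
    mkdir -p ./dist
    docker run -it --rm \
        -v ${HOME}/.m2:/home/ranger/.m2 \
        -v $(pwd)/dist:/home/ranger/dist \
        -e BRANCH=master -e PROFILE=all -e SKIPTESTS=true \
        ranger-build
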
diff --git a/dev-support/ranger-docker/Dockerfile.ranger-build-ubuntu b/dev-support/ranger-docker/Dockerfile.ranger-solr
similarity index 59%
rename from dev-support/ranger-docker/Dockerfile.ranger-build-ubuntu
rename to dev-support/ranger-docker/Dockerfile.ranger-solr
index 54b750f..bcfb488 100644
--- a/dev-support/ranger-docker/Dockerfile.ranger-build-ubuntu
+++ b/dev-support/ranger-docker/Dockerfile.ranger-solr
@@ -14,25 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM ranger-base-ubuntu:latest
+FROM solr:8
 
-# Install Git, Maven, gcc
-RUN apt-get update && \
-    apt-get -y install git maven build-essential
+# Copy audit config set
+USER 0
+RUN  mkdir -p /opt/solr/server/solr/configsets/ranger_audits/conf
+COPY config/solr-ranger_audits/* /opt/solr/server/solr/configsets/ranger_audits/conf/
+RUN chown -R solr:solr /opt/solr/server/solr/configsets/ranger_audits/
 
-# Set environment variables
-ENV MAVEN_HOME /usr/share/maven
-ENV PATH /usr/java/bin:/usr/local/apache-maven/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-
-USER ranger
-
-RUN mkdir -p /home/ranger/git && \
-    mkdir -p /home/ranger/.m2
-
-VOLUME /home/ranger/.m2
-
-WORKDIR /home/ranger/git
-
-RUN git clone https://github.com/apache/ranger.git
-
-ENTRYPOINT [ "/home/ranger/scripts/ranger-build.sh" ]
+USER solr
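
This image only layers the ranger_audits configset onto the stock solr:8 image; the
collection itself is created at container start via solr-precreate (README step 4.6
below). A quick check that the collection is serving, assuming the container is up
and port 8983 is published to the host:

    # an empty result set (numFound:0) is expected before any audit events arrive
    curl 'http://localhost:8983/solr/ranger_audits/select?q=*:*&rows=0'
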
diff --git a/dev-support/ranger-docker/README.md b/dev-support/ranger-docker/README.md
index 90c63bb..c042f80 100644
--- a/dev-support/ranger-docker/README.md
+++ b/dev-support/ranger-docker/README.md
@@ -19,26 +19,58 @@ under the License.
 
 ## Overview
 
-Dockerfile.build-ubuntu in this folder builds a Docker image to build Apache Ranger.
+Docker files in this folder create docker images to build Apache Ranger and
+deploy Apache Ranger and its dependent services in containers.
 
 ## Usage
 
 1. Ensure that you have a recent version of Docker installed from
    [docker.io](http://www.docker.io).
 
+
 2. Set this folder as your working directory.
 
-3. Execute following command to build a Docker image called **ranger-base-ubuntu**.
-       docker build -f Dockerfile.ranger-base-ubuntu -t ranger-base-ubuntu .
-   This might take about 10 minutes to complete.
 
-4. Execute following command to build a Docker image called **ranger-build-ubuntu**.
-       docker build -f Dockerfile.ranger-build-ubuntu -t ranger-build-ubuntu .
-   This might take about 10 minutes to complete.
+3. Using docker-compose is the simpler way to build and deploy Apache Ranger
+   in containers.
+   3.1. Execute following command to build Apache Ranger:
+        docker-compose -f docker-compose.ranger-build.yml up
+
+        Time taken to complete the build might vary (up to an hour), depending on
+        the status of the ${HOME}/.m2 directory cache.
+
+   3.2. Execute following command to start Ranger and dependent services in containers:
+        docker-compose -f docker-compose.ranger.yml up -d
+
+
+4. Alternatively docker command can be used to build and deploy Apache Ranger.
+   4.1. Execute following command to build Docker image **ranger-build**:
+        docker build -f Dockerfile.ranger-build -t ranger-build .
+
+        This might take about 10 minutes to complete.
+
+   4.2. Build Apache Ranger in a container with the following commands:
+        docker run -it --rm -v ${HOME}/.m2:/home/ranger/.m2 -v $(pwd)/dist:/home/ranger/dist -e BRANCH=master -e PROFILE=all -e SKIPTESTS=true ranger-build
+
+        Time taken to complete the build might vary (up to an hour), depending on the status of the ${HOME}/.m2 directory cache.
+
+   4.3. Execute following command to build Docker image **ranger**:
+        docker build -f Dockerfile.ranger -t ranger .
+
+        This might take about 10 minutes to complete.
+
+   4.4. Execute following command to build a Docker image **ranger-solr**:
+        docker build -f Dockerfile.ranger-solr -t ranger-solr .
+
+   4.5. Execute following command to start a container that runs database for use by Ranger Admin:
+        docker run --name ranger-db --hostname ranger-db.example.com -e POSTGRES_PASSWORD='rangerR0cks!' -d postgres:12
+
+   4.6. Execute following command to start a container that runs Solr for use by Ranger Admin:
+        docker run --name ranger-solr --hostname ranger-solr.example.com -p 8983:8983 -d ranger-solr solr-precreate ranger_audits /opt/solr/server/solr/configsets/ranger_audits/
+
+   4.7. Execute following command to install and run Ranger services in a container:
+        docker run -it -d --name ranger --hostname ranger.example.com -p 6080:6080 --link ranger-db:ranger-db --link ranger-solr:ranger-solr ranger
 
-5. Build Apache Ranger with the following commands:
-       mkdir -p ./dist
-       docker run -it --rm -v $(pwd)/scripts:/home/ranger/scripts -v ${HOME}/.m2:/home/ranger/.m2 -v $(pwd)/dist:/home/ranger/dist ranger-build-ubuntu -b master
-   Time taken to complete the build might vary (upto an hour), depending on status of ${HOME}/.m2 directory cache.
+        This might take a few minutes to complete.
 
-6. After completion of build, dist files (like ranger-admin-<version>.tar.gz) will be available under ./dist directory.
+5. Ranger Admin can be accessed at http://localhost:6080 (admin/rangerR0cks!)
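
Beyond the browser check in step 5, a REST call makes a quick smoke test once the
admin container is up; a minimal sketch, assuming the default credentials above and
Ranger's public v2 API:

    # list configured services; an empty list is expected on a fresh install
    curl -u admin:'rangerR0cks!' http://localhost:6080/service/public/v2/api/service
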
diff --git a/dev-support/ranger-docker/config/solr-ranger_audits/managed-schema b/dev-support/ranger-docker/config/solr-ranger_audits/managed-schema
new file mode 100644
index 0000000..c33f6de
--- /dev/null
+++ b/dev-support/ranger-docker/config/solr-ranger_audits/managed-schema
@@ -0,0 +1,95 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<schema name="ranger-audit-schema" version="1.6">
+  <uniqueKey>id</uniqueKey>
+  <fieldType name="binary" class="solr.BinaryField"/>
+  <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
+  <fieldType name="booleans" class="solr.BoolField" multiValued="true" sortMissingLast="true"/>
+  <fieldType name="date" class="solr.TrieDateField" docValues="true" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="double" class="solr.TrieDoubleField" docValues="true" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="float" class="solr.TrieFloatField" docValues="true" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="ignored" class="solr.StrField" multiValued="true" indexed="false" stored="false"/>
+  <fieldType name="int" class="solr.TrieIntField" docValues="true" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="key_lower_case" class="solr.TextField" sortMissingLast="true" omitNorms="true">
+    <analyzer>
+      <tokenizer class="solr.KeywordTokenizerFactory"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+      <filter class="solr.LengthFilterFactory" min="0" max="2500"/>
+    </analyzer>
+  </fieldType>
+  <fieldType name="long" class="solr.TrieLongField" docValues="true" precisionStep="0" positionIncrementGap="0"/>
+  <fieldType name="random" class="solr.RandomSortField" indexed="true"/>
+  <fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
+  <fieldType name="tdate" class="solr.TrieDateField" docValues="true" precisionStep="6" positionIncrementGap="0"/>
+  <fieldType name="tdates" class="solr.TrieDateField" docValues="true" precisionStep="6" multiValued="true" positionIncrementGap="0"/>
+  <fieldType name="tdouble" class="solr.TrieDoubleField" docValues="true" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="tdoubles" class="solr.TrieDoubleField" docValues="true" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+  <fieldType name="text_std_token_lower_case" class="solr.TextField" multiValued="true" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.StandardTokenizerFactory"/>
+      <filter class="solr.LowerCaseFilterFactory"/>
+    </analyzer>
+  </fieldType>
+  <fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
+    <analyzer>
+      <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+    </analyzer>
+  </fieldType>
+  <fieldType name="tfloat" class="solr.TrieFloatField" docValues="true" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="tfloats" class="solr.TrieFloatField" docValues="true" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+  <fieldType name="tint" class="solr.TrieIntField" docValues="true" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="tints" class="solr.TrieIntField" docValues="true" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+  <fieldType name="tlong" class="solr.TrieLongField" docValues="true" precisionStep="8" positionIncrementGap="0"/>
+  <fieldType name="tlongs" class="solr.TrieLongField" docValues="true" precisionStep="8" multiValued="true" positionIncrementGap="0"/>
+  <field name="_expire_at_" type="tdate" multiValued="false" stored="true" docValues="true"/>
+  <field name="_ttl_" type="string" multiValued="false" indexed="true" stored="true"/>
+  <field name="_version_" type="long" indexed="false" stored="true"/>
+  <field name="access" type="key_lower_case" multiValued="false"/>
+  <field name="action" type="key_lower_case" multiValued="false"/>
+  <field name="agent" type="key_lower_case" multiValued="false"/>
+  <field name="agentHost" type="key_lower_case" multiValued="false"/>
+  <field name="cliIP" type="key_lower_case" multiValued="false"/>
+  <field name="cliType" type="key_lower_case" multiValued="false"/>
+  <field name="cluster" type="key_lower_case" multiValued="false"/>
+  <field name="reqContext" type="key_lower_case" multiValued="true"/>
+  <field name="enforcer" type="key_lower_case" multiValued="false"/>
+  <field name="event_count" type="tlong" multiValued="false" docValues="true" default="1"/>
+  <field name="event_dur_ms" type="tlong" multiValued="false" docValues="true"/>
+  <field name="evtTime" type="tdate" docValues="true"/>
+  <field name="id" type="string" multiValued="false" indexed="true" required="true" stored="true"/>
+  <field name="logType" type="key_lower_case" multiValued="false"/>
+  <field name="policy" type="tlong" docValues="true"/>
+  <field name="proxyUsers" type="key_lower_case" multiValued="true"/>
+  <field name="reason" type="text_std_token_lower_case" multiValued="false" omitNorms="false"/>
+  <field name="repo" type="key_lower_case" multiValued="false"/>
+  <field name="repoType" type="tint" multiValued="false" docValues="true"/>
+  <field name="req_caller_id" type="key_lower_case" multiValued="false"/>
+  <field name="req_self_id" type="key_lower_case" multiValued="false"/>
+  <field name="reqData" type="text_std_token_lower_case" multiValued="false"/>
+  <field name="reqUser" type="key_lower_case" multiValued="false"/>
+  <field name="resType" type="key_lower_case" multiValued="false"/>
+  <field name="resource" type="key_lower_case" multiValued="false"/>
+  <field name="result" type="tint" multiValued="false"/>
+  <field name="seq_num" type="tlong" multiValued="false" docValues="true" default="0"/>
+  <field name="sess" type="key_lower_case" multiValued="false"/>
+  <field name="tags" type="key_lower_case" multiValued="true"/>
+  <field name="tags_str" type="text_std_token_lower_case" multiValued="false"/>
+  <field name="text" type="text_std_token_lower_case" multiValued="true" indexed="true" stored="false"/>
+  <field name="zoneName" type="key_lower_case" multiValued="false"/>
+  <field name="policyVersion" type="tlong" multiValued="false"/>
+</schema>
\ No newline at end of file
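
The fields above are the ones Ranger plugins populate when writing audit events to
Solr. Purely as an illustration of the schema (not how audits are produced in normal
operation), a hand-crafted document using a few of the declared fields can be posted
and queried back once the ranger_audits collection is running:

    # index a sample audit-shaped document (id, reqUser, access, resource, result, evtTime)
    curl -X POST 'http://localhost:8983/solr/ranger_audits/update?commit=true' \
        -H 'Content-Type: application/json' \
        -d '[{"id":"test-1","reqUser":"admin","access":"read","resource":"/tmp/test","result":1,"evtTime":"2020-08-24T00:00:00Z"}]'

    # verify the document is searchable
    curl 'http://localhost:8983/solr/ranger_audits/select?q=id:test-1'
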
diff --git a/dev-support/ranger-docker/config/solr-ranger_audits/solrconfig.xml b/dev-support/ranger-docker/config/solr-ranger_audits/solrconfig.xml
new file mode 100644
index 0000000..3389183
--- /dev/null
+++ b/dev-support/ranger-docker/config/solr-ranger_audits/solrconfig.xml
@@ -0,0 +1,1148 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+     http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>8.4.1</luceneMatchVersion>
+
+  <!-- <lib/> directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+       All directories and paths are resolved relative to the
+       instanceDir.
+       Please note that <lib/> directives are processed in the order
+       that they appear in your solrconfig.xml file, and are "stacked"
+       on top of each other when building a ClassLoader - so if you have
+       plugin jars with dependencies on other jars, the "lower level"
+       dependency jars should be loaded first.
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+              <lib dir="./lib" />
+    -->
+
+  <!-- A 'dir' option by itself adds any files found in the directory
+       to the classpath, this is useful for including all jars in a
+       directory.
+       When a 'regex' is specified in addition to a 'dir', only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+       If a 'dir' option (with or without a regex) is used and nothing
+       is found that matches, a warning will be logged.
+       The example below can be used to load a solr-contrib along
+       with their external dependencies.
+    -->
+    <!-- <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-ltr-\d.*\.jar" /> -->
+
+  <!-- an exact 'path' can be used instead of a 'dir' to specify a
+       specific jar file.  This will cause a serious error to be logged
+       if it can't be loaded.
+    -->
+  <!--
+     <lib path="../a-jar-that-does-not-exist.jar" />
+  -->
+
+  <lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-dataimporthandler-.*\.jar" />
+
+  <!-- Data Directory
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+       solr.RAMDirectoryFactory is memory based and not persistent.
+    -->
+
+  <directoryFactory name="DirectoryFactory"
+                    class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}">
+
+
+    <!-- These will be used if you are using the solr.HdfsDirectoryFactory,
+         otherwise they will be ignored. If you don't plan on using hdfs,
+         you can safely remove this section. -->
+    <!-- The root directory that collection data should be written to. -->
+    <str name="solr.hdfs.home">${solr.hdfs.home:}</str>
+    <!-- The hadoop configuration files to use for the hdfs client. -->
+    <str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
+    <!-- Enable/Disable the hdfs cache. -->
+    <str name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</str>
+    <!-- Enable/Disable using one global cache for all SolrCores.
+         The settings used will be from the first HdfsDirectoryFactory created. -->
+    <str name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</str>
+
+  </directoryFactory>
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official Lucene
+       index format, but hooks into the schema to provide per-field customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative implementations
+       are experimental, so if you choose to customize the index format, it's a good
+       idea to convert back to the official format e.g. via IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+       A "compressionMode" string element can be added to <codecFactory> to choose
+       between the existing compression modes in the default codec: "BEST_SPEED" (default)
+       or "BEST_COMPRESSION".
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+  <schemaFactory class="ManagedIndexSchemaFactory">
+      <bool name="mutable">true</bool>
+      <str name="managedSchemaResourceName">managed-schema</str>
+  </schemaFactory>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <indexConfig>
+    <!-- maxFieldLength was removed in 4.0. To get similar behavior, include a
+         LimitTokenCountFilterFactory in your fieldType definition. E.g.
+     <filter class="solr.LimitTokenCountFilterFactory" maxTokenCount="10000"/>
+    -->
+    <!-- Maximum time to wait for a write lock (ms) for an IndexWriter. Default: 1000 -->
+    <!-- <writeLockTimeout>1000</writeLockTimeout>  -->
+
+    <!-- Expert: Enabling compound file will use less files for the index,
+         using fewer file descriptors on the expense of performance decrease.
+         Default in Lucene is "true". Default in Solr is "false" (since 3.6) -->
+    <!-- <useCompoundFile>false</useCompoundFile> -->
+
+    <!-- ramBufferSizeMB sets the amount of RAM that may be used by Lucene
+         indexing for buffering added documents and deletions before they are
+         flushed to the Directory.
+         maxBufferedDocs sets a limit on the number of documents buffered
+         before flushing.
+         If both ramBufferSizeMB and maxBufferedDocs is set, then
+         Lucene will flush based on whichever limit is hit first.  -->
+    <ramBufferSizeMB>128</ramBufferSizeMB>
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <!-- Expert: ramPerThreadHardLimitMB sets the maximum amount of RAM that can be consumed
+         per thread before they are flushed. When limit is exceeded, this triggers a forced
+         flush even if ramBufferSizeMB has not been exceeded.
+         This is a safety limit to prevent Lucene's DocumentsWriterPerThread from address space
+         exhaustion due to its internal 32 bit signed integer based memory addressing.
+         The specified value should be greater than 0 and less than 2048MB. When not specified,
+         Solr uses Lucene's default value 1945. -->
+    <!-- <ramPerThreadHardLimitMB>1945</ramPerThreadHardLimitMB> -->
+
+    <!-- Expert: Merge Policy
+         The Merge Policy in Lucene controls how merging of segments is done.
+         The default since Solr/Lucene 3.3 is TieredMergePolicy.
+         The default since Lucene 2.3 was the LogByteSizeMergePolicy,
+         Even older versions of Lucene used LogDocMergePolicy.
+      -->
+    <!--
+        <mergePolicyFactory class="org.apache.solr.index.TieredMergePolicyFactory">
+          <int name="maxMergeAtOnce">10</int>
+          <int name="segmentsPerTier">10</int>
+          <double name="noCFSRatio">0.1</double>
+        </mergePolicyFactory>
+      -->
+
+    <!-- Expert: Merge Scheduler
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!--
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+
+    <!-- LockFactory
+         This option specifies which Lucene LockFactory implementation
+         to use.
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Commit Deletion Policy
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+         The default Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <!--
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+    -->
+    <!-- The number of commit points to be kept -->
+    <!-- <str name="maxCommitsToKeep">1</str> -->
+    <!-- The number of optimized commit points to be kept -->
+    <!-- <str name="maxOptimizedCommitsToKeep">0</str> -->
+    <!--
+        Delete all commit points once they have reached the given age.
+        Supports DateMathParser syntax e.g.
+      -->
+    <!--
+       <str name="maxCommitAge">30MINUTES</str>
+       <str name="maxCommitAge">1DAY</str>
+    -->
+    <!--
+    </deletionPolicy>
+    -->
+
+    <!-- Lucene Infostream
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+         Setting The value to true will instruct the underlying Lucene
+         IndexWriter to write its debugging info the specified file
+      -->
+    <!-- <infoStream file="INFOSTREAM.txt">false</infoStream> -->
+  </indexConfig>
+
+
+  <!-- JMX
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability, and
+         and solr cloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.
+         "numVersionBuckets" - sets the number of buckets used to keep
+                track of max version values when checking for re-ordered
+                updates; increase this value to reduce the cost of
+                synchronizing access to version buckets during high-volume
+                indexing, this requires 8 bytes (long) * numVersionBuckets
+                of heap space per Solr core.
+    -->
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+      <int name="tlogDfsReplication">${solr.ulog.tlogDfsReplication:3}</int>
+      <int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
+    </updateLog>
+
+    <!-- AutoCommit
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents.
+         http://wiki.apache.org/solr/UpdateXmlMessages
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+    <autoCommit>
+      <maxTime>${solr.autoCommit.maxTime:60000}</maxTime>
+      <openSearcher>false</openSearcher>
+    </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+
+    <autoSoftCommit>
+      <maxTime>${solr.autoSoftCommit.maxTime:15000}</maxTime>
+    </autoSoftCommit>
+
+    <!-- Update Related Event Listeners
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+    <!-- The RunExecutableListener executes an external command from a
+         hook such as postCommit or postOptimize.
+         exe - the name of the executable to run
+         dir - dir to use as the current working directory. (default=".")
+         wait - the calling thread waits until the executable returns.
+                (default="true")
+         args - the arguments to pass to the program.  (default is none)
+         env - environment variables to set.  (default is none)
+      -->
+    <!-- This example shows how RunExecutableListener could be used
+         with the script based replication...
+         http://wiki.apache.org/solr/CollectionDistribution
+      -->
+    <!--
+       <listener event="postCommit" class="solr.RunExecutableListener">
+         <str name="exe">solr/bin/snapshooter</str>
+         <str name="dir">.</str>
+         <bool name="wait">true</bool>
+         <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
+         <arr name="env"> <str>MYVAR=val1</str> </arr>
+       </listener>
+      -->
+
+  </updateHandler>
+
+  <!-- IndexReaderFactory
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+       ** Experimental Feature **
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+       ** Features that may not work with custom IndexReaderFactory **
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
+  <query>
+
+    <!-- Maximum number of clauses allowed when parsing a boolean query string.
+
+         This limit only impacts boolean queries specified by a user as part of a query string,
+         and provides per-collection controls on how complex user specified boolean queries can
+         be.  Query strings that specify more clauses then this will result in an error.
+
+         If this per-collection limit is greater then the global `maxBooleanClauses` limit
+         specified in `solr.xml`, it will have no effect, as that setting also limits the size
+         of user specified boolean queries.
+      -->
+    <maxBooleanClauses>${solr.max.booleanClauses:1024}</maxBooleanClauses>
+
+    <!-- Solr Internal Query Caches
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+         Parameters:
+           class - the SolrCache implementation LRUCache or
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               and old cache.
+           maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                      to occupy. Note that when this option is specified, the size
+                      and initialSize parameters are ignored.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+         Caches results of searches - ordered lists of document ids
+         (DocList) based on a query, a sort, and the range of documents requested.
+         Additional supported parameter by LRUCache:
+            maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
+                       to occupy
+      -->
+    <queryResultCache class="solr.LRUCache"
+                      size="512"
+                      initialSize="512"
+                      autowarmCount="0"/>
+
+    <!-- Document Cache
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+
+    <!-- custom cache currently used by block join -->
+    <cache name="perSegFilter"
+           class="solr.search.LRUCache"
+           size="10"
+           initialSize="0"
+           autowarmCount="10"
+           regenerator="solr.NoOpRegenerator" />
+
+    <!-- Field Value Cache
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Custom Cache
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(),cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator
+         if autowarming is desired.
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+    <!-- Use Filter For Sorted Query
+         A possible optimization that attempts to use a filter to
+         satisfy a search.  If the requested sort does not include
+         score, then the filterCache will be checked for a filter
+         matching the query. If found, the filter will be used as the
+         source of document ids, and then the sort will be applied to
+         that.
+         For most situations, this will not be useful unless you
+         frequently get the same search repeatedly with different sort
+         options, and none of them ever use "score"
+      -->
+    <!--
+       <useFilterForSortedQuery>true</useFilterForSortedQuery>
+      -->
+
+    <!-- Result Window Size
+         An optimization for use with the queryResultCache.  When a search
+         is requested, a superset of the requested number of document ids
+         are collected.  For example, if a search for a particular query
+         requests matching documents 10 through 19, and queryWindowSize is 50,
+         then documents 0 through 49 will be collected and cached.  Any further
+         requests in that range can be satisfied via the cache.
+      -->
+    <queryResultWindowSize>20</queryResultWindowSize>
+
+    <!-- Maximum number of documents to cache for any entry in the
+         queryResultCache.
+      -->
+    <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+    <!-- Query Related Event Listeners
+         Various IndexSearcher related events can trigger Listeners to
+         take actions.
+         newSearcher - fired whenever a new searcher is being prepared
+         and there is a current searcher handling requests (aka
+         registered).  It can be used to prime certain caches to
+         prevent long request times for certain requests.
+         firstSearcher - fired whenever a new searcher is being
+         prepared but there is no current registered searcher to handle
+         requests or to gain autowarming data from.
+      -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence.
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+        <lst>
+          <str name="q">static firstSearcher warming in solrconfig.xml</str>
+        </lst>
+        -->
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+    <!-- Slow Query Request Logging
+         Any queries that take longer than the specified threshold
+         will be logged as "slow" queries.
+         To disable slow request logging for this Solr config,
+         set the value to -1
+      -->
+    <slowQueryThresholdMillis>5000</slowQueryThresholdMillis>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+    -->
+  <requestDispatcher>
+    <!-- Request Parsing
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom
+         plugins.
+         *** WARNING ***
+         Before enabling remote streaming, you should make sure your
+         system has authentication enabled.
+      -->
+    <requestParsers enableRemoteStreaming="true"
+                    multipartUploadLimitInKB="2048000"
+                    formdataUploadLimitInKB="2048"
+                    addHttpRequestToContext="true"/>
+
+    <!-- HTTP Caching
+         Set HTTP caching related parameters (for proxy caches and clients).
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+         By default, no Cache-Control header is generated.
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to response to Cache Validation requests
+         correctly, set the value of never304="false"
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+         The following options can also be specified to affect the
+         values of these headers...
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+         (lastModifiedFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModifiedFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl>
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers
+       http://wiki.apache.org/solr/SolrRequestHandler
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+    -->
+  <!-- SearchHandler
+       http://wiki.apache.org/solr/SearchHandler
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler" It delegates to a sequent
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <int name="rows">10</int>
+      <!-- Default search field
+         <str name="df">text</str>
+        -->
+      <!-- Change from JSON to XML format (the default prior to Solr 7.0)
+         <str name="wt">xml</str>
+        -->
+    </lst>
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+  </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+      <str name="wt">json</str>
+      <str name="indent">true</str>
+    </lst>
+  </requestHandler>
+
+  <initParams path="/update/**,/query,/select,/spell">
+    <lst name="defaults">
+      <str name="df">_text_</str>
+    </lst>
+  </initParams>
+
+  <!-- Search Components
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+       By default, the following components are available:
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+       Default configuration in a requestHandler would look like:
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+       If you register a searchComponent to one of the standard names,
+       that will be used instead of the default.
+       To insert components before or after the 'standard' components, use:
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components"
+     -->
+
+  <!-- Spell Check
+       The spell check component can return a list of alternative spelling
+       suggestions.
+       http://wiki.apache.org/solr/SpellCheckComponent
+    -->
+  <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">text_general</str>
+
+    <!-- Multiple "Spell Checkers" can be declared and used by this
+         component
+      -->
+
+    <!-- a spellchecker built from a field of the main index -->
+    <lst name="spellchecker">
+      <str name="name">default</str>
+      <str name="field">_text_</str>
+      <str name="classname">solr.DirectSolrSpellChecker</str>
+      <!-- the spellcheck distance measure used, the default is the internal levenshtein -->
+      <str name="distanceMeasure">internal</str>
+      <!-- minimum accuracy needed to be considered a valid spellcheck suggestion -->
+      <float name="accuracy">0.5</float>
+      <!-- the maximum #edits we consider when enumerating terms: can be 1 or 2 -->
+      <int name="maxEdits">2</int>
+      <!-- the minimum shared prefix when enumerating terms -->
+      <int name="minPrefix">1</int>
+      <!-- maximum number of inspections per result. -->
+      <int name="maxInspections">5</int>
+      <!-- minimum length of a query term to be considered for correction -->
+      <int name="minQueryLength">4</int>
+      <!-- maximum fraction of documents a query term may appear in and still be considered for correction -->
+      <float name="maxQueryFrequency">0.01</float>
+      <!-- uncomment this to require suggestions to occur in 1% of the documents
+        <float name="thresholdTokenFrequency">.01</float>
+      -->
+    </lst>
+
+    <!-- a spellchecker that can break or combine words.  See "/spell" handler below for usage -->
+    <!--
+    <lst name="spellchecker">
+      <str name="name">wordbreak</str>
+      <str name="classname">solr.WordBreakSolrSpellChecker</str>
+      <str name="field">name</str>
+      <str name="combineWords">true</str>
+      <str name="breakWords">true</str>
+      <int name="maxChanges">10</int>
+    </lst>
+    -->
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the spellcheck component.
+       NOTE: This is purely an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <!-- Solr will use suggestions from both the 'default' spellchecker
+           and from the 'wordbreak' spellchecker and combine them.
+           Collations (re-written queries) can include a combination of
+           corrections from both spellcheckers -->
+      <str name="spellcheck.dictionary">default</str>
+      <str name="spellcheck">on</str>
+      <str name="spellcheck.extendedResults">true</str>
+      <str name="spellcheck.count">10</str>
+      <str name="spellcheck.alternativeTermCount">5</str>
+      <str name="spellcheck.maxResultsForSuggest">5</str>
+      <str name="spellcheck.collate">true</str>
+      <str name="spellcheck.collateExtendedResults">true</str>
+      <str name="spellcheck.maxCollationTries">10</str>
+      <str name="spellcheck.maxCollations">5</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Terms Component
+       http://wiki.apache.org/solr/TermsComponent
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Highlighting Component
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap"
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter
+           (for sentence extraction)
+        -->
+      <fragmenter name="regex"
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html"
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<em>]]></str>
+          <str name="hl.simple.post"><![CDATA[</em>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html"
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single"
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- Configure the weighted fragListBuilder -->
+      <fragListBuilder name="weighted"
+                       default="true"
+                       class="solr.highlight.WeightedFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default"
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!--
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawngreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+
+      <boundaryScanner name="default"
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+
+      <boundaryScanner name="breakIterator"
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing the Locale object  -->
+          <!-- and the Locale object is used when getting an instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
+
+  <!-- Update Processors
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+    -->
+
+  <!-- Add unknown fields to the schema
+       Field type guessing update processors that will
+       attempt to parse string-typed field values as Booleans, Longs,
+       Doubles, or Dates, and then add schema fields with the guessed
+       field types. Text content will be indexed as "text_general" as
+       well as a copy to a plain string version in *_str.
+       These require that the schema is both managed and mutable, by
+       declaring schemaFactory as ManagedIndexSchemaFactory, with
+       mutable specified as true.
+       See http://wiki.apache.org/solr/GuessingFieldTypes
+    -->
+  <updateProcessor class="solr.UUIDUpdateProcessorFactory" name="uuid"/>
+  <updateProcessor class="solr.RemoveBlankFieldUpdateProcessorFactory" name="remove-blank"/>
+  <updateProcessor class="solr.FieldNameMutatingUpdateProcessorFactory" name="field-name-mutating">
+    <str name="pattern">[^\w-\.]</str>
+    <str name="replacement">_</str>
+  </updateProcessor>
+  <updateProcessor class="solr.ParseBooleanFieldUpdateProcessorFactory" name="parse-boolean"/>
+  <updateProcessor class="solr.ParseLongFieldUpdateProcessorFactory" name="parse-long"/>
+  <updateProcessor class="solr.ParseDoubleFieldUpdateProcessorFactory" name="parse-double"/>
+  <updateProcessor class="solr.ParseDateFieldUpdateProcessorFactory" name="parse-date">
+    <arr name="format">
+      <str>yyyy-MM-dd['T'[HH:mm[:ss[.SSS]][z</str>
+      <str>yyyy-MM-dd['T'[HH:mm[:ss[,SSS]][z</str>
+      <str>yyyy-MM-dd HH:mm[:ss[.SSS]][z</str>
+      <str>yyyy-MM-dd HH:mm[:ss[,SSS]][z</str>
+      <str>[EEE, ]dd MMM yyyy HH:mm[:ss] z</str>
+      <str>EEEE, dd-MMM-yy HH:mm:ss z</str>
+      <str>EEE MMM ppd HH:mm:ss [z ]yyyy</str>
+    </arr>
+  </updateProcessor>
+  <updateProcessor class="solr.AddSchemaFieldsUpdateProcessorFactory" name="add-schema-fields">
+    <str name="defaultFieldType">key_lower_case</str>
+    <lst name="typeMapping">
+      <str name="valueClass">java.lang.Boolean</str>
+      <str name="fieldType">boolean</str>
+    </lst>
+    <lst name="typeMapping">
+      <str name="valueClass">java.util.Date</str>
+      <str name="fieldType">tdate</str>
+    </lst>
+    <lst name="typeMapping">
+      <str name="valueClass">java.lang.Long</str>
+      <str name="valueClass">java.lang.Integer</str>
+      <str name="fieldType">tlong</str>
+    </lst>
+    <lst name="typeMapping">
+      <str name="valueClass">java.lang.Number</str>
+      <str name="fieldType">tdouble</str>
+    </lst>
+  </updateProcessor>
+
+  <!-- The update.autoCreateFields property can be turned to false to disable schemaless mode -->
+  <updateRequestProcessorChain name="add-unknown-fields-to-the-schema" default="${update.autoCreateFields:true}"
+           processor="uuid,remove-blank,field-name-mutating,parse-boolean,parse-long,parse-double,parse-date,add-schema-fields">
+    <processor class="solr.LogUpdateProcessorFactory"/>
+    <processor class="solr.DistributedUpdateProcessorFactory"/>
+    <processor class="solr.RunUpdateProcessorFactory"/>
+  </updateRequestProcessorChain>
+
+  <!-- Deduplication
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+  <!-- Response Writers
+       http://wiki.apache.org/solr/QueryResponseWriter
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml"
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+     <queryResponseWriter name="schema.xml" class="solr.SchemaXmlResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+    <!-- For the purposes of the tutorial, JSON responses are written as
+     plain text so that they are easy to read in *any* browser.
+     If you want a MIME type of "application/json", just remove this override.
+    -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
+
+  <!-- Query Parsers
+       https://lucene.apache.org/solr/guide/query-syntax-and-parsing.html
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+       http://wiki.apache.org/solr/FunctionQuery
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc"
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+
+  <!-- Document Transformers
+       http://wiki.apache.org/solr/DocTransformers
+    -->
+  <!--
+     Could be something like:
+     <transformer name="db" class="com.mycompany.LoadFromDatabaseTransformer" >
+       <int name="connection">jdbc://....</int>
+     </transformer>
+     To add a constant value to all docs, use:
+     <transformer name="mytrans2" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <int name="value">5</int>
+     </transformer>
+     If you want the user to still be able to change it with _value:something_ use this:
+     <transformer name="mytrans3" class="org.apache.solr.response.transform.ValueAugmenterFactory" >
+       <double name="defaultValue">5</double>
+     </transformer>
+      If you are using the QueryElevationComponent, you may wish to mark documents that get boosted.  The
+      EditorialMarkerFactory will do exactly that:
+     <transformer name="qecBooster" class="org.apache.solr.response.transform.EditorialMarkerFactory" />
+    -->
+</config>
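
Once the ranger-solr container defined in docker-compose.ranger.yml (below) is up with this configset, the handlers declared above can be exercised directly from the host. A minimal check, assuming the default 8983:8983 port mapping and the ranger_audits core name used in this commit:

    # list a few audit documents through the /select handler
    curl 'http://localhost:8983/solr/ranger_audits/select?q=*:*&rows=5&wt=json'
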
diff --git a/dev-support/ranger-docker/.dockerignore b/dev-support/ranger-docker/dist/.gitignore
similarity index 100%
copy from dev-support/ranger-docker/.dockerignore
copy to dev-support/ranger-docker/dist/.gitignore
diff --git a/dev-support/ranger-docker/docker-compose.ranger-build.yml b/dev-support/ranger-docker/docker-compose.ranger-build.yml
new file mode 100644
index 0000000..c17361a
--- /dev/null
+++ b/dev-support/ranger-docker/docker-compose.ranger-build.yml
@@ -0,0 +1,20 @@
+version: '3'
+services:
+  ranger-build:
+    build:
+      context: .
+      dockerfile: Dockerfile.ranger-build
+    image: ranger-build
+    container_name: ranger-build
+    hostname: ranger-build.example.com
+    networks:
+      - ranger
+    volumes:
+      - ~/.m2:/home/ranger/.m2
+      - ./dist:/home/ranger/dist
+    environment:
+      BRANCH: 'master'
+      SKIPTESTS: 'true'
+
+networks:
+  ranger:
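
This compose file runs the Maven build inside a container: the host's ~/.m2 is mounted as the Maven cache and the build output is written to ./dist, which ranger.sh later unpacks inside the ranger container. A minimal invocation sketch, assuming Docker Compose is available on the host:

    # build the ranger-build image and build the 'master' branch
    docker-compose -f docker-compose.ranger-build.yml up
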
diff --git a/dev-support/ranger-docker/docker-compose.ranger.yml b/dev-support/ranger-docker/docker-compose.ranger.yml
new file mode 100644
index 0000000..2c6a3c9
--- /dev/null
+++ b/dev-support/ranger-docker/docker-compose.ranger.yml
@@ -0,0 +1,48 @@
+version: '3'
+services:
+  ranger:
+    build:
+      context: .
+      dockerfile: Dockerfile.ranger
+    image: ranger:latest
+    container_name: ranger
+    hostname: ranger.example.com
+    stdin_open: true
+    tty: true
+    networks:
+      - ranger
+    ports:
+      - "6080:6080"
+    depends_on:
+      - ranger-db
+      - ranger-solr
+    command:
+      - /home/ranger/scripts/ranger.sh
+
+  ranger-solr:
+    build:
+      context: .
+      dockerfile: Dockerfile.ranger-solr
+    image: ranger-solr
+    container_name: ranger-solr
+    hostname: ranger-solr.example.com
+    networks:
+      - ranger
+    ports:
+      - "8983:8983"
+    command:
+      - solr-precreate
+      - ranger_audits
+      - /opt/solr/server/solr/configsets/ranger_audits/
+
+  ranger-db:
+    image: postgres:12
+    container_name: ranger-db
+    hostname: ranger-db.example.com
+    networks:
+      - ranger
+    environment:
+      POSTGRES_PASSWORD: rangerR0cks!
+
+networks:
+  ranger:
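
This compose file brings up Ranger admin together with a PostgreSQL 12 database and a Solr instance whose ranger_audits core is pre-created from the configset above; the admin web port 6080 is published to the host. A minimal start-and-verify sketch:

    # start the ranger, ranger-db and ranger-solr containers
    docker-compose -f docker-compose.ranger.yml up -d

    # watch ranger.sh run setup and start the admin service
    docker logs -f ranger

    # the admin UI should then be reachable at http://localhost:6080
    #   (admin / rangerR0cks!, per ranger-admin-install.properties below)
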
diff --git a/dev-support/ranger-docker/scripts/ranger-admin-install.properties b/dev-support/ranger-docker/scripts/ranger-admin-install.properties
new file mode 100644
index 0000000..9bb8c75
--- /dev/null
+++ b/dev-support/ranger-docker/scripts/ranger-admin-install.properties
@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This file provides a list of the deployment variables for the Policy Manager Web Application
+#
+
+PYTHON_COMMAND_INVOKER=python
+RANGER_ADMIN_LOG_DIR=/var/log/ranger
+RANGER_PID_DIR_PATH=/var/run/ranger
+DB_FLAVOR=POSTGRES
+SQL_CONNECTOR_JAR=/usr/share/java/postgresql.jar
+
+db_root_user=postgres
+db_root_password=rangerR0cks!
+db_host=ranger-db
+
+db_name=ranger
+db_user=rangeradmin
+db_password=rangerR0cks!
+
+postgres_core_file=db/postgres/optimized/current/ranger_core_db_postgres.sql
+postgres_audit_file=db/postgres/xa_audit_db_postgres.sql
+
+rangerAdmin_password=rangerR0cks!
+rangerTagsync_password=rangerR0cks!
+rangerUsersync_password=rangerR0cks!
+keyadmin_password=rangerR0cks!
+
+
+audit_store=solr
+audit_solr_urls=http://ranger-solr:8983/solr/ranger_audits
+audit_solr_collection_name=ranger_audits
+
+policymgr_external_url=http://ranger-admin:6080
+policymgr_http_enabled=true
+
+unix_user=rangerusersync
+unix_user_pwd=ranger
+unix_group=ranger
+
+# Following variables are referenced in db_setup.py. Do not remove these
+mysql_core_file=
+oracle_core_file=
+sqlserver_core_file=
+sqlanywhere_core_file=
+cred_keystore_filename=
+
+# #################  DO NOT MODIFY ANY VARIABLES BELOW #########################
+#
+# --- These deployment variables are not to be modified unless you understand the full impact of the changes
+#
+################################################################################
+XAPOLICYMGR_DIR=$PWD
+app_home=$PWD/ews/webapp
+TMPFILE=$PWD/.fi_tmp
+LOGFILE=$PWD/logfile
+LOGFILES="$LOGFILE"
+
+JAVA_BIN='java'
+JAVA_VERSION_REQUIRED='1.8'
+
+ranger_admin_max_heap_size=1g
+#retry DB and Java patches after the given time in seconds.
+PATCH_RETRY_INTERVAL=120
+STALE_PATCH_ENTRY_HOLD_TIME=10
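
These properties drive the admin setup.sh, which ranger.sh (below) copies in as install.properties before running it: the database section points at the ranger-db container, and the audit section points at the ranger_audits collection on ranger-solr. A quick way to confirm the database side after setup, assuming the psql client bundled in the postgres:12 image:

    # list databases in the ranger-db container; 'ranger' should appear after setup
    docker exec -it ranger-db psql -U postgres -c '\l'
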
diff --git a/dev-support/ranger-docker/scripts/ranger-build.sh b/dev-support/ranger-docker/scripts/ranger-build.sh
index a89ea2a..256b5a1 100755
--- a/dev-support/ranger-docker/scripts/ranger-build.sh
+++ b/dev-support/ranger-docker/scripts/ranger-build.sh
@@ -16,19 +16,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-BRANCH=master
-PROFILES=
-SKIP_TESTS="-DskipTests=true"
-
-while getopts ":b:p:s:" arg
-do
-  case $arg in
-    b) BRANCH=$OPTARG;;
-    p) PROFILES="-P \"$OPTARG\"";;
-    s) SKIP_TESTS="-DskipTests=$OPTARG";;
-  esac
-done
-
+if [ "${BRANCH}" == "" ]
+then
+  BRANCH=master
+fi
+
+if [ "${PROFILE}" != "" ]
+then
+  ARG_PROFILES="-P ${PROFILE}"
+fi
+
+if [ "${SKIPTESTS}" == "" ]
+then
+  ARG_SKIPTESTS="-DskipTests"
+else
+  ARG_SKIPTESTS="-DskipTests=${SKIPTESTS}"
+fi
 
 export MAVEN_OPTS="-Xms2g -Xmx2g"
 export M2=/home/ranger/.m2
@@ -38,7 +41,7 @@ cd /home/ranger/git/ranger
 git checkout ${BRANCH}
 git pull
 
-mvn ${PROFILES} ${SKIP_TESTS} -DskipDocs clean package
+mvn ${ARG_PROFILES} ${ARG_SKIPTESTS} -DskipDocs clean package
 
 mv -f target/version /home/ranger/dist/
 mv -f target/ranger-* /home/ranger/dist/
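
With this change the build parameters come from the environment (BRANCH, PROFILE, SKIPTESTS) instead of getopts flags, matching the environment section of docker-compose.ranger-build.yml. A sketch of overriding them for a one-off build (the branch name here is only an example):

    # build a different branch and run the tests
    docker-compose -f docker-compose.ranger-build.yml run --rm \
        -e BRANCH=ranger-2.1 -e SKIPTESTS=false ranger-build
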
diff --git a/dev-support/ranger-docker/scripts/ranger-build.sh b/dev-support/ranger-docker/scripts/ranger.sh
similarity index 51%
copy from dev-support/ranger-docker/scripts/ranger-build.sh
copy to dev-support/ranger-docker/scripts/ranger.sh
index a89ea2a..47543d8 100755
--- a/dev-support/ranger-docker/scripts/ranger-build.sh
+++ b/dev-support/ranger-docker/scripts/ranger.sh
@@ -16,29 +16,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-BRANCH=master
-PROFILES=
-SKIP_TESTS="-DskipTests=true"
+export RANGER_VERSION=`cat ${RANGER_DIST}/version`
 
-while getopts ":b:p:s:" arg
-do
-  case $arg in
-    b) BRANCH=$OPTARG;;
-    p) PROFILES="-P \"$OPTARG\"";;
-    s) SKIP_TESTS="-DskipTests=$OPTARG";;
-  esac
-done
 
+if [ -e ${RANGER_HOME}/admin ]
+then
+  SETUP_RANGER=false
+else
+  SETUP_RANGER=true
+fi
 
-export MAVEN_OPTS="-Xms2g -Xmx2g"
-export M2=/home/ranger/.m2
+if [ "${SETUP_RANGER}" == "true" ]
+then
+  # Download PostgreSQL JDBC library
+  wget "https://search.maven.org/remotecontent?filepath=org/postgresql/postgresql/42.2.16.jre7/postgresql-42.2.16.jre7.jar" -O /usr/share/java/postgresql.jar
 
-cd /home/ranger/git/ranger
+  cd ${RANGER_HOME}
+  tar xvfz ${RANGER_DIST}/ranger-${RANGER_VERSION}-admin.tar.gz --directory=${RANGER_HOME}
+  ln -s ranger-${RANGER_VERSION}-admin admin
+  cp -f ${RANGER_SCRIPTS}/ranger-admin-install.properties admin/install.properties
 
-git checkout ${BRANCH}
-git pull
+  cd ${RANGER_HOME}/admin
+  ./setup.sh
+fi
 
-mvn ${PROFILES} ${SKIP_TESTS} -DskipDocs clean package
+cd ${RANGER_HOME}/admin
+./ews/ranger-admin-services.sh start
 
-mv -f target/version /home/ranger/dist/
-mv -f target/ranger-* /home/ranger/dist/
+# prevent the container from exiting
+/bin/bash
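
On first start, ranger.sh downloads the PostgreSQL JDBC driver, unpacks the admin tarball from ${RANGER_DIST}, runs setup.sh with the properties above, then starts the admin service and keeps the container alive with a shell; subsequent starts skip setup because ${RANGER_HOME}/admin already exists. A simple check from the host once startup completes:

    # expect an HTTP status code once the admin web application is listening
    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:6080/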