Posted to commits@camel.apache.org by da...@apache.org on 2014/03/12 14:18:42 UTC

[09/10] git commit: [CAMEL-7249] Working OSGi tests for camel-hdfs2

[CAMEL-7249] Working OSGi tests for camel-hdfs2

There are some issues with this implementation:
* The Import-Package entries for `camel-hdfs2` are not just the ones generated by
  the maven-bundle-plugin. They must also contain packages that are reachable only
  by reflection (the different FileSystem implementations).
* java.util.ServiceLoader does not work well inside OSGi, and Hadoop 2 uses
  ServiceLoader to discover the different FileSystem implementations. For OSGi
  (especially the Blueprint container) we have to use a hack to populate the static
  FileSystem cache up front (see the sketch after this list).
* The `camel-hdfs2` feature contains some hardcoded versions and, e.g. for Avro, a
  downgraded one (1.7.5_1 requires the `sun.misc` package - this has, however,
  changed since SMX4-1709).
* The org.osgi.framework.system.packages.extra property in
  camel-itest-osgi/**/itest/karaf/config.properties was extended with the Xerces
  packages.
* Documentation still has to be added describing the Hadoop 2 specific configuration
  inside OSGi - this changed since Hadoop 1.2.1 (Hadoop 2 uses
  java.util.ServiceLoader).
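
For reference, a minimal standalone sketch of the static-initialization trick used by
the new HdfsOsgiHelper in the diff below (the class name FileSystemCacheWarmup and the
main() wrapper are illustrative only; the Hadoop 2.x Configuration/FileSystem calls are
the same ones the helper relies on):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class FileSystemCacheWarmup {

        public static void main(String[] args) {
            Configuration conf = new Configuration();

            // Register the implementations explicitly via fs.SCHEME.impl instead of
            // relying on ServiceLoader discovery, which does not see all JARs/bundles
            // that provide FileSystem services inside OSGi.
            conf.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class);
            conf.setClass("fs.hdfs.impl", DistributedFileSystem.class, FileSystem.class);

            // Touch each scheme once so FileSystem resolves and caches the
            // implementation; failures (e.g. hdfs:/// without a namenode authority)
            // are ignored, just like in HdfsOsgiHelper.
            for (String scheme : new String[] {"file", "hdfs"}) {
                try {
                    FileSystem.get(URI.create(scheme + ":///"), conf);
                } catch (Exception e) {
                    // ignore - only the registration/caching side effect matters here
                }
            }
        }
    }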


Project: http://git-wip-us.apache.org/repos/asf/camel/repo
Commit: http://git-wip-us.apache.org/repos/asf/camel/commit/0b9b3531
Tree: http://git-wip-us.apache.org/repos/asf/camel/tree/0b9b3531
Diff: http://git-wip-us.apache.org/repos/asf/camel/diff/0b9b3531

Branch: refs/heads/master
Commit: 0b9b3531e4a4bfe82492e3037eb36381ad0b1865
Parents: af7661a
Author: Grzegorz Grzybek <gr...@gmail.com>
Authored: Tue Mar 11 19:29:30 2014 +0100
Committer: Grzegorz Grzybek <gr...@gmail.com>
Committed: Wed Mar 12 09:56:42 2014 +0100

----------------------------------------------------------------------
 components/camel-hdfs2/pom.xml                  |   12 +
 .../camel/component/hdfs2/HdfsOsgiHelper.java   |   54 +
 parent/pom.xml                                  |    5 +
 .../features/src/main/resources/features.xml    |   28 +
 tests/camel-itest-osgi/pom.xml                  |    2 +-
 .../itest/osgi/hdfs/HdfsBlueprintRouteTest.java |    9 +-
 .../camel/itest/osgi/hdfs/HdfsRouteTest.java    |   26 +-
 .../apache/camel/itest/karaf/config.properties  |   35 +
 .../itest/osgi/hdfs/blueprintCamelContext.xml   |   23 +-
 .../camel/itest/osgi/hdfs/core-default.xml      | 1088 +++++++++++++++---
 10 files changed, 1118 insertions(+), 164 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/components/camel-hdfs2/pom.xml
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/pom.xml b/components/camel-hdfs2/pom.xml
index 61f63b1..0570e51 100644
--- a/components/camel-hdfs2/pom.xml
+++ b/components/camel-hdfs2/pom.xml
@@ -34,6 +34,18 @@
     <properties>
         <camel.osgi.export.pkg>org.apache.camel.component.hdfs2.*</camel.osgi.export.pkg>
         <camel.osgi.export.service>org.apache.camel.spi.ComponentResolver;component=hdfs2</camel.osgi.export.service>
+        <camel.osgi.import.additional>
+            org.apache.hadoop.hdfs,
+            org.apache.hadoop.hdfs.client,
+            org.apache.hadoop.hdfs.protocolPB,
+            org.apache.hadoop.hdfs.util,
+            org.apache.hadoop.hdfs.net,
+            org.apache.hadoop.hdfs.security.token.block,
+            org.apache.hadoop.hdfs.security.token.delegation,
+            org.apache.hadoop.hdfs.protocol,
+            org.apache.hadoop.hdfs.protocol.proto,
+            org.apache.hadoop.hdfs.protocol.datatransfer
+        </camel.osgi.import.additional>
     </properties>
 
     <dependencies>

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsOsgiHelper.java
----------------------------------------------------------------------
diff --git a/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsOsgiHelper.java b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsOsgiHelper.java
new file mode 100644
index 0000000..cfb1212
--- /dev/null
+++ b/components/camel-hdfs2/src/main/java/org/apache/camel/component/hdfs2/HdfsOsgiHelper.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.camel.component.hdfs2;
+
+import java.net.URI;
+import java.util.Map;
+import java.util.ServiceLoader;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * <p>Class which may be used in OSGi/Blueprint environment to perform some static initialization</p>
+ * <p>This could be useful to fix the usage of {@link ServiceLoader} by Hadoop 2 in OSGi environment.</p>
+ */
+public class HdfsOsgiHelper {
+
+    private static Logger LOG = LoggerFactory.getLogger(HdfsOsgiHelper.class.getName());
+
+    /**
+     * By using this constructor it is possible to perform static initialization of {@link FileSystem}.
+     */
+    public HdfsOsgiHelper(Map<String, String> fileSystems) {
+        try {
+            // get bundle classloader for camel-hdfs2 bundle
+            ClassLoader cl = getClass().getClassLoader();
+            Configuration conf = new Configuration();
+            for (String scheme: fileSystems.keySet()) {
+                conf.setClass(String.format("fs.%s.impl", scheme), cl.loadClass(fileSystems.get(scheme)), FileSystem.class);
+                FileSystem.get(URI.create(scheme + ":///"), conf);
+            }
+        } catch (Exception e) {
+            LOG.debug(e.getMessage());
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/parent/pom.xml
----------------------------------------------------------------------
diff --git a/parent/pom.xml b/parent/pom.xml
index 622a255..ee7bab5 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -760,6 +760,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.camel</groupId>
+        <artifactId>camel-hdfs2</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.camel</groupId>
         <artifactId>camel-hl7</artifactId>
         <version>${project.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/platforms/karaf/features/src/main/resources/features.xml
----------------------------------------------------------------------
diff --git a/platforms/karaf/features/src/main/resources/features.xml b/platforms/karaf/features/src/main/resources/features.xml
index 2c01c6a..08a78d9 100644
--- a/platforms/karaf/features/src/main/resources/features.xml
+++ b/platforms/karaf/features/src/main/resources/features.xml
@@ -472,6 +472,34 @@
     <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.xmlresolver/${xmlresolver-bundle-version}</bundle>
     <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.jsch/${jsch-bundle-version}</bundle>
   </feature>
+  <feature name='camel-hdfs2' version='${project.version}' resolver='(obr)' start-level='50'>
+    <feature version='${project.version}'>camel-core</feature>
+    <bundle dependency='true'>mvn:commons-lang/commons-lang/${commons-lang-version}</bundle>
+    <bundle dependency='true'>mvn:com.google.guava/guava/${google-guava-version}</bundle>
+    <bundle dependency='true'>mvn:com.google.protobuf/protobuf-java/2.5.0</bundle><!-- newer than ${protobuf-java-bundle-version} -->
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.guice/${guice-bundle-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.jsch/${jsch-bundle-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.paranamer/${paranamer-bundle-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.avro/1.7.3_1</bundle>
+    <bundle dependency='true'>mvn:org.apache.commons/commons-compress/${commons-compress-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.commons/commons-math3/3.1.1</bundle>
+    <bundle dependency='true'>mvn:commons-configuration/commons-configuration/${commons-configuration-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.commons-httpclient/${commons-httpclient-bundle-version}</bundle>
+    <bundle dependency='true'>mvn:io.netty/netty/${netty3-version}</bundle>
+    <bundle dependency='true'>mvn:org.codehaus.jackson/jackson-core-asl/${jackson-version}</bundle>
+    <bundle dependency='true'>mvn:org.codehaus.jackson/jackson-mapper-asl/${jackson-version}</bundle>
+    <bundle dependency='true'>mvn:org.xerial.snappy/snappy-java/${snappy-version}</bundle>
+    <bundle dependency='true'>mvn:commons-codec/commons-codec/${commons-codec-version}</bundle>
+    <bundle dependency='true'>mvn:commons-collections/commons-collections/${commons-collections-version}</bundle>
+    <bundle dependency='true'>mvn:commons-io/commons-io/${commons-io-version}</bundle>
+    <bundle dependency='true'>mvn:commons-net/commons-net/${commons-net-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.zookeeper/zookeeper/${zookeeper-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.xmlenc/0.52_1</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.xerces/${xerces-bundle-version}</bundle>
+    <bundle dependency='true'>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.xmlresolver/${xmlresolver-bundle-version}</bundle>
+    <bundle>mvn:org.apache.camel/camel-hdfs2/${project.version}</bundle>
+    <bundle>mvn:org.apache.servicemix.bundles/org.apache.servicemix.bundles.hadoop-client/${hadoop2-bundle-version}</bundle>
+  </feature>
   <feature name='camel-hl7' version='${project.version}' resolver='(obr)' start-level='50'>
     <feature version='${project.version}'>camel-core</feature>
     <bundle dependency='true'>mvn:org.apache.mina/mina-core/${mina2-version}</bundle>

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/tests/camel-itest-osgi/pom.xml
----------------------------------------------------------------------
diff --git a/tests/camel-itest-osgi/pom.xml b/tests/camel-itest-osgi/pom.xml
index e6ac1b0..c6fe417 100644
--- a/tests/camel-itest-osgi/pom.xml
+++ b/tests/camel-itest-osgi/pom.xml
@@ -192,7 +192,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.camel</groupId>
-      <artifactId>camel-hdfs</artifactId>
+      <artifactId>camel-hdfs2</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
----------------------------------------------------------------------
diff --git a/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java b/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
index 6feece9..d989a5f 100644
--- a/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
+++ b/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsBlueprintRouteTest.java
@@ -20,6 +20,7 @@ import org.apache.camel.CamelContext;
 import org.apache.camel.ProducerTemplate;
 import org.apache.camel.component.mock.MockEndpoint;
 import org.apache.camel.itest.osgi.blueprint.OSGiBlueprintTestSupport;
+import org.apache.hadoop.fs.FileSystem;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.ops4j.pax.exam.Option;
@@ -46,6 +47,12 @@ public class HdfsBlueprintRouteTest extends OSGiBlueprintTestSupport {
             return;
         }
 
+        // hadoop depends on java.util.ServiceLoader which doesn't work well inside OSGi...
+        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
+        conf.setClass("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class, FileSystem.class);
+        conf.setClass("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class, FileSystem.class);
+        FileSystem.getFileSystemClass("file", conf);
+
         getInstalledBundle("CamelBlueprintHdfsTestBundle").start();
         CamelContext ctx = getOsgiService(CamelContext.class, "(camel.context.symbolicname=CamelBlueprintHdfsTestBundle)", 20000);
 
@@ -70,7 +77,7 @@ public class HdfsBlueprintRouteTest extends OSGiBlueprintTestSupport {
                
                 // using the features to install the camel components
                 loadCamelFeatures(
-                        "camel-blueprint", "camel-hdfs"));
+                        "camel-blueprint", "camel-hdfs2"));
                 
         return options;
     }

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
----------------------------------------------------------------------
diff --git a/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java b/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
index b2c20cd..e3340b3 100644
--- a/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
+++ b/tests/camel-itest-osgi/src/test/java/org/apache/camel/itest/osgi/hdfs/HdfsRouteTest.java
@@ -26,22 +26,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.ops4j.pax.exam.Option;
 import org.ops4j.pax.exam.junit.Configuration;
 import org.ops4j.pax.exam.junit.JUnit4TestRunner;
-import org.osgi.framework.Constants;
 
-import static org.apache.hadoop.io.SequenceFile.createWriter;
-import static org.ops4j.pax.exam.CoreOptions.provision;
 import static org.ops4j.pax.exam.CoreOptions.scanFeatures;
 import static org.ops4j.pax.exam.OptionUtils.combine;
-import static org.ops4j.pax.swissbox.tinybundles.core.TinyBundles.newBundle;
 
 @RunWith(JUnit4TestRunner.class)
-@Ignore("karaf-pax-exam have trouble to modify the test prob bundle, We need to revisit this test later.")
 public class HdfsRouteTest extends OSGiIntegrationTestSupport {
     //Hadoop doesn't run on IBM JDK
     private static final boolean SKIP = System.getProperty("java.vendor").contains("IBM");
@@ -54,11 +48,13 @@ public class HdfsRouteTest extends OSGiIntegrationTestSupport {
 
         final Path file = new Path(new File("../../../../target/test/test-camel-string").getAbsolutePath());
         org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
+        conf.setClass("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class, FileSystem.class);
+        conf.setClass("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class, FileSystem.class);
         //conf.setClassLoader(this.getClass().getClassLoader());
         // add the default configure into the resource
-        conf.addResource(HdfsRouteTest.class.getResourceAsStream("/core-default.xml"));
-        FileSystem fs1 = FileSystem.get(file.toUri(), conf);
-        SequenceFile.Writer writer = createWriter(fs1, conf, file, NullWritable.class, Text.class);
+        conf.addResource(HdfsRouteTest.class.getResourceAsStream("core-default.xml"));
+        SequenceFile.Writer writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
+            SequenceFile.Writer.keyClass(NullWritable.class), SequenceFile.Writer.valueClass(Text.class));
         NullWritable keyWritable = NullWritable.get();
         Text valueWritable = new Text();
         String value = "CIAO!";
@@ -69,7 +65,7 @@ public class HdfsRouteTest extends OSGiIntegrationTestSupport {
 
         context.addRoutes(new RouteBuilder() {
             public void configure() {
-                from("hdfs:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
+                from("hdfs2:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
             }
         });
         context.start();
@@ -82,16 +78,10 @@ public class HdfsRouteTest extends OSGiIntegrationTestSupport {
     @Configuration
     public static Option[] configure() throws Exception {
         Option[] options = combine(
-
                 getDefaultCamelKarafOptions(),
                 // using the features to install the camel components
-                scanFeatures(getCamelKarafFeatureUrl(), "camel-hdfs"),
-                //TODO need to find a way to override the test prob bundle
-                provision(newBundle()
-                                .add("core-default.xml", HdfsRouteTest.class.getResource("/core-default.xml"))
-                                .set(Constants.BUNDLE_SYMBOLICNAME, "CamelHdfsTestBundle")
-                                .set(Constants.DYNAMICIMPORT_PACKAGE, "*")
-                                .build()));
+                scanFeatures(getCamelKarafFeatureUrl(), "camel-hdfs2")
+            );
 
         return options;
     }

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/karaf/config.properties
----------------------------------------------------------------------
diff --git a/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/karaf/config.properties b/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/karaf/config.properties
index 544c508..a823558 100644
--- a/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/karaf/config.properties
+++ b/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/karaf/config.properties
@@ -81,6 +81,41 @@ org.osgi.framework.system.packages= \
 
 # Extra packages appended after standard packages
 # org.osgi.framework.system.packages.extra=
+org.osgi.framework.system.packages.extra= \
+ org.apache.xerces.parsers; version="2.11.0", \
+ org.apache.xerces.impl.dtd.models; version="2.11.0", \
+ org.apache.xerces.xni.parser; version="2.11.0", \
+ org.apache.xerces.impl.dv.xs; version="2.11.0", \
+ org.apache.xerces.impl.xs.traversers; version="2.11.0", \
+ org.apache.xerces.util; version="2.11.0", \
+ org.apache.xerces.impl.dtd; version="2.11.0", \
+ org.apache.xerces.jaxp.validation; version="2.11.0", \
+ org.apache.xerces.dom3.as; version="2.11.0", \
+ org.apache.xerces.impl.dv; version="2.11.0", \
+ org.apache.xerces.jaxp; version="2.11.0", \
+ org.apache.xerces.jaxp.datatype; version="2.11.0", \
+ org.apache.xerces.impl.xpath.regex; version="2.11.0", \
+ org.apache.xerces.xni; version="2.11.0", \
+ org.apache.xerces.impl.msg; version="2.11.0", \
+ org.apache.xerces.impl.dv.util; version="2.11.0", \
+ org.apache.xerces.impl.xs.util; version="2.11.0", \
+ org.apache.xerces.dom; version="2.11.0", \
+ org.apache.xerces.dom.events; version="2.11.0", \
+ org.apache.xerces.impl.xs.opti; version="2.11.0", \
+ org.apache.xerces.impl; version="2.11.0", \
+ org.apache.xerces.xs; version="2.11.0", \
+ org.apache.xerces.impl.io; version="2.11.0", \
+ org.apache.xerces.xpointer; version="2.11.0", \
+ org.apache.xerces.impl.dv.dtd; version="2.11.0", \
+ org.apache.xerces.xinclude; version="2.11.0", \
+ org.apache.xerces.impl.xpath; version="2.11.0", \
+ org.apache.xerces.xs.datatypes; version="2.11.0", \
+ org.apache.xerces.impl.xs.identity; version="2.11.0", \
+ org.apache.xerces.impl.xs.models; version="2.11.0", \
+ org.apache.xerces.xni.grammars; version="2.11.0", \
+ org.apache.xerces.impl.xs; version="2.11.0", \
+ org.apache.xerces.impl.validation; version="2.11.0", \
+ org.apache.xml.serialize; version="2.11.0"
 
 # javax.transaction is needed to avoid class loader constraint violation when using javax.sql
 org.osgi.framework.bootdelegation=org.apache.karaf.jaas.boot,sun.*,com.sun.*,javax.transaction,javax.transaction.*

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
----------------------------------------------------------------------
diff --git a/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml b/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
index e036253..7f82450 100644
--- a/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
+++ b/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/blueprintCamelContext.xml
@@ -17,22 +17,39 @@
 <blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
            xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
            xmlns:ext="http://aries.apache.org/blueprint/xmlns/blueprint-ext/v1.0.0"
+           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xsi:schemaLocation="http://www.osgi.org/xmlns/blueprint/v1.0.0 http://www.osgi.org/xmlns/blueprint/v1.0.0/blueprint.xsd"
            default-activation="lazy">
 
     <ext:property-placeholder id="external" placeholder-prefix="$[" placeholder-suffix="]"/>
 
-    <camelContext xmlns="http://camel.apache.org/schema/blueprint">
+    <bean id="hdfsOsgiHelper" class="org.apache.camel.component.hdfs2.HdfsOsgiHelper">
+        <description>
+        Bean created as prerequisite for camel-hdfs2 component. It's role is to properly initialize Hadoop's internal
+        map of file systems which is normally (non-OSGi) initialized using java.util.ServiceLoader.
+        In OSGi ServiceLoader doesn't see *all* JARs to scan for services.
+        </description>
+        <argument>
+            <map>
+                <entry key="file" value="org.apache.hadoop.fs.LocalFileSystem"  />
+                <entry key="hdfs" value="org.apache.hadoop.hdfs.DistributedFileSystem" />
+            </map>
+        </argument>
+    </bean>
 
+    <bean id="hdfs2" class="org.apache.camel.component.hdfs2.HdfsComponent" depends-on="hdfsOsgiHelper" />
+
+    <camelContext xmlns="http://camel.apache.org/schema/blueprint" xsi:schemaLocation="http://camel.apache.org/schema/blueprint http://camel.apache.org/schema/blueprint/camel-blueprint-2.12.2.xsd">
         <!-- using Camel properties component and refer to the blueprint property placeholder by its id -->
         <propertyPlaceholder id="properties" location="blueprint:external"
                              prefixToken="[[" suffixToken="]]"
                              propertyPrefix="prefix."/>
         <route>
             <from uri="direct:start"/>
-            <to uri="hdfs://[[karaf.base]]/hdfs/test-camel?fileSystemType=LOCAL&amp;splitStrategy=BYTES:5,IDLE:1000"/>
+            <to uri="hdfs2://[[karaf.base]]/hdfs/test-camel?fileSystemType=LOCAL&amp;splitStrategy=BYTES:5,IDLE:1000"/>
         </route>
         <route>
-            <from uri="hdfs://[[karaf.base]]/hdfs/test-camel?pattern=seg*&amp;initialDelay=2000&amp;fileSystemType=LOCAL&amp;chunkSize=5"/>
+            <from uri="hdfs2://[[karaf.base]]/hdfs/test-camel?pattern=*&amp;initialDelay=2000&amp;fileSystemType=LOCAL&amp;chunkSize=5"/>
             <to uri="mock:result"/>
         </route>
     </camelContext>

http://git-wip-us.apache.org/repos/asf/camel/blob/0b9b3531/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
----------------------------------------------------------------------
diff --git a/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml b/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
index 8bc3b99..16db6f9 100644
--- a/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
+++ b/tests/camel-itest-osgi/src/test/resources/org/apache/camel/itest/osgi/hdfs/core-default.xml
@@ -1,6 +1,23 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
 <!-- Do not modify this file directly.  Instead, copy entries that you -->
 <!-- wish to modify from this file into core-site.xml and change them -->
 <!-- there.  If core-site.xml does not already exist, create it.      -->
@@ -10,20 +27,26 @@
 <!--- global properties -->
 
 <property>
+  <name>hadoop.common.configuration.version</name>
+  <value>0.23.0</value>
+  <description>version of this configuration file</description>
+</property>
+
+<property>
   <name>hadoop.tmp.dir</name>
   <value>/tmp/hadoop-${user.name}</value>
   <description>A base for other temporary directories.</description>
 </property>
 
 <property>
-  <name>hadoop.native.lib</name>
+  <name>io.native.lib.available</name>
   <value>true</value>
   <description>Should native hadoop libraries, if present, be used.</description>
 </property>
 
 <property>
   <name>hadoop.http.filter.initializers</name>
-  <value></value>
+  <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
   <description>A comma separated list of class names. Each class in the list 
   must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
   Filter will be initialized. Then, the Filter will be applied to all user 
@@ -31,12 +54,7 @@
   ordering of the filters.</description>
 </property>
 
- <property>
-  <name>hadoop.security.group.mapping</name>
-  <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
-  <description>Class for user to group mapping (get groups for a given user)
-  </description>
-</property>
+<!--- security properties -->
 
 <property>
   <name>hadoop.security.authorization</name>
@@ -45,6 +63,15 @@
 </property>
 
 <property>
+  <name>hadoop.security.instrumentation.requires.admin</name>
+  <value>false</value>
+  <description>
+    Indicates if administrator ACLs are required to access
+    instrumentation servlets (JMX, METRICS, CONF, STACKS).
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.authentication</name>
   <value>simple</value>
   <description>Possible values are simple (no authentication), and kerberos
@@ -52,39 +79,220 @@
 </property>
 
 <property>
-  <name>hadoop.security.token.service.use_ip</name>
-  <value>true</value>
-  <description>Controls whether tokens always use IP addresses.  DNS changes
-  will not be detected if this option is enabled.  Existing client connections
-  that break will always reconnect to the IP of the original host.  New clients
-  will connect to the host's new IP but fail to locate a token.  Disabling
-  this option will allow existing and new clients to detect an IP change and
-  continue to locate the new host's token.
+  <name>hadoop.security.group.mapping</name>
+  <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
+  <description>
+    Class for user to group mapping (get groups for a given user) for ACL. 
+    The default implementation,
+    org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, 
+    will determine if the Java Native Interface (JNI) is available. If JNI is 
+    available the implementation will use the API within hadoop to resolve a 
+    list of groups for a user. If JNI is not available then the shell 
+    implementation, ShellBasedUnixGroupsMapping, is used.  This implementation 
+    shells out to the Linux/Unix environment with the 
+    <code>bash -c groups</code> command to resolve a list of groups for a user.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.groups.cache.secs</name>
+  <value>300</value>
+  <description>
+    This is the config controlling the validity of the entries in the cache
+    containing the user->group mapping. When this duration has expired,
+    then the implementation of the group mapping provider is invoked to get
+    the groups of the user and then cached back.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.groups.cache.warn.after.ms</name>
+  <value>5000</value>
+  <description>
+    If looking up a single user to group takes longer than this amount of
+    milliseconds, we will log a warning message.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.url</name>
+  <value></value>
+  <description>
+    The URL of the LDAP server to use for resolving user groups when using
+    the LdapGroupsMapping user to group mapping.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.ssl</name>
+  <value>false</value>
+  <description>
+    Whether or not to use SSL when connecting to the LDAP server.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.ssl.keystore</name>
+  <value></value>
+  <description>
+    File path to the SSL keystore that contains the SSL certificate required
+    by the LDAP server.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.ssl.keystore.password.file</name>
+  <value></value>
+  <description>
+    The path to a file containing the password of the LDAP SSL keystore.
+
+    IMPORTANT: This file should be readable only by the Unix user running
+    the daemons.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.bind.user</name>
+  <value></value>
+  <description>
+    The distinguished name of the user to bind as when connecting to the LDAP
+    server. This may be left blank if the LDAP server supports anonymous binds.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.bind.password.file</name>
+  <value></value>
+  <description>
+    The path to a file containing the password of the bind user.
+
+    IMPORTANT: This file should be readable only by the Unix user running
+    the daemons.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.base</name>
+  <value></value>
+  <description>
+    The search base for the LDAP connection. This is a distinguished name,
+    and will typically be the root of the LDAP directory.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
+  <value>(&amp;(objectClass=user)(sAMAccountName={0}))</value>
+  <description>
+    An additional filter to use when searching for LDAP users. The default will
+    usually be appropriate for Active Directory installations. If connecting to
+    an LDAP server with a non-AD schema, this should be replaced with
+    (&amp;(objectClass=inetOrgPerson)(uid={0}). {0} is a special string used to
+    denote where the username fits into the filter.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.search.filter.group</name>
+  <value>(objectClass=group)</value>
+  <description>
+    An additional filter to use when searching for LDAP groups. This should be
+    changed when resolving groups against a non-Active Directory installation.
+    posixGroups are currently not a supported group class.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.search.attr.member</name>
+  <value>member</value>
+  <description>
+    The attribute of the group object that identifies the users that are
+    members of the group. The default will usually be appropriate for
+    any LDAP installation.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.search.attr.group.name</name>
+  <value>cn</value>
+  <description>
+    The attribute of the group object that identifies the group name. The
+    default will usually be appropriate for all LDAP systems.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.directory.search.timeout</name>
+  <value>10000</value>
+  <description>
+    The attribute applied to the LDAP SearchControl properties to set a
+    maximum time limit when searching and awaiting a result.
+    Set to 0 if infinite wait period is desired.
+    Default is 10 seconds. Units in milliseconds.
   </description>
 </property>
 
-<!--
 <property>
   <name>hadoop.security.service.user.name.key</name>
   <value></value>
-  <description>Name of the kerberos principal of the user that owns
-  a given service daemon
+  <description>
+    For those cases where the same RPC protocol is implemented by multiple
+    servers, this configuration is required for specifying the principal
+    name to use for the service when the client wishes to make an RPC call.
   </description>
 </property>
--->
 
-<!--- logging properties -->
 
 <property>
-  <name>hadoop.logfile.size</name>
-  <value>10000000</value>
-  <description>The max size of each log file</description>
+    <name>hadoop.security.uid.cache.secs</name>
+    <value>14400</value>
+    <description>
+        This is the config controlling the validity of the entries in the cache
+        containing the userId to userName and groupId to groupName used by
+        NativeIO getFstat().
+    </description>
 </property>
 
 <property>
-  <name>hadoop.logfile.count</name>
-  <value>10</value>
-  <description>The max number of log files</description>
+  <name>hadoop.rpc.protection</name>
+  <value>authentication</value>
+  <description>This field sets the quality of protection for secured sasl 
+      connections. Possible values are authentication, integrity and privacy.
+      authentication means authentication only and no integrity or privacy; 
+      integrity implies authentication and integrity are enabled; and privacy 
+      implies all of authentication, integrity and privacy are enabled.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
+  <value>false</value>
+  <description>Some operating systems or authentication modules are known to
+  have broken implementations of getpwuid_r and getpwgid_r, such that these
+  calls are not thread-safe. Symptoms of this problem include JVM crashes
+  with a stack trace inside these functions. If your system exhibits this
+  issue, enable this configuration parameter to include a lock around the
+  calls as a workaround.
+
+  An incomplete list of some systems known to have this issue is available
+  at http://wiki.apache.org/hadoop/KnownBrokenPwuidImplementations
+  </description>
+</property>
+
+<property>
+  <name>hadoop.kerberos.kinit.command</name>
+  <value>kinit</value>
+  <description>Used to periodically renew Kerberos credentials when provided
+  to Hadoop. The default setting assumes that kinit is in the PATH of users
+  running the Hadoop client. Change this to the absolute path to kinit if this
+  is not the case.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.auth_to_local</name>
+  <value></value>
+  <description>Maps kerberos principals to local user names</description>
 </property>
 
 <!-- i/o properties -->
@@ -114,145 +322,155 @@
 
 <property>
   <name>io.compression.codecs</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec</value>
-  <description>A list of the compression codec classes that can be used 
-               for compression/decompression.</description>
+  <value></value>
+  <description>A comma-separated list of the compression codec classes that can
+  be used for compression/decompression. In addition to any classes specified
+  with this property (which take precedence), codec classes on the classpath
+  are discovered using a Java ServiceLoader.</description>
 </property>
 
 <property>
-  <name>io.serializations</name>
-  <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  <description>A list of serialization classes that can be used for
-  obtaining serializers and deserializers.</description>
+  <name>io.compression.codec.bzip2.library</name>
+  <value>system-native</value>
+  <description>The native-code library to be used for compression and
+  decompression by the bzip2 codec.  This library could be specified
+  either by by name or the full pathname.  In the former case, the
+  library is located by the dynamic linker, usually searching the
+  directories specified in the environment variable LD_LIBRARY_PATH.
+  
+  The value of "system-native" indicates that the default system
+  library should be used.  To indicate that the algorithm should
+  operate entirely in Java, specify "java-builtin".</description>
 </property>
 
-<!-- file system properties -->
-
 <property>
-  <name>fs.default.name</name>
-  <value>file:///</value>
-  <description>The name of the default file system.  A URI whose
-  scheme and authority determine the FileSystem implementation.  The
-  uri's scheme determines the config property (fs.SCHEME.impl) naming
-  the FileSystem implementation class.  The uri's authority is used to
-  determine the host, port, etc. for a filesystem.</description>
+  <name>io.serializations</name>
+  <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
+  <description>A list of serialization classes that can be used for
+  obtaining serializers and deserializers.</description>
 </property>
 
 <property>
-  <name>fs.trash.interval</name>
-  <value>0</value>
-  <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
+  <name>io.seqfile.local.dir</name>
+  <value>${hadoop.tmp.dir}/io/local</value>
+  <description>The local directory where sequence file stores intermediate
+  data files during merge.  May be a comma-separated list of
+  directories on different devices in order to spread disk i/o.
+  Directories that do not exist are ignored.
   </description>
 </property>
 
 <property>
-  <name>fs.file.impl</name>
-  <value>org.apache.hadoop.fs.LocalFileSystem</value>
-  <description>The FileSystem for file: uris.</description>
-</property>
-
-<property>
-  <name>fs.hdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
-  <description>The FileSystem for hdfs: uris.</description>
+  <name>io.map.index.skip</name>
+  <value>0</value>
+  <description>Number of index entries to skip between each entry.
+  Zero by default. Setting this to values larger than zero can
+  facilitate opening large MapFiles using less memory.</description>
 </property>
 
 <property>
-  <name>fs.s3.impl</name>
-  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
-  <description>The FileSystem for s3: uris.</description>
+  <name>io.map.index.interval</name>
+  <value>128</value>
+  <description>
+    MapFile consist of two files - data file (tuples) and index file
+    (keys). For every io.map.index.interval records written in the
+    data file, an entry (record-key, data-file-position) is written
+    in the index file. This is to allow for doing binary search later
+    within the index file to look up records by their keys and get their
+    closest positions in the data file.
+  </description>
 </property>
 
-<property>
-  <name>fs.s3n.impl</name>
-  <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
-  <description>The FileSystem for s3n: (Native S3) uris.</description>
-</property>
+<!-- file system properties -->
 
 <property>
-  <name>fs.kfs.impl</name>
-  <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
-  <description>The FileSystem for kfs: uris.</description>
+  <name>fs.defaultFS</name>
+  <value>file:///</value>
+  <description>The name of the default file system.  A URI whose
+  scheme and authority determine the FileSystem implementation.  The
+  uri's scheme determines the config property (fs.SCHEME.impl) naming
+  the FileSystem implementation class.  The uri's authority is used to
+  determine the host, port, etc. for a filesystem.</description>
 </property>
 
 <property>
-  <name>fs.hftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
+  <name>fs.default.name</name>
+  <value>file:///</value>
+  <description>Deprecated. Use (fs.defaultFS) property
+  instead</description>
 </property>
 
 <property>
-  <name>fs.hsftp.impl</name>
-  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
+  <name>fs.trash.interval</name>
+  <value>0</value>
+  <description>Number of minutes after which the checkpoint
+  gets deleted.  If zero, the trash feature is disabled.
+  This option may be configured both on the server and the
+  client. If trash is disabled server side then the client
+  side configuration is checked. If trash is enabled on the
+  server side then the value configured on the server is
+  used and the client configuration value is ignored.
+  </description>
 </property>
 
 <property>
-  <name>fs.webhdfs.impl</name>
-  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
+  <name>fs.trash.checkpoint.interval</name>
+  <value>0</value>
+  <description>Number of minutes between trash checkpoints.
+  Should be smaller or equal to fs.trash.interval. If zero,
+  the value is set to the value of fs.trash.interval.
+  Every time the checkpointer runs it creates a new checkpoint 
+  out of current and removes checkpoints created more than 
+  fs.trash.interval minutes ago.
+  </description>
 </property>
 
 <property>
-  <name>fs.ftp.impl</name>
-  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
-  <description>The FileSystem for ftp: uris.</description>
+  <name>fs.AbstractFileSystem.file.impl</name>
+  <value>org.apache.hadoop.fs.local.LocalFs</value>
+  <description>The AbstractFileSystem for file: uris.</description>
 </property>
 
-<property>
-  <name>fs.ramfs.impl</name>
-  <value>org.apache.hadoop.fs.InMemoryFileSystem</value>
-  <description>The FileSystem for ramfs: uris.</description>
-</property>
 
 <property>
-  <name>fs.har.impl</name>
-  <value>org.apache.hadoop.fs.HarFileSystem</value>
-  <description>The filesystem for Hadoop archives. </description>
+  <name>fs.AbstractFileSystem.hdfs.impl</name>
+  <value>org.apache.hadoop.fs.Hdfs</value>
+  <description>The FileSystem for hdfs: uris.</description>
 </property>
 
 <property>
-  <name>fs.har.impl.disable.cache</name>
-  <value>true</value>
-  <description>Don't cache 'har' filesystem instances.</description>
+  <name>fs.AbstractFileSystem.viewfs.impl</name>
+  <value>org.apache.hadoop.fs.viewfs.ViewFs</value>
+  <description>The AbstractFileSystem for view file system for viewfs: uris
+  (ie client side mount table:).</description>
 </property>
 
 <property>
-  <name>fs.checkpoint.dir</name>
-  <value>${hadoop.tmp.dir}/dfs/namesecondary</value>
-  <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-  </description>
+  <name>fs.ftp.host</name>
+  <value>0.0.0.0</value>
+  <description>FTP filesystem connects to this server</description>
 </property>
 
 <property>
-  <name>fs.checkpoint.edits.dir</name>
-  <value>${fs.checkpoint.dir}</value>
-  <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directoires then teh edits is
-      replicated in all of the directoires for redundancy.
-      Default value is same as fs.checkpoint.dir
+  <name>fs.ftp.host.port</name>
+  <value>21</value>
+  <description>
+    FTP filesystem connects to fs.ftp.host on this port
   </description>
 </property>
 
 <property>
-  <name>fs.checkpoint.period</name>
-  <value>3600</value>
-  <description>The number of seconds between two periodic checkpoints.
-  </description>
+  <name>fs.df.interval</name>
+  <value>60000</value>
+  <description>Disk usage statistics refresh interval in msec.</description>
 </property>
 
 <property>
-  <name>fs.checkpoint.size</name>
-  <value>67108864</value>
-  <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
+  <name>fs.du.interval</name>
+  <value>600000</value>
+  <description>File space usage statistics refresh interval in msec.</description>
 </property>
 
-
-
 <property>
   <name>fs.s3.block.size</name>
   <value>67108864</value>
@@ -283,15 +501,29 @@
   </description>
 </property>
 
+<property>
+  <name>fs.swift.impl</name>
+  <value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
+  <description>The implementation class of the OpenStack Swift Filesystem</description>
+</property>
 
 <property>
-  <name>local.cache.size</name>
-  <value>10737418240</value>
-  <description>The limit on the size of cache you want to keep, set by default
-  to 10GB. This will act as a soft limit on the cache directory for out of band data.
+  <name>fs.automatic.close</name>
+  <value>true</value>
+  <description>By default, FileSystem instances are automatically closed at program
+  exit using a JVM shutdown hook. Setting this property to false disables this
+  behavior. This is an advanced option that should only be used by server applications
+  requiring a more carefully orchestrated shutdown sequence.
   </description>
 </property>
-            
+
+<property>
+  <name>fs.s3n.block.size</name>
+  <value>67108864</value>
+  <description>Block size to use when reading files using the native S3
+  filesystem (s3n: URIs).</description>
+</property>
+
 <property>
   <name>io.seqfile.compress.blocksize</name>
   <value>1000000</value>
@@ -379,6 +611,30 @@
 </property>
 
 <property>
+  <name>ipc.client.connect.retry.interval</name>
+  <value>1000</value>
+  <description>Indicates the number of milliseconds a client will wait for
+    before retrying to establish a server connection.
+  </description>
+</property>
+
+<property>
+  <name>ipc.client.connect.timeout</name>
+  <value>20000</value>
+  <description>Indicates the number of milliseconds a client will wait for the 
+               socket to establish a server connection.
+  </description>
+</property>
+
+<property>
+  <name>ipc.client.connect.max.retries.on.timeouts</name>
+  <value>45</value>
+  <description>Indicates the number of retries a client will make on socket timeout
+               to establish a server connection.
+  </description>
+</property>
+
+<property>
   <name>ipc.server.listen.queue.size</name>
   <value>128</value>
   <description>Indicates the length of the listen queue for servers accepting
@@ -405,18 +661,6 @@
 </property>
 
 
-<!-- Web Interface Configuration -->
-
-<property>
-  <name>webinterface.private.actions</name>
-  <value>false</value>
-  <description> If set to true, the web interfaces of JT and NN may contain 
-                actions, such as kill job, delete file, etc., that should 
-                not be exposed to public. Enable this option if the interfaces 
-                are only reachable by those who have the right authorization.
-  </description>
-</property>
-
 <!-- Proxy Configuration -->
 
 <property>
@@ -446,20 +690,26 @@
   </description>
 </property>
 
-<!-- Rack Configuration -->
-
+<!-- Topology Configuration -->
 <property>
-  <name>topology.node.switch.mapping.impl</name>
+  <name>net.topology.node.switch.mapping.impl</name>
   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
   <description> The default implementation of the DNSToSwitchMapping. It
-    invokes a script specified in topology.script.file.name to resolve
-    node names. If the value for topology.script.file.name is not set, the
+    invokes a script specified in net.topology.script.file.name to resolve
+    node names. If the value for net.topology.script.file.name is not set, the
     default value of DEFAULT_RACK is returned for all node names.
   </description>
 </property>
 
 <property>
-  <name>topology.script.file.name</name>
+  <name>net.topology.impl</name>
+  <value>org.apache.hadoop.net.NetworkTopology</value>
+  <description> The default implementation of NetworkTopology which is classic three layer one.
+  </description>
+</property>
+
+<property>
+  <name>net.topology.script.file.name</name>
   <value></value>
   <description> The script name that should be invoked to resolve DNS names to
     NetworkTopology names. Example: the script would take host.foo.bar as an
@@ -468,20 +718,576 @@
 </property>
 
 <property>
-  <name>topology.script.number.args</name>
+  <name>net.topology.script.number.args</name>
   <value>100</value>
   <description> The max number of args that the script configured with 
-    topology.script.file.name should be run with. Each arg is an
+    net.topology.script.file.name should be run with. Each arg is an
     IP address.
   </description>
 </property>
 
 <property>
-  <name>hadoop.security.uid.cache.secs</name>
-  <value>14400</value>
-  <description> NativeIO maintains a cache from UID to UserName. This is
-  the timeout for an entry in that cache. </description>
+  <name>net.topology.table.file.name</name>
+  <value></value>
+  <description> The file name for a topology file, which is used when the
+    net.topology.node.switch.mapping.impl property is set to
+    org.apache.hadoop.net.TableMapping. The file format is a two column text
+    file, with columns separated by whitespace. The first column is a DNS or
+    IP address and the second column specifies the rack where the address maps.
+    If no entry corresponding to a host in the cluster is found, then 
+    /default-rack is assumed.
+  </description>
+</property>
+
+<!-- Local file system -->
+<property>
+  <name>file.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>file.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  file.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>file.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>file.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>file.replication</name>
+  <value>1</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- s3 File System -->
+
+<property>
+  <name>s3.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>s3.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  s3.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>s3.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>s3.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>s3.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- s3native File System -->
+
+<property>
+  <name>s3native.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>s3native.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  s3native.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>s3native.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>s3native.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>s3native.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- FTP file system -->
+<property>
+  <name>ftp.stream-buffer-size</name>
+  <value>4096</value>
+  <description>The size of buffer to stream files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+</property>
+
+<property>
+  <name>ftp.bytes-per-checksum</name>
+  <value>512</value>
+  <description>The number of bytes per checksum.  Must not be larger than
+  ftp.stream-buffer-size</description>
+</property>
+
+<property>
+  <name>ftp.client-write-packet-size</name>
+  <value>65536</value>
+  <description>Packet size for clients to write</description>
+</property>
+
+<property>
+  <name>ftp.blocksize</name>
+  <value>67108864</value>
+  <description>Block size</description>
+</property>
+
+<property>
+  <name>ftp.replication</name>
+  <value>3</value>
+  <description>Replication factor</description>
+</property>
+
+<!-- Tfile -->
+
+<property>
+  <name>tfile.io.chunk.size</name>
+  <value>1048576</value>
+  <description>
+    Value chunk size in bytes. Default  to
+    1MB. Values of the length less than the chunk size is
+    guaranteed to have known value length in read time (See also
+    TFile.Reader.Scanner.Entry.isValueLengthKnown()).
+  </description>
+</property>
+
+<property>
+  <name>tfile.fs.output.buffer.size</name>
+  <value>262144</value>
+  <description>
+    Buffer size used for FSDataOutputStream in bytes.
+  </description>
+</property>
+
+<property>
+  <name>tfile.fs.input.buffer.size</name>
+  <value>262144</value>
+  <description>
+    Buffer size used for FSDataInputStream in bytes.
+  </description>
+</property>
+
+<!-- HTTP web-consoles Authentication -->
+
+<property>
+  <name>hadoop.http.authentication.type</name>
+  <value>simple</value>
+  <description>
+    Defines authentication used for Oozie HTTP endpoint.
+    Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME#
+  </description>
+</property>
+
+<property>
+  <name>hadoop.http.authentication.token.validity</name>
+  <value>36000</value>
+  <description>
+    Indicates how long (in seconds) an authentication token is valid before it has
+    to be renewed.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.http.authentication.signature.secret.file</name>
+  <value>${user.home}/hadoop-http-auth-signature-secret</value>
+  <description>
+    The signature secret for signing the authentication tokens.
+    The same secret should be used for JT/NN/DN/TT configurations.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.http.authentication.cookie.domain</name>
+  <value></value>
+  <description>
+    The domain to use for the HTTP cookie that stores the authentication token.
+    For authentication to work correctly across the web-consoles of all Hadoop
+    nodes, the domain must be set correctly.
+    IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
+    For this setting to work properly, all nodes in the cluster must be configured
+    to generate URLs with hostname.domain names in them.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.http.authentication.simple.anonymous.allowed</name>
+  <value>true</value>
+  <description>
+    Indicates if anonymous requests are allowed when using 'simple' authentication.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.http.authentication.kerberos.principal</name>
+  <value>HTTP/_HOST@LOCALHOST</value>
+  <description>
+    Indicates the Kerberos principal to be used for HTTP endpoint.
+    The principal MUST start with 'HTTP/' as per Kerberos HTTP SPNEGO specification.
+  </description>
 </property>
 
+<property>
+  <name>hadoop.http.authentication.kerberos.keytab</name>
+  <value>${user.home}/hadoop.keytab</value>
+  <description>
+    Location of the keytab file with the credentials for the principal.
+    Referring to the same keytab file Oozie uses for its Kerberos credentials for Hadoop.
+  </description>
+</property>
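+<!--
+  Illustrative sketch only, not part of the upstream Hadoop defaults: to put
+  the web-consoles behind Kerberos, the properties above would typically be
+  overridden in core-site.xml along these lines. The realm, keytab path and
+  secret file locations below are placeholders, not values used by this test.
+
+  <property>
+    <name>hadoop.http.authentication.type</name>
+    <value>kerberos</value>
+  </property>
+  <property>
+    <name>hadoop.http.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+  </property>
+  <property>
+    <name>hadoop.http.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+  </property>
+  <property>
+    <name>hadoop.http.authentication.signature.secret.file</name>
+    <value>/etc/security/http-secret</value>
+  </property>
+-->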
 
+<property>
+  <name>dfs.ha.fencing.methods</name>
+  <value></value>
+  <description>
+    List of fencing methods to use for service fencing. May contain
+    builtin methods (e.g. shell and sshfence) or user-defined methods.
+  </description>
+</property>
+
+<property>
+  <name>dfs.ha.fencing.ssh.connect-timeout</name>
+  <value>30000</value>
+  <description>
+    SSH connection timeout, in milliseconds, to use with the builtin
+    sshfence fencer.
+  </description>
+</property>
+
+<property>
+  <name>dfs.ha.fencing.ssh.private-key-files</name>
+  <value></value>
+  <description>
+    The SSH private key files to use with the builtin sshfence fencer.
+  </description>
+</property>
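+<!--
+  Illustrative sketch only, not part of the upstream Hadoop defaults: a
+  typical sshfence setup using the three fencing properties above, normally
+  placed in core-site.xml or hdfs-site.xml. The user, port and key path are
+  placeholders.
+
+  <property>
+    <name>dfs.ha.fencing.methods</name>
+    <value>sshfence(hdfs:22)</value>
+  </property>
+  <property>
+    <name>dfs.ha.fencing.ssh.private-key-files</name>
+    <value>/home/hdfs/.ssh/id_rsa</value>
+  </property>
+-->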
+
+
+<!-- Static Web User Filter properties. -->
+<property>
+  <description>
+    The user name to filter as on static web filters
+    while rendering content. An example use is the HDFS
+    web UI (the user used for browsing files).
+  </description>
+  <name>hadoop.http.staticuser.user</name>
+  <value>dr.who</value>
+</property>
+
+<property>
+  <name>ha.zookeeper.quorum</name>
+  <description>
+    A list of ZooKeeper server addresses, separated by commas, that are
+    to be used by the ZKFailoverController in automatic failover.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.session-timeout.ms</name>
+  <value>5000</value>
+  <description>
+    The session timeout to use when the ZKFC connects to ZooKeeper.
+    Setting this value to a lower value implies that server crashes
+    will be detected more quickly, but risks triggering failover too
+    aggressively in the case of a transient error or network blip.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.parent-znode</name>
+  <value>/hadoop-ha</value>
+  <description>
+    The ZooKeeper znode under which the ZK failover controller stores
+    its information. Note that the nameservice ID is automatically
+    appended to this znode, so it is not normally necessary to
+    configure this, even in a federated environment.
+  </description>
+</property>
+
+<property>
+  <name>ha.zookeeper.acl</name>
+  <value>world:anyone:rwcda</value>
+  <description>
+    A comma-separated list of ZooKeeper ACLs to apply to the znodes
+    used by automatic failover. These ACLs are specified in the same
+    format as used by the ZooKeeper CLI.
+
+    If the ACL itself contains secrets, you may instead specify a
+    path to a file, prefixed with the '@' symbol, and the value of
+    this configuration will be loaded from within.
+  </description>
+</property>
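+<!--
+  Illustrative sketch only, not part of the upstream Hadoop defaults: for
+  automatic failover, the ZKFC settings above might be overridden in
+  core-site.xml roughly as follows. Hostnames and the ACL file path are
+  placeholders; the '@' prefix keeps the ACL secret out of the configuration
+  file itself, as described above.
+
+  <property>
+    <name>ha.zookeeper.quorum</name>
+    <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value>
+  </property>
+  <property>
+    <name>ha.zookeeper.acl</name>
+    <value>@/etc/hadoop/conf/zk-acl.txt</value>
+  </property>
+-->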
+
+<property>
+  <name>ha.zookeeper.auth</name>
+  <value></value>
+  <description>
+    A comma-separated list of ZooKeeper authentications to add when
+    connecting to ZooKeeper. These are specified in the same format
+    as used by the &quot;addauth&quot; command in the ZK CLI. It is
+    important that the authentications specified here are sufficient
+    to access znodes with the ACL specified in ha.zookeeper.acl.
+
+    If the auths contain secrets, you may instead specify a
+    path to a file, prefixed with the '@' symbol, and the value of
+    this configuration will be loaded from within.
+  </description>
+</property>
+
+<!-- SSLFactory configuration -->
+
+<property>
+  <name>hadoop.ssl.keystores.factory.class</name>
+  <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+  <description>
+    The keystores factory to use for retrieving certificates.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.require.client.cert</name>
+  <value>false</value>
+  <description>Whether client certificates are required</description>
+</property>
+
+<property>
+  <name>hadoop.ssl.hostname.verifier</name>
+  <value>DEFAULT</value>
+  <description>
+    The hostname verifier to provide for HttpsURLConnections.
+    Valid values are: DEFAULT, STRICT, STRICT_I6, DEFAULT_AND_LOCALHOST and
+    ALLOW_ALL
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.server.conf</name>
+  <value>ssl-server.xml</value>
+  <description>
+    Resource file from which ssl server keystore information will be extracted.
+    This file is looked up in the classpath; typically it should be in the
+    Hadoop conf/ directory.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.client.conf</name>
+  <value>ssl-client.xml</value>
+  <description>
+    Resource file from which ssl client keystore information will be extracted.
+    This file is looked up in the classpath; typically it should be in the
+    Hadoop conf/ directory.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.ssl.enabled</name>
+  <value>false</value>
+  <description>
+    Deprecated. Use dfs.http.policy and yarn.http.policy instead.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.jetty.logs.serve.aliases</name>
+  <value>true</value>
+  <description>
+    Enable/disable serving of aliases from Jetty.
+  </description>
+</property>
+
+<property>
+  <name>fs.permissions.umask-mode</name>
+  <value>022</value>
+  <description>
+    The umask used when creating files and directories.
+    It can be given in octal or symbolic form. Examples are:
+    "022" (octal, equivalent to u=rwx,g=r-x,o=r-x in symbolic form),
+    or "u=rwx,g=rwx,o=" (symbolic, equivalent to 007 in octal).
+  </description>
+</property>
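+<!--
+  Illustrative sketch only, not part of the upstream Hadoop defaults: the two
+  forms described above are interchangeable; both overrides below express the
+  same 027 umask (group gets read and execute, others get nothing).
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>027</value>
+  </property>
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>u=rwx,g=r-x,o=</value>
+  </property>
+-->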
+
+<!-- ha properties -->
+
+<property>
+  <name>ha.health-monitor.connect-retry-interval.ms</name>
+  <value>1000</value>
+  <description>
+    How often to retry connecting to the service.
+  </description>
+</property>
+
+<property>
+  <name>ha.health-monitor.check-interval.ms</name>
+  <value>1000</value>
+  <description>
+    How often to check the service.
+  </description>
+</property>
+
+<property>
+  <name>ha.health-monitor.sleep-after-disconnect.ms</name>
+  <value>1000</value>
+  <description>
+    How long to sleep after an unexpected RPC error.
+  </description>
+</property>
+
+<property>
+  <name>ha.health-monitor.rpc-timeout.ms</name>
+  <value>45000</value>
+  <description>
+    Timeout for the actual monitorHealth() calls.
+  </description>
+</property>
+
+<property>
+  <name>ha.failover-controller.new-active.rpc-timeout.ms</name>
+  <value>60000</value>
+  <description>
+    Timeout that the FC waits for the new active to become active
+  </description>
+</property>
+
+<property>
+  <name>ha.failover-controller.graceful-fence.rpc-timeout.ms</name>
+  <value>5000</value>
+  <description>
+    Timeout that the FC waits for the old active to go to standby
+  </description>
+</property>
+
+<property>
+  <name>ha.failover-controller.graceful-fence.connection.retries</name>
+  <value>1</value>
+  <description>
+    FC connection retries for graceful fencing
+  </description>
+</property>
+
+<property>
+  <name>ha.failover-controller.cli-check.rpc-timeout.ms</name>
+  <value>20000</value>
+  <description>
+    Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState
+  </description>
+</property>
+
+<property>
+  <name>ipc.client.fallback-to-simple-auth-allowed</name>
+  <value>false</value>
+  <description>
+    When a client is configured to attempt a secure connection, but attempts to
+    connect to an insecure server, that server may instruct the client to
+    switch to SASL SIMPLE (unsecure) authentication. This setting controls
+    whether or not the client will accept this instruction from the server.
+    When false (the default), the client will not allow the fallback to SIMPLE
+    authentication, and will abort the connection.
+  </description>
+</property>
+
+<property>
+  <name>fs.client.resolve.remote.symlinks</name>
+  <value>true</value>
+  <description>
+      Whether to resolve symlinks when accessing a remote Hadoop filesystem.
+      Setting this to false causes an exception to be thrown upon encountering
+      a symlink. This setting does not apply to local filesystems, which
+      automatically resolve local symlinks.
+  </description>
+</property>
+
+<property>
+  <name>nfs3.server.port</name>
+  <value>2049</value>
+  <description>
+      Specify the port number used by Hadoop NFS.
+  </description>
+</property>
+
+<property>
+  <name>nfs3.mountd.port</name>
+  <value>4242</value>
+  <description>
+      Specify the port number used by Hadoop mount daemon.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.user.group.static.mapping.overrides</name>
+  <value>dr.who=;</value>
+  <description>
+    Static mapping of users to groups. If set for a user, this overrides the
+    groups available in the system for that user. In other words, group
+    look-up will not happen for these users; instead, the groups mapped in
+    this configuration will be used.
+    The mapping should be in this format:
+    user1=group1,group2;user2=;user3=group2;
+    The default, "dr.who=;", treats "dr.who" as a user without groups.
+  </description>
+</property>
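+<!--
+  Illustrative sketch only, not part of the upstream Hadoop defaults: using
+  the format described above, a core-site.xml override pinning groups for
+  two made-up users while keeping dr.who groupless could look like this.
+
+  <property>
+    <name>hadoop.user.group.static.mapping.overrides</name>
+    <value>dr.who=;alice=analysts,etl;bob=etl;</value>
+  </property>
+-->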
+
+<property>
+  <name>rpc.metrics.quantile.enable</name>
+  <value>false</value>
+  <description>
+    When this property is set to true and rpc.metrics.percentiles.intervals
+    is set to a comma-separated list of granularities in seconds, the
+    50/75/90/95/99th percentile latencies for rpc queue/processing time in
+    milliseconds are added to the rpc metrics.
+  </description>
+</property>
+
+<property>
+  <name>rpc.metrics.percentiles.intervals</name>
+  <value></value>
+  <description>
+    A comma-separated list of granularities in seconds for the metrics which
+    describe the 50/75/90/95/99th percentile latency for rpc queue/processing
+    time. The metrics are output if rpc.metrics.quantile.enable is set to
+    true.
+  </description>
+</property>
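+<!--
+  Illustrative sketch only, not part of the upstream Hadoop defaults: per the
+  descriptions above, the two rpc metrics properties are meant to be set
+  together. A core-site.xml override collecting percentiles over 60s and
+  300s windows might look like this.
+
+  <property>
+    <name>rpc.metrics.quantile.enable</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>rpc.metrics.percentiles.intervals</name>
+    <value>60,300</value>
+  </property>
+-->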
 </configuration>