Posted to commits@flink.apache.org by gy...@apache.org on 2020/09/28 14:33:22 UTC

[flink] 02/02: [FLINK-18795][hbase] Support for HBase 2

This is an automated email from the ASF dual-hosted git repository.

gyfora pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 16c97c8db718219ed036218faf3519541f24b430
Author: Miklos Gergely <mg...@cloudera.com>
AuthorDate: Tue Aug 25 13:19:04 2020 +0200

    [FLINK-18795][hbase] Support for HBase 2
    
    This closes #13128
---
 .../pom.xml                                        | 212 ++-----
 .../flink/addons/hbase1}/TableInputFormat.java     |   6 +-
 .../hbase1/HBase1DynamicTableFactory.java}         |  10 +-
 .../connector/hbase1/HBase1TableFactory.java}      |  27 +-
 .../flink/connector/hbase1/HBaseValidator.java}    |  39 +-
 .../connector/hbase1}/options/HBaseOptions.java    |   2 +-
 .../hbase1}/sink/HBaseDynamicTableSink.java        |   6 +-
 .../hbase1}/sink/HBaseUpsertTableSink.java         |   6 +-
 .../hbase1}/source/AbstractTableInputFormat.java   |   3 +-
 .../hbase1/source/HBaseDynamicTableSource.java     |  53 ++
 .../connector/hbase1}/source/HBaseInputFormat.java |   9 +-
 .../hbase1}/source/HBaseRowDataInputFormat.java    |   2 +-
 .../hbase1}/source/HBaseRowInputFormat.java        |   2 +-
 .../connector/hbase1/source/HBaseTableSource.java  |  80 +++
 .../org.apache.flink.table.factories.Factory       |   2 +-
 .../org.apache.flink.table.factories.TableFactory  |   2 +-
 .../connector/hbase1}/HBaseConnectorITCase.java    |  14 +-
 .../connector/hbase1}/HBaseDescriptorTest.java     |   5 +-
 .../hbase1}/HBaseDynamicTableFactoryTest.java      |  14 +-
 .../connector/hbase1}/HBaseTableFactoryTest.java   |  12 +-
 .../connector/hbase1}/HBaseTablePlanTest.java      |   2 +-
 .../hbase1}/example/HBaseWriteExample.java         |   3 +-
 .../hbase1}/example/HBaseWriteStreamExample.java   |   2 +-
 .../connector/hbase1}/util/HBaseTestBase.java      |   3 +-
 .../util/HBaseTestingClusterAutoStarter.java       |   2 +-
 .../java/org/slf4j/impl/Log4jLoggerAdapter.java}   |   8 +-
 .../src/test/resources/hbase-site.xml              |   0
 .../src/test/resources/log4j2-test.properties      |   0
 .../flink/connector/hbase1}/HBaseTablePlanTest.xml |   0
 .../flink-connector-hbase-2.2/README.md            |  89 +++
 .../pom.xml                                        | 226 ++++---
 .../hbase2/HBase2DynamicTableFactory.java}         |  36 +-
 .../connector/hbase2/HBase2TableFactory.java}      |  55 +-
 .../flink/connector/hbase2/HBaseValidator.java}    |  38 +-
 .../hbase2}/sink/HBaseDynamicTableSink.java        |  31 +-
 .../hbase2}/sink/HBaseUpsertTableSink.java         |  30 +-
 .../hbase2}/source/AbstractTableInputFormat.java   |  27 +-
 .../hbase2/source/HBaseDynamicTableSource.java     |  50 ++
 .../connector/hbase2}/source/HBaseInputFormat.java |  32 +-
 .../hbase2}/source/HBaseRowDataInputFormat.java    |  13 +-
 .../hbase2}/source/HBaseRowInputFormat.java        |  11 +-
 .../connector/hbase2/source/HBaseTableSource.java  |  80 +++
 .../org.apache.flink.table.factories.Factory       |   2 +-
 .../org.apache.flink.table.factories.TableFactory} |   2 +-
 .../connector/hbase2}/HBaseConnectorITCase.java    | 387 ++++++------
 .../connector/hbase2}/HBaseDescriptorTest.java     |  24 +-
 .../hbase2}/HBaseDynamicTableFactoryTest.java      |  23 +-
 .../connector/hbase2}/HBaseTableFactoryTest.java   |  21 +-
 .../connector/hbase2}/HBaseTablePlanTest.java      |  12 +-
 .../hbase2}/example/HBaseWriteExample.java         |   6 +-
 .../connector/hbase2}/util/HBaseTestBase.java      |   7 +-
 .../util/HBaseTestingClusterAutoStarter.java       |  78 +--
 .../java/org/slf4j/impl/Log4jLoggerAdapter.java}   |   8 +-
 .../src/test/resources/hbase-site.xml              |   0
 .../src/test/resources/log4j2-test.properties      |   0
 .../flink/connector/hbase2}/HBaseTablePlanTest.xml |   0
 .../pom.xml                                        |  68 +-
 .../connector/hbase/options/HBaseWriteOptions.java |   0
 .../hbase/sink/HBaseMutationConverter.java         |   0
 .../connector/hbase/sink/HBaseSinkFunction.java    |   0
 .../hbase/sink/LegacyMutationConverter.java        |   2 +-
 .../hbase/sink/RowDataToMutationConverter.java     |   0
 .../source/AbstractHBaseDynamicTableSource.java}   |  24 +-
 .../hbase/source/AbstractHBaseTableSource.java}    |  34 +-
 .../hbase/source/HBaseLookupFunction.java          |   0
 .../hbase/source/HBaseRowDataLookupFunction.java   |   0
 .../connector/hbase/source/TableInputSplit.java    |   4 +-
 .../hbase/util/HBaseConfigurationUtil.java         |  33 +
 .../connector/hbase/util/HBaseReadWriteHelper.java |   0
 .../flink/connector/hbase/util/HBaseSerde.java     |   0
 .../connector/hbase/util/HBaseTableSchema.java     |   0
 .../flink/connector/hbase/util/HBaseTypeUtils.java |   0
 .../table/descriptors/AbstractHBaseValidator.java} |  15 +-
 .../org/apache/flink/table/descriptors/HBase.java  |  14 +-
 .../hbase/example/HBaseFlinkTestConstants.java     |  13 +-
 .../hbase/util/HBaseConfigLoadingTest.java         |   0
 .../flink/connector/hbase/util/PlannerType.java    |   0
 .../src/test/resources/hbase-site.xml              |   0
 .../src/test/resources/log4j2-test.properties      |   0
 .../connector/hbase/example/HBaseReadExample.java  |  85 ---
 .../pom.xml                                        |  11 +-
 .../src/main/resources/META-INF/NOTICE             |   3 +-
 .../resources/META-INF/licenses/LICENSE.protobuf   |   0
 .../src/main/resources/hbase-default.xml           |   0
 .../pom.xml                                        |  24 +-
 .../src/main/resources/META-INF/NOTICE             |  35 ++
 .../resources/META-INF/licenses/LICENSE.protobuf   |   0
 .../src/main/resources/hbase-default.xml           | 685 ++++++++++++++-------
 flink-connectors/pom.xml                           |   7 +-
 .../flink-end-to-end-tests-hbase/pom.xml           |  20 +-
 .../flink/tests/util/hbase/HBaseResource.java      |   5 +-
 .../tests/util/hbase/HBaseResourceFactory.java     |   3 +-
 .../util/hbase/LocalStandaloneHBaseResource.java   |   8 +-
 .../hbase/LocalStandaloneHBaseResourceFactory.java |   4 +-
 .../tests/util/hbase/SQLClientHBaseITCase.java     |  42 +-
 .../src/test/resources/hbase_e2e.sql               |   4 +-
 .../pyflink/table/tests/test_descriptor.py         |   2 +-
 tools/ci/stage.sh                                  |   4 +-
 98 files changed, 1736 insertions(+), 1227 deletions(-)

diff --git a/flink-connectors/flink-connector-hbase/pom.xml b/flink-connectors/flink-connector-hbase-1.4/pom.xml
similarity index 67%
copy from flink-connectors/flink-connector-hbase/pom.xml
copy to flink-connectors/flink-connector-hbase-1.4/pom.xml
index 39f81e8..8869ae7 100644
--- a/flink-connectors/flink-connector-hbase/pom.xml
+++ b/flink-connectors/flink-connector-hbase-1.4/pom.xml
@@ -29,8 +29,8 @@ under the License.
 		<relativePath>..</relativePath>
 	</parent>
 
-	<artifactId>flink-connector-hbase_${scala.binary.version}</artifactId>
-	<name>Flink : Connectors : HBase</name>
+	<artifactId>flink-connector-hbase-1.4_${scala.binary.version}</artifactId>
+	<name>Flink : Connectors : HBase 1.4</name>
 	<packaging>jar</packaging>
 
 	<properties>
@@ -54,7 +54,11 @@ under the License.
 
 	<dependencies>
 
-		<!-- core dependencies -->
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-connector-hbase-base_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+		</dependency>
 
 		<dependency>
 			<groupId>org.apache.flink</groupId>
@@ -107,125 +111,69 @@ under the License.
 			<scope>provided</scope>
 		</dependency>
 
+		<!-- test dependencies -->
+
 		<dependency>
-			<groupId>org.apache.hadoop</groupId>
-			<artifactId>hadoop-common</artifactId>
-			<scope>provided</scope>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-connector-hbase-base_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
 		</dependency>
 
-		<!-- HBase server needed for TableOutputFormat -->
-		<!-- TODO implement bulk output format for HBase -->
+
 		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-server</artifactId>
-			<version>${hbase.version}</version>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-hadoop-compatibility_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
 			<exclusions>
-				<!-- Remove unneeded dependency, which is conflicting with our jetty-util version. -->
-				<exclusion>
-					<groupId>org.mortbay.jetty</groupId>
-					<artifactId>jetty-util</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.mortbay.jetty</groupId>
-					<artifactId>jetty</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.mortbay.jetty</groupId>
-					<artifactId>jetty-sslengine</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.mortbay.jetty</groupId>
-					<artifactId>jsp-2.1</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.mortbay.jetty</groupId>
-					<artifactId>jsp-api-2.1</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.mortbay.jetty</groupId>
-					<artifactId>servlet-api-2.5</artifactId>
-				</exclusion>
-				<!-- Bug in hbase annotations, can be removed when fixed. See FLINK-2153. -->
-				<exclusion>
-					<groupId>org.apache.hbase</groupId>
-					<artifactId>hbase-annotations</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>com.sun.jersey</groupId>
-					<artifactId>jersey-core</artifactId>
-				</exclusion>
 				<exclusion>
-					<groupId>com.sun.jersey</groupId>
-					<artifactId>jersey-server</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>tomcat</groupId>
-					<artifactId>jasper-compiler</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>tomcat</groupId>
-					<artifactId>jasper-runtime</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.jruby.jcodings</groupId>
-					<artifactId>jcodings</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.jruby.joni</groupId>
-					<artifactId>joni</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.jamon</groupId>
-					<artifactId>jamon-runtime</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>log4j</groupId>
-					<artifactId>log4j</artifactId>
-				</exclusion>
-				<exclusion>
-					<groupId>org.slf4j</groupId>
-					<artifactId>slf4j-log4j12</artifactId>
+					<groupId>org.apache.flink</groupId>
+					<artifactId>flink-shaded-include-yarn_${scala.binary.version}</artifactId>
 				</exclusion>
 			</exclusions>
 		</dependency>
-
 		<dependency>
-			<!-- Bump hbase netty dependency -->
-			<groupId>io.netty</groupId>
-			<artifactId>netty-all</artifactId>
-			<version>4.1.44.Final</version>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
 		</dependency>
-
 		<dependency>
-			<!-- Replaces Hbase log4j dependency -->
-			<groupId>org.apache.logging.log4j</groupId>
-			<artifactId>log4j-1.2-api</artifactId>
-			<scope>provided</scope>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-common</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
 		</dependency>
-
-		<!-- test dependencies -->
-
 		<dependency>
 			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-clients_${scala.binary.version}</artifactId>
+			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
-
 		<dependency>
 			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-hadoop-compatibility_${scala.binary.version}</artifactId>
+			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
 			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
 			<scope>test</scope>
-			<exclusions>
-				<exclusion>
-					<groupId>org.apache.flink</groupId>
-					<artifactId>flink-shaded-include-yarn_${scala.binary.version}</artifactId>
-				</exclusion>
-			</exclusions>
 		</dependency>
 
-		<!-- Test dependencies are only available for Hadoop-2. -->
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
 			<artifactId>hbase-server</artifactId>
@@ -241,6 +189,10 @@ under the License.
 					<groupId>org.slf4j</groupId>
 					<artifactId>slf4j-log4j12</artifactId>
 				</exclusion>
+				<exclusion>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-auth</artifactId>
+				</exclusion>
 			</exclusions>
 		</dependency>
 
@@ -307,73 +259,11 @@ under the License.
 				</exclusion>
 			</exclusions>
 		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-common</artifactId>
-			<version>${project.version}</version>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
+
 	</dependencies>
 
 	<profiles>
 		<profile>
-			<id>cdh5.1.3</id>
-			<activation>
-				<property>
-					<name>cdh5.1.3</name>
-				</property>
-			</activation>
-			<properties>
-				<hbase.version>0.98.1-cdh5.1.3</hbase.version>
-				<hadoop.version>2.3.0-cdh5.1.3</hadoop.version>
-				<!-- Cloudera use different versions for hadoop core and commons-->
-				<!-- This profile could be removed if Cloudera fix this mismatch! -->
-				<hadoop.core.version>2.3.0-mr1-cdh5.1.3</hadoop.core.version>
-			</properties>
-			<dependencyManagement>
-				<dependencies>
-					<dependency>
-						<groupId>org.apache.hadoop</groupId>
-						<artifactId>hadoop-core</artifactId>
-						<version>${hadoop.core.version}</version>
-					</dependency>
-				</dependencies>
-			</dependencyManagement>
-		</profile>
-		<profile>
 			<id>java11</id>
 			<activation>
 				<jdk>11</jdk>
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/addons/hbase1/TableInputFormat.java
similarity index 90%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
rename to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/addons/hbase1/TableInputFormat.java
index a5e044e..dfe200d 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/addons/hbase/TableInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/addons/hbase1/TableInputFormat.java
@@ -16,16 +16,16 @@
  * limitations under the License.
  */
 
-package org.apache.flink.addons.hbase;
+package org.apache.flink.addons.hbase1;
 
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.api.java.tuple.Tuple;
-import org.apache.flink.connector.hbase.source.HBaseInputFormat;
+import org.apache.flink.connector.hbase1.source.HBaseInputFormat;
 
 /**
  * {@link InputFormat} subclass that wraps the access for HTables.
  *
- * @deprecated please use {@link org.apache.flink.connector.hbase.source.HBaseInputFormat}.
+ * @deprecated please use {@link org.apache.flink.connector.hbase1.source.HBaseInputFormat}.
  */
 @Deprecated
 public abstract class TableInputFormat<T extends Tuple> extends HBaseInputFormat<T> {
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactory.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1DynamicTableFactory.java
similarity index 96%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactory.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1DynamicTableFactory.java
index 6e58aba..00fe3c0 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactory.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1DynamicTableFactory.java
@@ -16,17 +16,17 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.configuration.ConfigOption;
 import org.apache.flink.configuration.ConfigOptions;
 import org.apache.flink.configuration.MemorySize;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseDynamicTableSink;
-import org.apache.flink.connector.hbase.source.HBaseDynamicTableSource;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase1.options.HBaseOptions;
+import org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink;
+import org.apache.flink.connector.hbase1.source.HBaseDynamicTableSource;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.connector.sink.DynamicTableSink;
 import org.apache.flink.table.connector.source.DynamicTableSource;
@@ -46,7 +46,7 @@ import static org.apache.flink.table.factories.FactoryUtil.createTableFactoryHel
 /**
  * HBase connector factory.
  */
-public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
+public class HBase1DynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
 
 	private static final String IDENTIFIER = "hbase-1.4";
 
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseTableFactory.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1TableFactory.java
similarity index 87%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseTableFactory.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1TableFactory.java
index ca3e1e5..09877c3 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseTableFactory.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBase1TableFactory.java
@@ -16,21 +16,20 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseUpsertTableSink;
-import org.apache.flink.connector.hbase.source.HBaseTableSource;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase1.options.HBaseOptions;
+import org.apache.flink.connector.hbase1.sink.HBaseUpsertTableSink;
+import org.apache.flink.connector.hbase1.source.HBaseTableSource;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.DescriptorProperties;
-import org.apache.flink.table.descriptors.HBaseValidator;
 import org.apache.flink.table.factories.StreamTableSinkFactory;
 import org.apache.flink.table.factories.StreamTableSourceFactory;
 import org.apache.flink.table.sinks.StreamTableSink;
@@ -52,6 +51,14 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.flink.connector.hbase1.HBaseValidator.CONNECTOR_VERSION_VALUE_143;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_TABLE_NAME;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_TYPE_VALUE_HBASE;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_ZK_NODE_PARENT;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_ZK_QUORUM;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_PROPERTY_VERSION;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_TYPE;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_VERSION;
@@ -60,14 +67,6 @@ import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK;
 import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_ROWTIME;
 import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_STRATEGY_DATA_TYPE;
 import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_STRATEGY_EXPR;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_TABLE_NAME;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_TYPE_VALUE_HBASE;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_VERSION_VALUE_143;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_ZK_NODE_PARENT;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_ZK_QUORUM;
 import static org.apache.flink.table.descriptors.Schema.SCHEMA;
 import static org.apache.flink.table.descriptors.Schema.SCHEMA_DATA_TYPE;
 import static org.apache.flink.table.descriptors.Schema.SCHEMA_NAME;
@@ -77,7 +76,7 @@ import static org.apache.flink.table.descriptors.Schema.SCHEMA_TYPE;
  * Factory for creating configured instances of {@link HBaseTableSource} or sink.
  */
 @Internal
-public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamTableSinkFactory<Tuple2<Boolean, Row>> {
+public class HBase1TableFactory implements StreamTableSourceFactory<Row>, StreamTableSinkFactory<Tuple2<Boolean, Row>> {
 
 	@Override
 	public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
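
As a reading aid, a minimal sketch (key strings assumed, mirroring the AbstractHBaseValidator constants statically imported above) of the legacy property map this factory consumes; a real table would additionally carry the schema.* keys, omitted here.

import java.util.HashMap;
import java.util.Map;

public class HBase1LegacyPropertiesSketch {
	public static Map<String, String> minimalConnectorProperties() {
		Map<String, String> properties = new HashMap<>();
		properties.put("connector.type", "hbase");                      // CONNECTOR_TYPE_VALUE_HBASE
		properties.put("connector.version", "1.4.3");                   // CONNECTOR_VERSION_VALUE_143
		properties.put("connector.table-name", "hTable");               // CONNECTOR_TABLE_NAME
		properties.put("connector.zookeeper.quorum", "localhost:2181"); // CONNECTOR_ZK_QUORUM
		properties.put("connector.zookeeper.znode.parent", "/hbase");   // CONNECTOR_ZK_NODE_PARENT
		return properties;
	}
}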
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBaseValidator.java
similarity index 56%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBaseValidator.java
index 181e10f..c302c6b 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/HBaseValidator.java
@@ -16,31 +16,32 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.sink;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.annotation.Internal;
-
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-
-import java.io.Serializable;
+import org.apache.flink.table.descriptors.AbstractHBaseValidator;
 
 /**
- * A converter used to converts the input record into HBase {@link Mutation}.
- * @param <T> type of input record.
+ * The validator for HBase.
+ * More features to be supported, e.g., batch read/write, async api(support from hbase version 2.0.0), Caching for LookupFunction.
  */
 @Internal
-public interface HBaseMutationConverter<T> extends Serializable {
+public class HBaseValidator extends AbstractHBaseValidator {
+
+	public static final String CONNECTOR_VERSION_VALUE_143 = "1.4.3";
+
+	@Override
+	protected boolean validateZkQuorum() {
+		return false;
+	}
 
-	/**
-	 * Initialization method for the function. It is called once before conversion method.
-	 */
-	void open();
+	@Override
+	protected String getConnectorVersion() {
+		return CONNECTOR_VERSION_VALUE_143;
+	}
 
-	/**
-	 * Converts the input record into HBase {@link Mutation}. A mutation can be a
-	 * {@link Put} or {@link Delete}.
-	 */
-	Mutation convertToMutation(T record);
+	@Override
+	protected boolean zkQuorumIsOptional() {
+		return false;
+	}
 }
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/options/HBaseOptions.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/options/HBaseOptions.java
similarity index 98%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/options/HBaseOptions.java
rename to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/options/HBaseOptions.java
index 83775c7..ce89454 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/options/HBaseOptions.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/options/HBaseOptions.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.options;
+package org.apache.flink.connector.hbase1.options;
 
 import org.apache.flink.annotation.Internal;
 
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseDynamicTableSink.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseDynamicTableSink.java
similarity index 93%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseDynamicTableSink.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseDynamicTableSink.java
index 5d60ae3..0c2e8ee 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseDynamicTableSink.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseDynamicTableSink.java
@@ -16,14 +16,16 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.sink;
+package org.apache.flink.connector.hbase1.sink;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
+import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
+import org.apache.flink.connector.hbase.sink.RowDataToMutationConverter;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase1.options.HBaseOptions;
 import org.apache.flink.table.connector.ChangelogMode;
 import org.apache.flink.table.connector.sink.DynamicTableSink;
 import org.apache.flink.table.connector.sink.SinkFunctionProvider;
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseUpsertTableSink.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseUpsertTableSink.java
similarity index 95%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseUpsertTableSink.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseUpsertTableSink.java
index f882833..591bc80 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseUpsertTableSink.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/sink/HBaseUpsertTableSink.java
@@ -16,16 +16,18 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.sink;
+package org.apache.flink.connector.hbase1.sink;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
+import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
+import org.apache.flink.connector.hbase.sink.LegacyMutationConverter;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase1.options.HBaseOptions;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.DataStreamSink;
 import org.apache.flink.table.api.TableSchema;
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/AbstractTableInputFormat.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/AbstractTableInputFormat.java
similarity index 98%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/AbstractTableInputFormat.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/AbstractTableInputFormat.java
index 6cd7a4d..134f437 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/AbstractTableInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/AbstractTableInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase1.source;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
@@ -25,6 +25,7 @@ import org.apache.flink.api.common.io.LocatableInputSplitAssigner;
 import org.apache.flink.api.common.io.RichInputFormat;
 import org.apache.flink.api.common.io.statistics.BaseStatistics;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.hbase.source.TableInputSplit;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.core.io.InputSplitAssigner;
 
diff --git a/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseDynamicTableSource.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseDynamicTableSource.java
new file mode 100644
index 0000000..e8d92a3
--- /dev/null
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseDynamicTableSource.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.hbase1.source;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.io.InputFormat;
+import org.apache.flink.connector.hbase.source.AbstractHBaseDynamicTableSource;
+import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.table.connector.source.DynamicTableSource;
+import org.apache.flink.table.data.RowData;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * HBase table source implementation.
+ */
+@Internal
+public class HBaseDynamicTableSource extends AbstractHBaseDynamicTableSource {
+
+	public HBaseDynamicTableSource(
+			Configuration conf,
+			String tableName,
+			HBaseTableSchema hbaseSchema,
+			String nullStringLiteral) {
+		super(conf, tableName, hbaseSchema, nullStringLiteral);
+	}
+
+	@Override
+	public DynamicTableSource copy() {
+		return new HBaseDynamicTableSource(conf, tableName, hbaseSchema, nullStringLiteral);
+	}
+
+	@Override
+	public InputFormat<RowData, ?> getInputFormat() {
+		return new HBaseRowDataInputFormat(conf, tableName, hbaseSchema, nullStringLiteral);
+	}
+}
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseInputFormat.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseInputFormat.java
similarity index 96%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseInputFormat.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseInputFormat.java
index 8076e8f..a1a6f1e 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase1.source;
 
 import org.apache.flink.annotation.Experimental;
 import org.apache.flink.api.common.io.InputFormat;
@@ -26,8 +26,6 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 
-import java.io.IOException;
-
 /**
  * {@link InputFormat} subclass that wraps the access for HTables.
  */
@@ -49,6 +47,7 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	 * Returns an instance of Scan that retrieves the required subset of records from the HBase table.
 	 * @return The appropriate instance of Scan for this usecase.
 	 */
+	@Override
 	protected abstract Scan getScanner();
 
 	/**
@@ -56,6 +55,7 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	 * Per instance of a TableInputFormat derivative only a single tablename is possible.
 	 * @return The name of the table
 	 */
+	@Override
 	protected abstract String getTableName();
 
 	/**
@@ -67,7 +67,7 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	protected abstract T mapResultToTuple(Result r);
 
 	@Override
-	protected void initTable() throws IOException {
+	protected void initTable() {
 		if (table == null) {
 			table = createTable();
 		}
@@ -91,6 +91,7 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 		return null;
 	}
 
+	@Override
 	protected T mapResultToOutType(Result r) {
 		return mapResultToTuple(r);
 	}
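
To illustrate the contract shown above, a sketch of a concrete subclass implementing the three abstract methods; table, family, and qualifier names are made up for illustration, and it assumes HBaseInputFormat requires nothing beyond these overrides.

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.hbase1.source.HBaseInputFormat;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class WordCountTableInputFormat extends HBaseInputFormat<Tuple2<String, Long>> {

	private static final byte[] FAMILY = Bytes.toBytes("f1");       // assumed column family
	private static final byte[] QUALIFIER = Bytes.toBytes("count"); // assumed qualifier

	@Override
	protected Scan getScanner() {
		// Restrict the scan to the single column mapped into the tuple.
		return new Scan().addColumn(FAMILY, QUALIFIER);
	}

	@Override
	protected String getTableName() {
		return "wordcounts"; // assumed table name
	}

	@Override
	protected Tuple2<String, Long> mapResultToTuple(Result result) {
		String key = Bytes.toString(result.getRow());
		long count = Bytes.toLong(result.getValue(FAMILY, QUALIFIER));
		return new Tuple2<>(key, count);
	}
}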
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataInputFormat.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowDataInputFormat.java
similarity index 98%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataInputFormat.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowDataInputFormat.java
index 43a4fad..9472f7f 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowDataInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase1.source;
 
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.connector.hbase.util.HBaseSerde;
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowInputFormat.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowInputFormat.java
similarity index 98%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowInputFormat.java
copy to flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowInputFormat.java
index 4d5f5b4..816cbed 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseRowInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase1.source;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.io.InputFormat;
diff --git a/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseTableSource.java b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseTableSource.java
new file mode 100644
index 0000000..7c180b5
--- /dev/null
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/java/org/apache/flink/connector/hbase1/source/HBaseTableSource.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.hbase1.source;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.io.InputFormat;
+import org.apache.flink.connector.hbase.source.AbstractHBaseTableSource;
+import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.types.Row;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Creates a TableSource to scan an HBase table.
+ *
+ * <p>The table name and required HBase configuration is passed during {@link HBaseTableSource} construction.
+ * Use {@link #addColumn(String, String, Class)} to specify the family, qualifier, and type of columns to scan.
+ *
+ * <p>The TableSource returns {@link Row} with nested Rows for each column family.
+ *
+ * <p>The HBaseTableSource is used as shown in the example below.
+ *
+ * <pre>
+ * {@code
+ * HBaseTableSource hSrc = new HBaseTableSource(conf, "hTable");
+ * hSrc.setRowKey("rowkey", String.class);
+ * hSrc.addColumn("fam1", "col1", byte[].class);
+ * hSrc.addColumn("fam1", "col2", Integer.class);
+ * hSrc.addColumn("fam2", "col1", String.class);
+ *
+ * tableEnv.registerTableSourceInternal("hTable", hSrc);
+ * Table res = tableEnv.sqlQuery(
+ *   "SELECT t.fam2.col1, SUM(t.fam1.col2) FROM hTable AS t " +
+ *   "WHERE t.rowkey LIKE 'flink%' GROUP BY t.fam2.col1");
+ * }
+ * </pre>
+ */
+@Internal
+public class HBaseTableSource extends AbstractHBaseTableSource {
+
+	/**
+	 * The HBase configuration and the name of the table to read.
+	 *
+	 * @param conf      hbase configuration
+	 * @param tableName the tableName
+	 */
+	public HBaseTableSource(Configuration conf, String tableName) {
+		this(conf, tableName, new HBaseTableSchema(), null);
+	}
+
+	public HBaseTableSource(Configuration conf, String tableName, HBaseTableSchema hbaseSchema, int[] projectFields) {
+		super(conf, tableName, hbaseSchema, projectFields);
+	}
+
+	@Override
+	public HBaseTableSource projectFields(int[] fields) {
+		return new HBaseTableSource(conf, tableName, hbaseSchema, fields);
+	}
+
+	@Override
+	public InputFormat<Row, ?> getInputFormat(HBaseTableSchema projectedSchema) {
+		return new HBaseRowInputFormat(conf, tableName, projectedSchema);
+	}
+}
diff --git a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/flink-connectors/flink-connector-hbase-1.4/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
similarity index 92%
copy from flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
copy to flink-connectors/flink-connector-hbase-1.4/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
index 669af95..0b6f75d 100644
--- a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.connector.hbase.HBaseDynamicTableFactory
+org.apache.flink.connector.hbase1.HBase1DynamicTableFactory
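
The service entry above is what makes the renamed factory discoverable: Flink's factory lookup builds on java.util.ServiceLoader, so this file must list the exact class name. A rough sketch of that discovery step (the real lookup in FactoryUtil additionally matches the factory identifier and validates options):

import java.util.ServiceLoader;

import org.apache.flink.table.factories.Factory;

public class FactoryDiscoverySketch {
	public static void main(String[] args) {
		for (Factory factory : ServiceLoader.load(Factory.class)) {
			// HBase1DynamicTableFactory reports "hbase-1.4" here, which is how a
			// table declared with 'connector' = 'hbase-1.4' resolves to it.
			System.out.println(factory.factoryIdentifier());
		}
	}
}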
diff --git a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory b/flink-connectors/flink-connector-hbase-1.4/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
similarity index 93%
rename from flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
rename to flink-connectors/flink-connector-hbase-1.4/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
index 016a868..5d4e122 100644
--- a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
+++ b/flink-connectors/flink-connector-hbase-1.4/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.connector.hbase.HBaseTableFactory
+org.apache.flink.connector.hbase1.HBase1TableFactory
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseConnectorITCase.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseConnectorITCase.java
similarity index 98%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseConnectorITCase.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseConnectorITCase.java
index a784805..3195e05 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseConnectorITCase.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseConnectorITCase.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.api.common.functions.ReduceFunction;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -25,14 +25,14 @@ import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple1;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.connector.hbase.source.AbstractTableInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseRowDataInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseRowInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseTableSource;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
-import org.apache.flink.connector.hbase.util.HBaseTestBase;
 import org.apache.flink.connector.hbase.util.PlannerType;
+import org.apache.flink.connector.hbase1.source.AbstractTableInputFormat;
+import org.apache.flink.connector.hbase1.source.HBaseInputFormat;
+import org.apache.flink.connector.hbase1.source.HBaseRowDataInputFormat;
+import org.apache.flink.connector.hbase1.source.HBaseRowInputFormat;
+import org.apache.flink.connector.hbase1.source.HBaseTableSource;
+import org.apache.flink.connector.hbase1.util.HBaseTestBase;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.table.api.DataTypes;
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDescriptorTest.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDescriptorTest.java
similarity index 97%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDescriptorTest.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDescriptorTest.java
index d715ffb..48b402e 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDescriptorTest.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDescriptorTest.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.table.api.DataTypes;
 import org.apache.flink.table.api.ValidationException;
@@ -29,7 +29,6 @@ import org.apache.flink.table.descriptors.DescriptorTestBase;
 import org.apache.flink.table.descriptors.DescriptorValidator;
 import org.apache.flink.table.descriptors.FormatDescriptor;
 import org.apache.flink.table.descriptors.HBase;
-import org.apache.flink.table.descriptors.HBaseValidator;
 import org.apache.flink.table.descriptors.Rowtime;
 import org.apache.flink.table.descriptors.Schema;
 import org.apache.flink.table.descriptors.StreamTableDescriptor;
@@ -129,7 +128,7 @@ public class HBaseDescriptorTest extends DescriptorTestBase {
 
 	@Test
 	public void testFormatNeed(){
-		String expected = "The connector org.apache.flink.table.descriptors.HBase does not require a format description but org.apache.flink.connector.hbase.HBaseDescriptorTest$1 found.";
+		String expected = "The connector org.apache.flink.table.descriptors.HBase does not require a format description but org.apache.flink.connector.hbase1.HBaseDescriptorTest$1 found.";
 		AtomicReference<CatalogTableImpl> reference = new AtomicReference<>();
 		HBase hBase = new HBase();
 		Registration registration = (path, table) -> reference.set((CatalogTableImpl) table);
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactoryTest.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDynamicTableFactoryTest.java
similarity index 96%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactoryTest.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDynamicTableFactoryTest.java
index 47775ac..9a901c3 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactoryTest.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseDynamicTableFactoryTest.java
@@ -16,16 +16,16 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.api.common.typeinfo.Types;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseDynamicTableSink;
-import org.apache.flink.connector.hbase.source.HBaseDynamicTableSource;
 import org.apache.flink.connector.hbase.source.HBaseRowDataLookupFunction;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase1.options.HBaseOptions;
+import org.apache.flink.connector.hbase1.sink.HBaseDynamicTableSink;
+import org.apache.flink.connector.hbase1.source.HBaseDynamicTableSource;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.catalog.CatalogTableImpl;
 import org.apache.flink.table.catalog.ObjectIdentifier;
@@ -64,7 +64,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
- * Unit test for {@link HBaseDynamicTableFactory}.
+ * Unit test for {@link HBase1DynamicTableFactory}.
  */
 public class HBaseDynamicTableFactoryTest {
 
@@ -334,7 +334,7 @@ public class HBaseDynamicTableFactoryTest {
 			ObjectIdentifier.of("default", "default", "t1"),
 			new CatalogTableImpl(schema, options, "mock source"),
 			new Configuration(),
-			HBaseDynamicTableFactory.class.getClassLoader());
+			HBase1DynamicTableFactory.class.getClassLoader());
 	}
 
 	private static DynamicTableSink createTableSink(TableSchema schema, Map<String, String> options) {
@@ -343,7 +343,7 @@ public class HBaseDynamicTableFactoryTest {
 			ObjectIdentifier.of("default", "default", "t1"),
 			new CatalogTableImpl(schema, options, "mock sink"),
 			new Configuration(),
-			HBaseDynamicTableFactory.class.getClassLoader());
+			HBase1DynamicTableFactory.class.getClassLoader());
 	}
 
 }
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTableFactoryTest.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTableFactoryTest.java
similarity index 95%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTableFactoryTest.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTableFactoryTest.java
index e5c3217..198eb72 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTableFactoryTest.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTableFactoryTest.java
@@ -18,16 +18,16 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeinfo.Types;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseUpsertTableSink;
 import org.apache.flink.connector.hbase.source.HBaseLookupFunction;
-import org.apache.flink.connector.hbase.source.HBaseTableSource;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase1.options.HBaseOptions;
+import org.apache.flink.connector.hbase1.sink.HBaseUpsertTableSink;
+import org.apache.flink.connector.hbase1.source.HBaseTableSource;
 import org.apache.flink.table.api.DataTypes;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.DescriptorProperties;
@@ -97,7 +97,7 @@ public class HBaseTableFactoryTest {
 				DataTypes.FIELD(COL4, DataTypes.TIME())))
 			.build();
 		DescriptorProperties descriptorProperties = createDescriptor(schema);
-		TableSource source = TableFactoryService.find(HBaseTableFactory.class,
+		TableSource source = TableFactoryService.find(HBase1TableFactory.class,
 			descriptorProperties.asMap()).createTableSource(descriptorProperties.asMap());
 		Assert.assertTrue(source instanceof HBaseTableSource);
 		TableFunction<Row> tableFunction = ((HBaseTableSource) source).getLookupFunction(new String[]{ROWKEY});
@@ -145,7 +145,7 @@ public class HBaseTableFactoryTest {
 		DescriptorProperties descriptorProperties = createDescriptor(schema);
 
 		TableSink sink = TableFactoryService
-			.find(HBaseTableFactory.class, descriptorProperties.asMap())
+			.find(HBase1TableFactory.class, descriptorProperties.asMap())
 			.createTableSink(descriptorProperties.asMap());
 
 		Assert.assertTrue(sink instanceof HBaseUpsertTableSink);
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTablePlanTest.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTablePlanTest.java
similarity index 99%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTablePlanTest.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTablePlanTest.java
index 863c345..20a86a3 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTablePlanTest.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/HBaseTablePlanTest.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase1;
 
 import org.apache.flink.table.api.TableConfig;
 import org.apache.flink.table.planner.utils.StreamTableTestUtil;
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteExample.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/example/HBaseWriteExample.java
similarity index 98%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteExample.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/example/HBaseWriteExample.java
index 42fd97d..51b53d5 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteExample.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/example/HBaseWriteExample.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.example;
+package org.apache.flink.connector.hbase1.example;
 
 import org.apache.flink.api.common.functions.FlatMapFunction;
 import org.apache.flink.api.common.functions.RichMapFunction;
@@ -26,6 +26,7 @@ import org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormat;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.hbase.example.HBaseFlinkTestConstants;
 import org.apache.flink.util.Collector;
 
 import org.apache.hadoop.hbase.client.Mutation;
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteStreamExample.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/example/HBaseWriteStreamExample.java
similarity index 98%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteStreamExample.java
rename to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/example/HBaseWriteStreamExample.java
index aeac52e..52ff618 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteStreamExample.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/example/HBaseWriteStreamExample.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.example;
+package org.apache.flink.connector.hbase1.example;
 
 import org.apache.flink.api.common.io.OutputFormat;
 import org.apache.flink.configuration.Configuration;
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestBase.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestBase.java
similarity index 98%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestBase.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestBase.java
index c376b95..5ed790f 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestBase.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestBase.java
@@ -16,8 +16,9 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.util;
+package org.apache.flink.connector.hbase1.util;
 
+import org.apache.flink.connector.hbase.util.PlannerType;
 import org.apache.flink.table.api.EnvironmentSettings;
 
 import org.apache.hadoop.hbase.TableName;
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestingClusterAutoStarter.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestingClusterAutoStarter.java
similarity index 99%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestingClusterAutoStarter.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestingClusterAutoStarter.java
index 4ba1c17..ace32b9 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestingClusterAutoStarter.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/apache/flink/connector/hbase1/util/HBaseTestingClusterAutoStarter.java
@@ -18,7 +18,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.util;
+package org.apache.flink.connector.hbase1.util;
 
 import org.apache.flink.test.util.AbstractTestBase;
 
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
similarity index 86%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
copy to flink-connectors/flink-connector-hbase-1.4/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
index 5bad636..30d49da 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
+++ b/flink-connectors/flink-connector-hbase-1.4/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
@@ -16,12 +16,10 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.util;
+package org.slf4j.impl;
 
 /**
- * Planner type to use.
+ * Fake appender to work around HBase referring to it directly.
  */
-public enum PlannerType {
-	BLINK_PLANNER,
-	OLD_PLANNER
+public interface Log4jLoggerAdapter {
 }
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/hbase-site.xml b/flink-connectors/flink-connector-hbase-1.4/src/test/resources/hbase-site.xml
similarity index 100%
copy from flink-connectors/flink-connector-hbase/src/test/resources/hbase-site.xml
copy to flink-connectors/flink-connector-hbase-1.4/src/test/resources/hbase-site.xml
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/log4j2-test.properties b/flink-connectors/flink-connector-hbase-1.4/src/test/resources/log4j2-test.properties
similarity index 100%
copy from flink-connectors/flink-connector-hbase/src/test/resources/log4j2-test.properties
copy to flink-connectors/flink-connector-hbase-1.4/src/test/resources/log4j2-test.properties
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/org/apache/flink/connector/hbase/HBaseTablePlanTest.xml b/flink-connectors/flink-connector-hbase-1.4/src/test/resources/org/apache/flink/connector/hbase1/HBaseTablePlanTest.xml
similarity index 100%
copy from flink-connectors/flink-connector-hbase/src/test/resources/org/apache/flink/connector/hbase/HBaseTablePlanTest.xml
copy to flink-connectors/flink-connector-hbase-1.4/src/test/resources/org/apache/flink/connector/hbase1/HBaseTablePlanTest.xml
diff --git a/flink-connectors/flink-connector-hbase-2.2/README.md b/flink-connectors/flink-connector-hbase-2.2/README.md
new file mode 100644
index 0000000..0ad6d12
--- /dev/null
+++ b/flink-connectors/flink-connector-hbase-2.2/README.md
@@ -0,0 +1,89 @@
+# Flink HBase Connector
+
+This connector provides classes that allow access for Flink to [HBase](https://hbase.apache.org/).
+
+ *Version Compatibility*: This module is compatible with Apache HBase *2.2.3* (the latest stable version at the time of writing).
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.11/dev/project-configuration.html#adding-connector-and-library-dependencies).
+
+## Installing HBase
+
+Follow the instructions from the [HBase Quick Start Guide](http://hbase.apache.org/book.html#quickstart).
+
+## HBase Configuration
+
+Connecting to HBase always requires a `Configuration` instance. If there is an HBase gateway on the same host as the Flink gateway where the application is started, the configuration can be obtained by invoking `HBaseConfigurationUtil.createHBaseConf()` as in the examples below. Otherwise, a configuration should be provided with the proper core-site.xml, hdfs-site.xml, and hbase-site.xml files added as resources.
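+
+For example, when the application does not run on a host with an HBase gateway, a suitable configuration can be assembled manually. The following is a minimal sketch; the file paths are placeholders for wherever the client configuration files actually live:
+
+```java
+org.apache.hadoop.conf.Configuration conf = org.apache.hadoop.hbase.HBaseConfiguration.create();
+// Add the cluster's client configuration files as resources (example paths, adjust to your environment).
+conf.addResource(new org.apache.hadoop.fs.Path("/etc/hadoop/conf/core-site.xml"));
+conf.addResource(new org.apache.hadoop.fs.Path("/etc/hadoop/conf/hdfs-site.xml"));
+conf.addResource(new org.apache.hadoop.fs.Path("/etc/hbase/conf/hbase-site.xml"));
+```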
+
+## DataStream API
+
+### Reading tables into DataStreams
+
+To convert an HBase table into a DataStream, create an `HBaseTableSource` instance, then either convert it to a `DataStream` of `Row` objects with a built-in function, or use the Table API for a more flexible way to obtain a stream:
+
+```java
+StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, tableSettings);
+
+HBaseTableSource hBaseSource = new HBaseTableSource(HBaseConfigurationUtil.createHBaseConf(), "t1");
+hBaseSource.setRowKey("rowkey", byte[].class);
+hBaseSource.addColumn("f1", "str", byte[].class);
+
+// Direct conversion to DataStream<Row>
+DataStream<Row> rowStream = hBaseSource.getDataStream(env);
+
+// Table API
+((TableEnvironmentInternal) tableEnv).registerTableSourceInternal("t1", hBaseSource);
+Table table = tableEnv.sqlQuery("SELECT t.rowkey, t.f1.str FROM t1 t");
+DataStream<Tuple2<byte[], byte[]>> resultStream = tableEnv.toAppendStream(table, TypeInformation.of(new TypeHint<Tuple2<byte[], byte[]>>(){}));
+```
+
+### Writing into HBase tables from DataStreams
+There are two ways to write data to an HBase table from a `DataStream`:
+- Instantiate an `HBaseSinkFunction` and provide your own `HBaseMutationConverter` implementation that creates mutations from the records received.
+
+```java
+DataStream<Tuple2<byte[], byte[]>> dataStream = ...
+
+HBaseMutationConverter<Tuple2<byte[], byte[]>> mutationConverter = new HBaseMutationConverter<Tuple2<byte[], byte[]>>() {
+	private static final long serialVersionUID = 1L;
+
+	@Override
+	public void open() {
+	}
+
+	@Override
+	public Mutation convertToMutation(Tuple2<byte[], byte[]> record) {
+		Put put = new Put(record.f0);
+		put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("str"), record.f1);
+		return put;
+	}
+};
+
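+// The three numeric arguments below are the buffer flush settings:
+// maximum buffered size in bytes, maximum number of buffered mutations, and flush interval in milliseconds.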
+HBaseSinkFunction<Tuple2<byte[], byte[]>> hBaseSink = new HBaseSinkFunction<Tuple2<byte[], byte[]>>(
+		"t2", HBaseConfigurationUtil.createHBaseConf(), mutationConverter, 10000, 2, 1000);
+dataStream.addSink(hBaseSink);
+```
+
+- Use the built-in `HBaseDynamicTableSink` or `HBaseUpsertTableSink` classes, which convert `RowData` or `Tuple2<Boolean, Row>` objects into mutations based on an `HBaseTableSchema` provided to them.
+
+```java
+DataStream<Tuple2<Boolean, Row>> dataStream = ...
+
+HBaseTableSchema schema = new HBaseTableSchema();
+schema.setRowKey("rowkey", byte[].class);
+schema.addColumn("f1", "str", byte[].class);
+
+HBaseUpsertTableSink sink = new HBaseUpsertTableSink("t3", schema, HBaseConfigurationUtil.createHBaseConf(),
+		HBaseWriteOptions.builder().setBufferFlushIntervalMillis(1000).build());
+sink.consumeDataStream(dataStream);
+```
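+
+The `HBaseDynamicTableSink` is registered through the `hbase-2.2` dynamic table factory introduced by this commit, so it is normally used via SQL DDL rather than instantiated directly. A minimal sketch follows; only the `hbase-2.2` identifier and the `table-name` option come from this commit, the remaining option key is an assumption based on the existing HBase connector options:
+
+```java
+tableEnv.executeSql(
+	"CREATE TABLE t3 (" +
+	"  rowkey BYTES," +
+	"  f1 ROW<str BYTES>," +
+	"  PRIMARY KEY (rowkey) NOT ENFORCED" +
+	") WITH (" +
+	"  'connector' = 'hbase-2.2'," +
+	"  'table-name' = 't3'," +
+	"  'zookeeper.quorum' = 'localhost:2181'" +
+	")");
+```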
+
+## Building the connector
+
+The connector can be built with Maven:
+
+```
+cd flink-connectors/flink-connector-hbase-2.2
+mvn clean install
+```
\ No newline at end of file
diff --git a/flink-connectors/flink-connector-hbase/pom.xml b/flink-connectors/flink-connector-hbase-2.2/pom.xml
similarity index 75%
copy from flink-connectors/flink-connector-hbase/pom.xml
copy to flink-connectors/flink-connector-hbase-2.2/pom.xml
index 39f81e8..5975f05 100644
--- a/flink-connectors/flink-connector-hbase/pom.xml
+++ b/flink-connectors/flink-connector-hbase-2.2/pom.xml
@@ -29,12 +29,13 @@ under the License.
 		<relativePath>..</relativePath>
 	</parent>
 
-	<artifactId>flink-connector-hbase_${scala.binary.version}</artifactId>
-	<name>Flink : Connectors : HBase</name>
+	<artifactId>flink-connector-hbase-2.2_${scala.binary.version}</artifactId>
+	<name>Flink : Connectors : HBase 2.2</name>
 	<packaging>jar</packaging>
 
 	<properties>
-		<hbase.version>1.4.3</hbase.version>
+		<hbase.version>2.2.3</hbase.version>
+		<hbase.guava.version>28.1-jre</hbase.guava.version>
 	</properties>
 
 	<build>
@@ -54,7 +55,17 @@ under the License.
 
 	<dependencies>
 
-		<!-- core dependencies -->
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-connector-hbase-base_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<exclusions>
+				<exclusion>
+					<groupId>org.apache.hbase</groupId>
+					<artifactId>hbase-server</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
 
 		<dependency>
 			<groupId>org.apache.flink</groupId>
@@ -113,11 +124,9 @@ under the License.
 			<scope>provided</scope>
 		</dependency>
 
-		<!-- HBase server needed for TableOutputFormat -->
-		<!-- TODO implement bulk output format for HBase -->
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-server</artifactId>
+			<artifactId>hbase-client</artifactId>
 			<version>${hbase.version}</version>
 			<exclusions>
 				<!-- Remove unneeded dependency, which is conflicting with our jetty-util version. -->
@@ -155,28 +164,28 @@ under the License.
 					<artifactId>jersey-core</artifactId>
 				</exclusion>
 				<exclusion>
-					<groupId>com.sun.jersey</groupId>
-					<artifactId>jersey-server</artifactId>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-common</artifactId>
 				</exclusion>
 				<exclusion>
-					<groupId>tomcat</groupId>
-					<artifactId>jasper-compiler</artifactId>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-auth</artifactId>
 				</exclusion>
 				<exclusion>
-					<groupId>tomcat</groupId>
-					<artifactId>jasper-runtime</artifactId>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-annotations</artifactId>
 				</exclusion>
 				<exclusion>
-					<groupId>org.jruby.jcodings</groupId>
-					<artifactId>jcodings</artifactId>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-mapreduce-client-core</artifactId>
 				</exclusion>
 				<exclusion>
-					<groupId>org.jruby.joni</groupId>
-					<artifactId>joni</artifactId>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-client</artifactId>
 				</exclusion>
 				<exclusion>
-					<groupId>org.jamon</groupId>
-					<artifactId>jamon-runtime</artifactId>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-hdfs</artifactId>
 				</exclusion>
 				<exclusion>
 					<groupId>log4j</groupId>
@@ -207,6 +216,29 @@ under the License.
 
 		<dependency>
 			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-connector-hbase-base_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<exclusions>
+				<!-- exclude HBase dependencies -->
+				<exclusion>
+					<groupId>org.apache.hbase</groupId>
+					<artifactId>hbase-server</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>org.apache.hbase</groupId>
+					<artifactId>hbase-hadoop-compat</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>org.apache.hbase</groupId>
+					<artifactId>hbase-hadoop2-compat</artifactId>
+				</exclusion>
+			</exclusions>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-clients_${scala.binary.version}</artifactId>
 			<version>${project.version}</version>
 			<scope>test</scope>
@@ -224,8 +256,46 @@ under the License.
 				</exclusion>
 			</exclusions>
 		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-common</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+		</dependency>
 
-		<!-- Test dependencies are only available for Hadoop-2. -->
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
 			<artifactId>hbase-server</artifactId>
@@ -245,6 +315,59 @@ under the License.
 		</dependency>
 
 		<dependency>
+			<groupId>org.apache.hbase</groupId>
+			<artifactId>hbase-zookeeper</artifactId>
+			<version>${hbase.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>log4j</groupId>
+					<artifactId>log4j</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>org.slf4j</groupId>
+					<artifactId>slf4j-log4j12</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.hbase</groupId>
+			<artifactId>hbase-common</artifactId>
+			<version>${hbase.version}</version>
+			<type>test-jar</type>
+			<scope>test</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>log4j</groupId>
+					<artifactId>log4j</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>org.slf4j</groupId>
+					<artifactId>slf4j-log4j12</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+
+		<dependency>
+			<groupId>org.apache.hbase</groupId>
+			<artifactId>hbase-mapreduce</artifactId>
+			<version>${hbase.version}</version>
+			<scope>test</scope>
+			<exclusions>
+				<exclusion>
+					<groupId>log4j</groupId>
+					<artifactId>log4j</artifactId>
+				</exclusion>
+				<exclusion>
+					<groupId>org.slf4j</groupId>
+					<artifactId>slf4j-log4j12</artifactId>
+				</exclusion>
+			</exclusions>
+		</dependency>
+
+		<dependency>
 			<groupId>org.apache.hadoop</groupId>
 			<artifactId>hadoop-minicluster</artifactId>
 			<version>${hadoop.version}</version>
@@ -307,73 +430,10 @@ under the License.
 				</exclusion>
 			</exclusions>
 		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-test-utils_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-common</artifactId>
-			<version>${project.version}</version>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
-			<version>${project.version}</version>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
 	</dependencies>
 
 	<profiles>
 		<profile>
-			<id>cdh5.1.3</id>
-			<activation>
-				<property>
-					<name>cdh5.1.3</name>
-				</property>
-			</activation>
-			<properties>
-				<hbase.version>0.98.1-cdh5.1.3</hbase.version>
-				<hadoop.version>2.3.0-cdh5.1.3</hadoop.version>
-				<!-- Cloudera use different versions for hadoop core and commons-->
-				<!-- This profile could be removed if Cloudera fix this mismatch! -->
-				<hadoop.core.version>2.3.0-mr1-cdh5.1.3</hadoop.core.version>
-			</properties>
-			<dependencyManagement>
-				<dependencies>
-					<dependency>
-						<groupId>org.apache.hadoop</groupId>
-						<artifactId>hadoop-core</artifactId>
-						<version>${hadoop.core.version}</version>
-					</dependency>
-				</dependencies>
-			</dependencyManagement>
-		</profile>
-		<profile>
 			<id>java11</id>
 			<activation>
 				<jdk>11</jdk>
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactory.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2DynamicTableFactory.java
similarity index 89%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactory.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2DynamicTableFactory.java
index 6e58aba..09b0d05 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactory.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2DynamicTableFactory.java
@@ -16,17 +16,16 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.configuration.ConfigOption;
 import org.apache.flink.configuration.ConfigOptions;
 import org.apache.flink.configuration.MemorySize;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseDynamicTableSink;
-import org.apache.flink.connector.hbase.source.HBaseDynamicTableSource;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink;
+import org.apache.flink.connector.hbase2.source.HBaseDynamicTableSource;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.connector.sink.DynamicTableSink;
 import org.apache.flink.table.connector.source.DynamicTableSource;
@@ -46,9 +45,9 @@ import static org.apache.flink.table.factories.FactoryUtil.createTableFactoryHel
 /**
  * HBase connector factory.
  */
-public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
+public class HBase2DynamicTableFactory implements DynamicTableSourceFactory, DynamicTableSinkFactory {
 
-	private static final String IDENTIFIER = "hbase-1.4";
+	private static final String IDENTIFIER = "hbase-2.2";
 
 	private static final ConfigOption<String> TABLE_NAME = ConfigOptions
 		.key("table-name")
@@ -108,15 +107,12 @@ public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, Dyna
 		validatePrimaryKey(tableSchema);
 
 		String hTableName = helper.getOptions().get(TABLE_NAME);
-		// create default configuration from current runtime env (`hbase-site.xml` in classpath) first,
-		Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
-		hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, helper.getOptions().get(ZOOKEEPER_QUORUM));
-		hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, helper.getOptions().get(ZOOKEEPER_ZNODE_PARENT));
+
 		String nullStringLiteral = helper.getOptions().get(NULL_STRING_LITERAL);
 		HBaseTableSchema hbaseSchema = HBaseTableSchema.fromTableSchema(tableSchema);
 
 		return new HBaseDynamicTableSource(
-			hbaseClientConf,
+			getHbaseConf(helper),
 			hTableName,
 			hbaseSchema,
 			nullStringLiteral);
@@ -129,10 +125,7 @@ public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, Dyna
 		TableSchema tableSchema = context.getCatalogTable().getSchema();
 		validatePrimaryKey(tableSchema);
 
-		HBaseOptions.Builder hbaseOptionsBuilder = HBaseOptions.builder();
-		hbaseOptionsBuilder.setTableName(helper.getOptions().get(TABLE_NAME));
-		hbaseOptionsBuilder.setZkQuorum(helper.getOptions().get(ZOOKEEPER_QUORUM));
-		hbaseOptionsBuilder.setZkNodeParent(helper.getOptions().get(ZOOKEEPER_ZNODE_PARENT));
+		String hTableName = helper.getOptions().get(TABLE_NAME);
 
 		HBaseWriteOptions.Builder writeBuilder = HBaseWriteOptions.builder();
 		writeBuilder.setBufferFlushMaxSizeInBytes(helper.getOptions().get(SINK_BUFFER_FLUSH_MAX_SIZE).getBytes());
@@ -142,8 +135,9 @@ public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, Dyna
 		HBaseTableSchema hbaseSchema = HBaseTableSchema.fromTableSchema(tableSchema);
 
 		return new HBaseDynamicTableSink(
+			hTableName,
 			hbaseSchema,
-			hbaseOptionsBuilder.build(),
+			getHbaseConf(helper),
 			writeBuilder.build(),
 			nullStringLiteral);
 	}
@@ -157,7 +151,6 @@ public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, Dyna
 	public Set<ConfigOption<?>> requiredOptions() {
 		Set<ConfigOption<?>> set = new HashSet<>();
 		set.add(TABLE_NAME);
-		set.add(ZOOKEEPER_QUORUM);
 		return set;
 	}
 
@@ -165,6 +158,7 @@ public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, Dyna
 	public Set<ConfigOption<?>> optionalOptions() {
 		Set<ConfigOption<?>> set = new HashSet<>();
 		set.add(ZOOKEEPER_ZNODE_PARENT);
+		set.add(ZOOKEEPER_QUORUM);
 		set.add(NULL_STRING_LITERAL);
 		set.add(SINK_BUFFER_FLUSH_MAX_SIZE);
 		set.add(SINK_BUFFER_FLUSH_MAX_ROWS);
@@ -202,4 +196,12 @@ public class HBaseDynamicTableFactory implements DynamicTableSourceFactory, Dyna
 			}
 		});
 	}
+
+	private static Configuration getHbaseConf(TableFactoryHelper helper)  {
+		Configuration hbaseClientConf = HBaseConfigurationUtil.createHBaseConf();
+		hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, helper.getOptions().get(ZOOKEEPER_QUORUM));
+		hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, helper.getOptions().get(ZOOKEEPER_ZNODE_PARENT));
+
+		return hbaseClientConf;
+	}
 }
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseTableFactory.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2TableFactory.java
similarity index 79%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseTableFactory.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2TableFactory.java
index ca3e1e5..895e6325 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/HBaseTableFactory.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBase2TableFactory.java
@@ -16,21 +16,19 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseUpsertTableSink;
-import org.apache.flink.connector.hbase.source.HBaseTableSource;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase2.sink.HBaseUpsertTableSink;
+import org.apache.flink.connector.hbase2.source.HBaseTableSource;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.DescriptorProperties;
-import org.apache.flink.table.descriptors.HBaseValidator;
 import org.apache.flink.table.factories.StreamTableSinkFactory;
 import org.apache.flink.table.factories.StreamTableSourceFactory;
 import org.apache.flink.table.sinks.StreamTableSink;
@@ -52,6 +50,13 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_TABLE_NAME;
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_TYPE_VALUE_HBASE;
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL;
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS;
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE;
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_ZK_NODE_PARENT;
+import static org.apache.flink.connector.hbase2.HBaseValidator.CONNECTOR_ZK_QUORUM;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_PROPERTY_VERSION;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_TYPE;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_VERSION;
@@ -60,14 +65,6 @@ import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK;
 import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_ROWTIME;
 import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_STRATEGY_DATA_TYPE;
 import static org.apache.flink.table.descriptors.DescriptorProperties.WATERMARK_STRATEGY_EXPR;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_TABLE_NAME;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_TYPE_VALUE_HBASE;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_VERSION_VALUE_143;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_ZK_NODE_PARENT;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_ZK_QUORUM;
 import static org.apache.flink.table.descriptors.Schema.SCHEMA;
 import static org.apache.flink.table.descriptors.Schema.SCHEMA_DATA_TYPE;
 import static org.apache.flink.table.descriptors.Schema.SCHEMA_NAME;
@@ -77,18 +74,12 @@ import static org.apache.flink.table.descriptors.Schema.SCHEMA_TYPE;
  * Factory for creating configured instances of {@link HBaseTableSource} or sink.
  */
 @Internal
-public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamTableSinkFactory<Tuple2<Boolean, Row>> {
+public class HBase2TableFactory implements StreamTableSourceFactory<Row>, StreamTableSinkFactory<Tuple2<Boolean, Row>> {
 
 	@Override
 	public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
 		final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
-		// create default configuration from current runtime env (`hbase-site.xml` in classpath) first,
-		Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
-		String hbaseZk = descriptorProperties.getString(CONNECTOR_ZK_QUORUM);
-		hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseZk);
-		descriptorProperties
-			.getOptionalString(CONNECTOR_ZK_NODE_PARENT)
-			.ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
+		Configuration hbaseClientConf = getHConf(descriptorProperties);
 
 		String hTableName = descriptorProperties.getString(CONNECTOR_TABLE_NAME);
 		TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
@@ -100,12 +91,6 @@ public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamT
 	@Override
 	public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
 		final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
-		HBaseOptions.Builder hbaseOptionsBuilder = HBaseOptions.builder();
-		hbaseOptionsBuilder.setZkQuorum(descriptorProperties.getString(CONNECTOR_ZK_QUORUM));
-		hbaseOptionsBuilder.setTableName(descriptorProperties.getString(CONNECTOR_TABLE_NAME));
-		descriptorProperties
-			.getOptionalString(CONNECTOR_ZK_NODE_PARENT)
-			.ifPresent(hbaseOptionsBuilder::setZkNodeParent);
 
 		TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
 			descriptorProperties.getTableSchema(SCHEMA));
@@ -123,8 +108,9 @@ public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamT
 			.ifPresent(v -> writeBuilder.setBufferFlushIntervalMillis(v.toMillis()));
 
 		return new HBaseUpsertTableSink(
+			descriptorProperties.getString(CONNECTOR_TABLE_NAME),
 			hbaseSchema,
-			hbaseOptionsBuilder.build(),
+			getHConf(descriptorProperties),
 			writeBuilder.build()
 		);
 	}
@@ -171,7 +157,6 @@ public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamT
 	public Map<String, String> requiredContext() {
 		Map<String, String> context = new HashMap<>();
 		context.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_HBASE); // hbase
-		context.put(CONNECTOR_VERSION, hbaseVersion()); // version
 		context.put(CONNECTOR_PROPERTY_VERSION, "1"); // backwards compatibility
 		return context;
 	}
@@ -180,6 +165,7 @@ public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamT
 	public List<String> supportedProperties() {
 		List<String> properties = new ArrayList<>();
 
+		properties.add(CONNECTOR_VERSION);
 		properties.add(CONNECTOR_TABLE_NAME);
 		properties.add(CONNECTOR_ZK_QUORUM);
 		properties.add(CONNECTOR_ZK_NODE_PARENT);
@@ -206,7 +192,14 @@ public class HBaseTableFactory implements StreamTableSourceFactory<Row>, StreamT
 		return properties;
 	}
 
-	private String hbaseVersion() {
-		return CONNECTOR_VERSION_VALUE_143;
+	private Configuration getHConf(DescriptorProperties descriptorProperties)  {
+		Configuration hbaseClientConf = HBaseConfigurationUtil.createHBaseConf();
+		descriptorProperties.getOptionalString(CONNECTOR_ZK_QUORUM)
+			.ifPresent(zkQ -> hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, zkQ));
+
+		descriptorProperties
+			.getOptionalString(CONNECTOR_ZK_NODE_PARENT)
+			.ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
+		return hbaseClientConf;
 	}
 }
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBaseValidator.java
similarity index 56%
copy from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
copy to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBaseValidator.java
index 181e10f..58648f0 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/HBaseValidator.java
@@ -16,31 +16,31 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.sink;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.annotation.Internal;
-
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-
-import java.io.Serializable;
+import org.apache.flink.table.descriptors.AbstractHBaseValidator;
 
 /**
- * A converter used to converts the input record into HBase {@link Mutation}.
- * @param <T> type of input record.
+ * The validator for HBase.
+ * More features to be supported, e.g., batch read/write, async API (supported from HBase version 2.0.0), caching for the LookupFunction.
  */
 @Internal
-public interface HBaseMutationConverter<T> extends Serializable {
+public class HBaseValidator extends AbstractHBaseValidator {
+	public static final String CONNECTOR_VERSION_VALUE_223 = "2.2.3";
+
+	@Override
+	protected boolean validateZkQuorum() {
+		return true;
+	}
 
-	/**
-	 * Initialization method for the function. It is called once before conversion method.
-	 */
-	void open();
+	@Override
+	protected String getConnectorVersion() {
+		return CONNECTOR_VERSION_VALUE_223;
+	}
 
-	/**
-	 * Converts the input record into HBase {@link Mutation}. A mutation can be a
-	 * {@link Put} or {@link Delete}.
-	 */
-	Mutation convertToMutation(T record);
+	@Override
+	protected boolean zkQuorumIsOptional() {
+		return true;
+	}
 }
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseDynamicTableSink.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseDynamicTableSink.java
similarity index 78%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseDynamicTableSink.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseDynamicTableSink.java
index 5d60ae3..30e1b79 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseDynamicTableSink.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseDynamicTableSink.java
@@ -16,13 +16,13 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.sink;
+package org.apache.flink.connector.hbase2.sink;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
+import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
+import org.apache.flink.connector.hbase.sink.RowDataToMutationConverter;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
 import org.apache.flink.table.connector.ChangelogMode;
 import org.apache.flink.table.connector.sink.DynamicTableSink;
@@ -31,7 +31,6 @@ import org.apache.flink.table.data.RowData;
 import org.apache.flink.types.RowKind;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
 
 /**
  * HBase table sink implementation.
@@ -39,30 +38,31 @@ import org.apache.hadoop.hbase.HConstants;
 @Internal
 public class HBaseDynamicTableSink implements DynamicTableSink {
 
+	private final String hbaseTableName;
 	private final HBaseTableSchema hbaseTableSchema;
-	private final HBaseOptions hbaseOptions;
+	private final Configuration hbaseConf;
 	private final HBaseWriteOptions writeOptions;
 	private final String nullStringLiteral;
 
 	public HBaseDynamicTableSink(
+			String hbaseTableName,
 			HBaseTableSchema hbaseTableSchema,
-			HBaseOptions hbaseOptions,
+			Configuration hbaseConf,
 			HBaseWriteOptions writeOptions,
 			String nullStringLiteral) {
+
+		this.hbaseTableName = hbaseTableName;
 		this.hbaseTableSchema = hbaseTableSchema;
-		this.hbaseOptions = hbaseOptions;
+		this.hbaseConf = hbaseConf;
 		this.writeOptions = writeOptions;
 		this.nullStringLiteral = nullStringLiteral;
 	}
 
 	@Override
 	public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
-		Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
-		hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseOptions.getZkQuorum());
-		hbaseOptions.getZkNodeParent().ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
 		HBaseSinkFunction<RowData> sinkFunction = new HBaseSinkFunction<>(
-			hbaseOptions.getTableName(),
-			hbaseClientConf,
+			hbaseTableName,
+			hbaseConf,
 			new RowDataToMutationConverter(hbaseTableSchema, nullStringLiteral),
 			writeOptions.getBufferFlushMaxSizeInBytes(),
 			writeOptions.getBufferFlushMaxRows(),
@@ -84,7 +84,7 @@ public class HBaseDynamicTableSink implements DynamicTableSink {
 
 	@Override
 	public DynamicTableSink copy() {
-		return new HBaseDynamicTableSink(hbaseTableSchema, hbaseOptions, writeOptions, nullStringLiteral);
+		return new HBaseDynamicTableSink(hbaseTableName, hbaseTableSchema, hbaseConf, writeOptions, nullStringLiteral);
 	}
 
 	@Override
@@ -100,11 +100,6 @@ public class HBaseDynamicTableSink implements DynamicTableSink {
 	}
 
 	@VisibleForTesting
-	public HBaseOptions getHBaseOptions() {
-		return hbaseOptions;
-	}
-
-	@VisibleForTesting
 	public HBaseWriteOptions getWriteOptions() {
 		return writeOptions;
 	}
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseUpsertTableSink.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseUpsertTableSink.java
similarity index 83%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseUpsertTableSink.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseUpsertTableSink.java
index f882833..085868c 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseUpsertTableSink.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/sink/HBaseUpsertTableSink.java
@@ -16,15 +16,15 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.sink;
+package org.apache.flink.connector.hbase2.sink;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
+import org.apache.flink.connector.hbase.sink.HBaseSinkFunction;
+import org.apache.flink.connector.hbase.sink.LegacyMutationConverter;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.datastream.DataStreamSink;
@@ -36,7 +36,6 @@ import org.apache.flink.table.utils.TableConnectorUtils;
 import org.apache.flink.types.Row;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
 
 import java.util.Arrays;
 
@@ -50,18 +49,21 @@ public class HBaseUpsertTableSink implements UpsertStreamTableSink<Row> {
 
 	private final HBaseTableSchema hbaseTableSchema;
 	private final TableSchema tableSchema;
-	private final HBaseOptions hbaseOptions;
+	private final Configuration hconf;
 	private final HBaseWriteOptions writeOptions;
+	private final String tableName;
 
 	public HBaseUpsertTableSink(
+			String tableName,
 			HBaseTableSchema hbaseTableSchema,
-			HBaseOptions hbaseOptions,
+			Configuration hconf,
 			HBaseWriteOptions writeOptions) {
 		checkArgument(hbaseTableSchema.getRowKeyName().isPresent(), "HBaseUpsertTableSink requires rowkey is set.");
 		this.hbaseTableSchema = hbaseTableSchema;
 		this.tableSchema = hbaseTableSchema.convertsToTableSchema();
-		this.hbaseOptions = hbaseOptions;
+		this.hconf = hconf;
 		this.writeOptions = writeOptions;
+		this.tableName = tableName;
 	}
 
 	@Override
@@ -89,12 +91,9 @@ public class HBaseUpsertTableSink implements UpsertStreamTableSink<Row> {
 
 	@Override
 	public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
-		Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
-		hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseOptions.getZkQuorum());
-		hbaseOptions.getZkNodeParent().ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
 		HBaseSinkFunction sinkFunction = new HBaseSinkFunction(
-			hbaseOptions.getTableName(),
-			hbaseClientConf,
+			tableName,
+			hconf,
 			new LegacyMutationConverter(hbaseTableSchema),
 			writeOptions.getBufferFlushMaxSizeInBytes(),
 			writeOptions.getBufferFlushMaxRows(),
@@ -113,7 +112,7 @@ public class HBaseUpsertTableSink implements UpsertStreamTableSink<Row> {
 				"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
 		}
 
-		return new HBaseUpsertTableSink(hbaseTableSchema, hbaseOptions, writeOptions);
+		return new HBaseUpsertTableSink(tableName, hbaseTableSchema, hconf, writeOptions);
 	}
 
 	@VisibleForTesting
@@ -122,11 +121,6 @@ public class HBaseUpsertTableSink implements UpsertStreamTableSink<Row> {
 	}
 
 	@VisibleForTesting
-	public HBaseOptions getHBaseOptions() {
-		return hbaseOptions;
-	}
-
-	@VisibleForTesting
 	public HBaseWriteOptions getWriteOptions() {
 		return writeOptions;
 	}
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/AbstractTableInputFormat.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/AbstractTableInputFormat.java
similarity index 93%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/AbstractTableInputFormat.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/AbstractTableInputFormat.java
index 6cd7a4d..24b7f64 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/AbstractTableInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/AbstractTableInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase2.source;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
@@ -25,14 +25,18 @@ import org.apache.flink.api.common.io.LocatableInputSplitAssigner;
 import org.apache.flink.api.common.io.RichInputFormat;
 import org.apache.flink.api.common.io.statistics.BaseStatistics;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.hbase.source.TableInputSplit;
 import org.apache.flink.connector.hbase.util.HBaseConfigurationUtil;
 import org.apache.flink.core.io.InputSplitAssigner;
+import org.apache.flink.util.IOUtils;
 
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.slf4j.Logger;
@@ -55,7 +59,8 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 	protected boolean endReached = false;
 
 	protected transient Connection connection = null;
-	protected transient HTable table = null;
+	protected transient Table table = null;
+	protected transient RegionLocator regionLocator = null;
 	protected transient Scan scan = null;
 
 	/** HBase iterator wrapper. */
@@ -139,6 +144,7 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 		scannedRows = 0;
 	}
 
+	@Override
 	public T nextRecord(T reuse) throws IOException {
 		if (resultScanner == null) {
 			throw new IOException("No table result scanner provided!");
@@ -184,14 +190,9 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 	public void close() throws IOException {
 		LOG.info("Closing split (scanned {} rows)", scannedRows);
 		currentRow = null;
-		try {
-			if (resultScanner != null) {
-				resultScanner.close();
-			}
-			closeTable();
-		} finally {
-			resultScanner = null;
-		}
+		IOUtils.closeQuietly(resultScanner);
+		resultScanner = null;
+		closeTable();
 	}
 
 	public void closeTable() {
@@ -219,7 +220,7 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 			initTable();
 
 			// Get the starting and ending row keys for every region in the currently open table
-			final Pair<byte[][], byte[][]> keys = table.getRegionLocator().getStartEndKeys();
+			final Pair<byte[][], byte[][]> keys = regionLocator.getStartEndKeys();
 			if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) {
 				throw new IOException("Expecting at least one region.");
 			}
@@ -232,7 +233,7 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 			for (int i = 0; i < keys.getFirst().length; i++) {
 				final byte[] startKey = keys.getFirst()[i];
 				final byte[] endKey = keys.getSecond()[i];
-				final String regionLocation = table.getRegionLocator().getRegionLocation(startKey, false).getHostnamePort();
+				final String regionLocation = regionLocator.getRegionLocation(startKey, false).getHostnamePort();
 				// Test if the given region is to be included in the InputSplit while splitting the regions of a table
 				if (!includeRegionInScan(startKey, endKey)) {
 					continue;
@@ -249,7 +250,7 @@ public abstract class AbstractTableInputFormat<T> extends RichInputFormat<T, Tab
 					final byte[] splitStop = (scanWithNoUpperBound || Bytes.compareTo(endKey, stopRow) <= 0)
 						&& !isLastRegion ? endKey : stopRow;
 					int id = splits.size();
-					final TableInputSplit split = new TableInputSplit(id, hosts, table.getTableName(), splitStart, splitStop);
+					final TableInputSplit split = new TableInputSplit(id, hosts, table.getName().getName(), splitStart, splitStop);
 					splits.add(split);
 				}
 			}
diff --git a/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseDynamicTableSource.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseDynamicTableSource.java
new file mode 100644
index 0000000..7f2d8dd
--- /dev/null
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseDynamicTableSource.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.hbase2.source;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.io.InputFormat;
+import org.apache.flink.connector.hbase.source.AbstractHBaseDynamicTableSource;
+import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.table.connector.source.DynamicTableSource;
+import org.apache.flink.table.data.RowData;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * HBase table source implementation.
+ */
+@Internal
+public class HBaseDynamicTableSource extends AbstractHBaseDynamicTableSource {
+
+	public HBaseDynamicTableSource(Configuration conf, String tableName, HBaseTableSchema hbaseSchema,
+			String nullStringLiteral) {
+		super(conf, tableName, hbaseSchema, nullStringLiteral);
+	}
+
+	@Override
+	public DynamicTableSource copy() {
+		return new HBaseDynamicTableSource(conf, tableName, hbaseSchema, nullStringLiteral);
+	}
+
+	@Override
+	protected InputFormat<RowData, ?> getInputFormat() {
+		return new HBaseRowDataInputFormat(conf, tableName, hbaseSchema, nullStringLiteral);
+	}
+}
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseInputFormat.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseInputFormat.java
similarity index 75%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseInputFormat.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseInputFormat.java
index 8076e8f..3ca77fa 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseInputFormat.java
@@ -16,18 +16,19 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase2.source;
 
 import org.apache.flink.annotation.Experimental;
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.api.java.tuple.Tuple;
 
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 
-import java.io.IOException;
-
 /**
  * {@link InputFormat} subclass that wraps the access for HTables.
  */
@@ -49,6 +50,7 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	 * Returns an instance of Scan that retrieves the required subset of records from the HBase table.
 	 * @return The appropriate instance of Scan for this usecase.
 	 */
+	@Override
 	protected abstract Scan getScanner();
 
 	/**
@@ -56,6 +58,7 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	 * Per instance of a TableInputFormat derivative only a single tablename is possible.
 	 * @return The name of the table
 	 */
+	@Override
 	protected abstract String getTableName();
 
 	/**
@@ -67,9 +70,9 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	protected abstract T mapResultToTuple(Result r);
 
 	@Override
-	protected void initTable() throws IOException {
+	protected void initTable() {
 		if (table == null) {
-			table = createTable();
+			createTable();
 		}
 		if (table != null && scan == null) {
 			scan = getScanner();
@@ -79,18 +82,23 @@ public abstract class HBaseInputFormat<T extends Tuple> extends AbstractTableInp
 	/**
 	 * Create an {@link HTable} instance and set it into this format.
 	 */
-	private HTable createTable() {
-		LOG.info("Initializing HBaseConfiguration");
-		org.apache.hadoop.conf.Configuration hConf = getHadoopConfiguration();
-
+	private void createTable() {
 		try {
-			return new HTable(hConf, getTableName());
+			if (connection == null) {
+				this.connection = ConnectionFactory.createConnection(getHadoopConfiguration());
+			}
+			TableName name = TableName.valueOf(getTableName());
+			table = connection.getTable(name);
+			regionLocator = connection.getRegionLocator(name);
+		} catch (TableNotFoundException tnfe) {
+			LOG.error("The table " + table.getName().getNameAsString() + " not found ", tnfe);
+			throw new RuntimeException("HBase table '" + table.getName().getNameAsString() + "' not found.", tnfe);
 		} catch (Exception e) {
-			LOG.error("Error instantiating a new HTable instance", e);
+			throw new RuntimeException("Error connecting to the HBase table", e);
 		}
-		return null;
 	}
 
+	@Override
 	protected T mapResultToOutType(Result r) {
 		return mapResultToTuple(r);
 	}
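
Note on the file above: the HBase 1.x HTable constructor is gone in HBase 2, so the format now obtains a Table and a RegionLocator from a shared Connection. A minimal standalone sketch of that client pattern follows; the quorum and table name are placeholders, not values taken from this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;

    public class HBase2ClientSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "localhost:2181"); // placeholder quorum

            try (Connection connection = ConnectionFactory.createConnection(conf)) {
                TableName name = TableName.valueOf("my_table");   // placeholder table
                // Table + RegionLocator replace the all-in-one HTable of HBase 1.x.
                Table table = connection.getTable(name);
                RegionLocator regionLocator = connection.getRegionLocator(name);
                System.out.println("regions: " + regionLocator.getAllRegionLocations().size());
                table.close();
            }
        }
    }
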
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataInputFormat.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataInputFormat.java
similarity index 88%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataInputFormat.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataInputFormat.java
index 43a4fad..506f315 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowDataInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase2.source;
 
 import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.connector.hbase.util.HBaseSerde;
@@ -26,7 +26,6 @@ import org.apache.flink.table.data.RowData;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.slf4j.Logger;
@@ -84,10 +83,14 @@ public class HBaseRowDataInputFormat extends AbstractTableInputFormat<RowData> {
 		return serde.convertToRow(res);
 	}
 
-	private void connectToTable() throws IOException {
+	private void connectToTable() throws IOException{
 		try {
-			connection = ConnectionFactory.createConnection(getHadoopConfiguration());
-			table = (HTable) connection.getTable(TableName.valueOf(tableName));
+			if (connection == null) {
+				connection = ConnectionFactory.createConnection(getHadoopConfiguration());
+			}
+			TableName name = TableName.valueOf(getTableName());
+			table = connection.getTable(name);
+			regionLocator = connection.getRegionLocator(name);
 		} catch (TableNotFoundException tnfe) {
 			LOG.error("The table " + tableName + " not found ", tnfe);
 			throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowInputFormat.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowInputFormat.java
similarity index 91%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowInputFormat.java
rename to flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowInputFormat.java
index 4d5f5b4..aa878a0 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowInputFormat.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseRowInputFormat.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.source;
+package org.apache.flink.connector.hbase2.source;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.api.common.io.InputFormat;
@@ -30,7 +30,6 @@ import org.apache.flink.types.Row;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.slf4j.Logger;
@@ -87,8 +86,12 @@ public class HBaseRowInputFormat extends AbstractTableInputFormat<Row> implement
 
 	private void connectToTable() throws IOException {
 		try {
-			connection = ConnectionFactory.createConnection(getHadoopConfiguration());
-			table = (HTable) connection.getTable(TableName.valueOf(tableName));
+			if (connection == null) {
+				this.connection = ConnectionFactory.createConnection(getHadoopConfiguration());
+			}
+			TableName name = TableName.valueOf(getTableName());
+			table = connection.getTable(name);
+			regionLocator = connection.getRegionLocator(name);
 		} catch (TableNotFoundException tnfe) {
 			LOG.error("The table " + tableName + " not found ", tnfe);
 			throw new RuntimeException("HBase table '" + tableName + "' not found.", tnfe);
diff --git a/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseTableSource.java b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseTableSource.java
new file mode 100644
index 0000000..811897d
--- /dev/null
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/java/org/apache/flink/connector/hbase2/source/HBaseTableSource.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.connector.hbase2.source;
+
+import org.apache.flink.annotation.Internal;
+import org.apache.flink.api.common.io.InputFormat;
+import org.apache.flink.connector.hbase.source.AbstractHBaseTableSource;
+import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.types.Row;
+
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Creates a TableSource to scan an HBase table.
+ *
+ * <p>The table name and the required HBase configuration are passed during {@link HBaseTableSource} construction.
+ * Use {@link #addColumn(String, String, Class)} to specify the family, qualifier, and type of columns to scan.
+ *
+ * <p>The TableSource returns {@link Row} with nested Rows for each column family.
+ *
+ * <p>The HBaseTableSource is used as shown in the example below.
+ *
+ * <pre>
+ * {@code
+ * HBaseTableSource hSrc = new HBaseTableSource(conf, "hTable");
+ * hSrc.setRowKey("rowkey", String.class);
+ * hSrc.addColumn("fam1", "col1", byte[].class);
+ * hSrc.addColumn("fam1", "col2", Integer.class);
+ * hSrc.addColumn("fam2", "col1", String.class);
+ *
+ * tableEnv.registerTableSourceInternal("hTable", hSrc);
+ * Table res = tableEnv.sqlQuery(
+ *   "SELECT t.fam2.col1, SUM(t.fam1.col2) FROM hTable AS t " +
+ *   "WHERE t.rowkey LIKE 'flink%' GROUP BY t.fam2.col1");
+ * }
+ * </pre>
+ */
+@Internal
+public class HBaseTableSource extends AbstractHBaseTableSource {
+
+	/**
+	 * The HBase configuration and the name of the table to read.
+	 *
+	 * @param conf      hbase configuration
+	 * @param tableName the tableName
+	 */
+	public HBaseTableSource(Configuration conf, String tableName) {
+		this(conf, tableName, new HBaseTableSchema(), null);
+	}
+
+	public HBaseTableSource(Configuration conf, String tableName, HBaseTableSchema hbaseSchema, int[] projectFields) {
+		super(conf, tableName, hbaseSchema, projectFields);
+	}
+
+	@Override
+	public HBaseTableSource projectFields(int[] fields) {
+		return new HBaseTableSource(conf, tableName, hbaseSchema, fields);
+	}
+
+	@Override
+	public InputFormat<Row, ?> getInputFormat(HBaseTableSchema projectedSchema) {
+		return new HBaseRowInputFormat(conf, tableName, projectedSchema);
+	}
+}
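
The HBaseTableSource above delegates to AbstractHBaseTableSource from flink-connector-hbase-base; only projectFields and getInputFormat are version specific. A small usage sketch, assuming a table named "hTable" with the column layout shown below (names and types are illustrative only):

    import org.apache.flink.connector.hbase2.source.HBaseTableSource;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration hConf = HBaseConfiguration.create();
    HBaseTableSource source = new HBaseTableSource(hConf, "hTable");
    source.setRowKey("rowkey", Integer.class);
    source.addColumn("family1", "col1", Integer.class);
    source.addColumn("family2", "col1", String.class);

    // Keep only a subset of the declared fields; indices follow declaration order.
    HBaseTableSource projected = source.projectFields(new int[]{0, 1});
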
diff --git a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/flink-connectors/flink-connector-hbase-2.2/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
similarity index 92%
copy from flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
copy to flink-connectors/flink-connector-hbase-2.2/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
index 669af95..904d775 100644
--- a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.connector.hbase.HBaseDynamicTableFactory
+org.apache.flink.connector.hbase2.HBase2DynamicTableFactory
diff --git a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory b/flink-connectors/flink-connector-hbase-2.2/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
similarity index 93%
rename from flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
rename to flink-connectors/flink-connector-hbase-2.2/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
index 669af95..56a7085 100644
--- a/flink-connectors/flink-connector-hbase/src/main/resources/META-INF/services/org.apache.flink.table.factories.Factory
+++ b/flink-connectors/flink-connector-hbase-2.2/src/main/resources/META-INF/services/org.apache.flink.table.factories.TableFactory
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-org.apache.flink.connector.hbase.HBaseDynamicTableFactory
+org.apache.flink.connector.hbase2.HBase2TableFactory
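
The two service files above are how the new module is picked up: Flink's table factory discovery is essentially a ServiceLoader lookup over META-INF/services entries. An illustration of that mechanism (not part of the patch):

    import java.util.ServiceLoader;

    import org.apache.flink.table.factories.Factory;

    public class ListTableFactories {
        public static void main(String[] args) {
            for (Factory factory : ServiceLoader.load(Factory.class)) {
                // With flink-connector-hbase-2.2 on the classpath this prints, among
                // others, the identifier of HBase2DynamicTableFactory, which is the
                // value used for 'connector' in DDL (the tests below use 'hbase-2.2').
                System.out.println(factory.factoryIdentifier());
            }
        }
    }
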
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseConnectorITCase.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseConnectorITCase.java
similarity index 70%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseConnectorITCase.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseConnectorITCase.java
index a784805..c6fc1ee 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseConnectorITCase.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseConnectorITCase.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.api.common.functions.ReduceFunction;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
@@ -25,14 +25,14 @@ import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
 import org.apache.flink.api.java.tuple.Tuple1;
 import org.apache.flink.api.java.typeutils.RowTypeInfo;
-import org.apache.flink.connector.hbase.source.AbstractTableInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseRowDataInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseRowInputFormat;
-import org.apache.flink.connector.hbase.source.HBaseTableSource;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
-import org.apache.flink.connector.hbase.util.HBaseTestBase;
 import org.apache.flink.connector.hbase.util.PlannerType;
+import org.apache.flink.connector.hbase2.source.AbstractTableInputFormat;
+import org.apache.flink.connector.hbase2.source.HBaseInputFormat;
+import org.apache.flink.connector.hbase2.source.HBaseRowDataInputFormat;
+import org.apache.flink.connector.hbase2.source.HBaseRowInputFormat;
+import org.apache.flink.connector.hbase2.source.HBaseTableSource;
+import org.apache.flink.connector.hbase2.util.HBaseTestBase;
 import org.apache.flink.streaming.api.datastream.DataStream;
 import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
 import org.apache.flink.table.api.DataTypes;
@@ -49,6 +49,8 @@ import org.apache.flink.test.util.TestBaseUtils;
 import org.apache.flink.types.Row;
 import org.apache.flink.util.CollectionUtil;
 
+import org.apache.flink.shaded.guava18.com.google.common.collect.Lists;
+
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -115,37 +117,37 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		} else {
 			tEnv.executeSql(
 					"CREATE TABLE hTable (" +
-					" family1 ROW<col1 INT>," +
-					" family2 ROW<col1 STRING, col2 BIGINT>," +
-					" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
-					" rowkey INT," +
-					" PRIMARY KEY (rowkey) NOT ENFORCED" +
-					") WITH (" +
-					" 'connector' = 'hbase-1.4'," +
-					" 'table-name' = '" + TEST_TABLE_1 + "'," +
-					" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
+							" family1 ROW<col1 INT>," +
+							" family2 ROW<col1 STRING, col2 BIGINT>," +
+							" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
+							" rowkey INT," +
+							" PRIMARY KEY (rowkey) NOT ENFORCED" +
+							") WITH (" +
+							" 'connector' = 'hbase-2.2'," +
+							" 'table-name' = '" + TEST_TABLE_1 + "'," +
+							" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
 					")");
 		}
 
 		Table table = tEnv.sqlQuery("SELECT " +
-			"  h.family1.col1, " +
-			"  h.family2.col1, " +
-			"  h.family2.col2, " +
-			"  h.family3.col1, " +
-			"  h.family3.col2, " +
-			"  h.family3.col3 " +
-			"FROM hTable AS h");
+				"  h.family1.col1, " +
+				"  h.family2.col1, " +
+				"  h.family2.col2, " +
+				"  h.family3.col1, " +
+				"  h.family3.col2, " +
+				"  h.family3.col3 " +
+				"FROM hTable AS h");
 
 		List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
 		String expected =
-			"10,Hello-1,100,1.01,false,Welt-1\n" +
-				"20,Hello-2,200,2.02,true,Welt-2\n" +
-				"30,Hello-3,300,3.03,false,Welt-3\n" +
-				"40,null,400,4.04,true,Welt-4\n" +
-				"50,Hello-5,500,5.05,false,Welt-5\n" +
-				"60,Hello-6,600,6.06,true,Welt-6\n" +
-				"70,Hello-7,700,7.07,false,Welt-7\n" +
-				"80,null,800,8.08,true,Welt-8\n";
+				"10,Hello-1,100,1.01,false,Welt-1\n" +
+						"20,Hello-2,200,2.02,true,Welt-2\n" +
+						"30,Hello-3,300,3.03,false,Welt-3\n" +
+						"40,null,400,4.04,true,Welt-4\n" +
+						"50,Hello-5,500,5.05,false,Welt-5\n" +
+						"60,Hello-6,600,6.06,true,Welt-6\n" +
+						"70,Hello-7,700,7.07,false,Welt-7\n" +
+						"80,null,800,8.08,true,Welt-8\n";
 
 		TestBaseUtils.compareResultAsText(results, expected);
 	}
@@ -167,35 +169,35 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		} else {
 			tEnv.executeSql(
 					"CREATE TABLE hTable (" +
-					" family1 ROW<col1 INT>," +
-					" family2 ROW<col1 STRING, col2 BIGINT>," +
-					" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
-					" rowkey INT," +
-					" PRIMARY KEY (rowkey) NOT ENFORCED" +
-					") WITH (" +
-					" 'connector' = 'hbase-1.4'," +
-					" 'table-name' = '" + TEST_TABLE_1 + "'," +
-					" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
+							" family1 ROW<col1 INT>," +
+							" family2 ROW<col1 STRING, col2 BIGINT>," +
+							" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
+							" rowkey INT," +
+							" PRIMARY KEY (rowkey) NOT ENFORCED" +
+							") WITH (" +
+							" 'connector' = 'hbase-2.2'," +
+							" 'table-name' = '" + TEST_TABLE_1 + "'," +
+							" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
 					")");
 		}
 
 		Table table = tEnv.sqlQuery("SELECT " +
-			"  h.family1.col1, " +
-			"  h.family3.col1, " +
-			"  h.family3.col2, " +
-			"  h.family3.col3 " +
-			"FROM hTable AS h");
+				"  h.family1.col1, " +
+				"  h.family3.col1, " +
+				"  h.family3.col2, " +
+				"  h.family3.col3 " +
+				"FROM hTable AS h");
 
 		List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
 		String expected =
-			"10,1.01,false,Welt-1\n" +
-				"20,2.02,true,Welt-2\n" +
-				"30,3.03,false,Welt-3\n" +
-				"40,4.04,true,Welt-4\n" +
-				"50,5.05,false,Welt-5\n" +
-				"60,6.06,true,Welt-6\n" +
-				"70,7.07,false,Welt-7\n" +
-				"80,8.08,true,Welt-8\n";
+				"10,1.01,false,Welt-1\n" +
+						"20,2.02,true,Welt-2\n" +
+						"30,3.03,false,Welt-3\n" +
+						"40,4.04,true,Welt-4\n" +
+						"50,5.05,false,Welt-5\n" +
+						"60,6.06,true,Welt-6\n" +
+						"70,7.07,false,Welt-7\n" +
+						"80,8.08,true,Welt-8\n";
 
 		TestBaseUtils.compareResultAsText(results, expected);
 	}
@@ -218,14 +220,14 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		} else {
 			tEnv.executeSql(
 					"CREATE TABLE hTable (" +
-					" rowkey INT PRIMARY KEY," +
-					" family2 ROW<col1 STRING, col2 BIGINT>," +
-					" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
-					" family1 ROW<col1 INT>" +
-					") WITH (" +
-					" 'connector' = 'hbase-1.4'," +
-					" 'table-name' = '" + TEST_TABLE_1 + "'," +
-					" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
+							" rowkey INT PRIMARY KEY," +
+							" family2 ROW<col1 STRING, col2 BIGINT>," +
+							" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
+							" family1 ROW<col1 INT>" +
+							") WITH (" +
+							" 'connector' = 'hbase-2.2'," +
+							" 'table-name' = '" + TEST_TABLE_1 + "'," +
+							" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
 					")");
 		}
 
@@ -233,45 +235,44 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 
 		List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
 		String expected =
-			"1,Hello-1,100,1.01,false,Welt-1,10\n" +
-				"2,Hello-2,200,2.02,true,Welt-2,20\n" +
-				"3,Hello-3,300,3.03,false,Welt-3,30\n" +
-				"4,null,400,4.04,true,Welt-4,40\n" +
-				"5,Hello-5,500,5.05,false,Welt-5,50\n" +
-				"6,Hello-6,600,6.06,true,Welt-6,60\n" +
-				"7,Hello-7,700,7.07,false,Welt-7,70\n" +
-				"8,null,800,8.08,true,Welt-8,80\n";
+				"1,Hello-1,100,1.01,false,Welt-1,10\n" +
+						"2,Hello-2,200,2.02,true,Welt-2,20\n" +
+						"3,Hello-3,300,3.03,false,Welt-3,30\n" +
+						"4,null,400,4.04,true,Welt-4,40\n" +
+						"5,Hello-5,500,5.05,false,Welt-5,50\n" +
+						"6,Hello-6,600,6.06,true,Welt-6,60\n" +
+						"7,Hello-7,700,7.07,false,Welt-7,70\n" +
+						"8,null,800,8.08,true,Welt-8,80\n";
 
 		TestBaseUtils.compareResultAsText(results, expected);
 	}
 
-	@Test
 	public void testTableSourceWithTableAPI() throws Exception {
 		StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
 		StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
 		tEnv.connect(new HBase()
-			.version("1.4.3")
-			.tableName(TEST_TABLE_1)
-			.zookeeperQuorum(getZookeeperQuorum()))
-			.withSchema(new Schema()
+				.version("2.2.3")
+				.tableName(TEST_TABLE_1)
+				.zookeeperQuorum(getZookeeperQuorum()))
+		.withSchema(new Schema()
 				.field("rowkey", DataTypes.INT())
 				.field("family2", DataTypes.ROW(DataTypes.FIELD("col1", DataTypes.STRING()), DataTypes.FIELD("col2", DataTypes.BIGINT())))
 				.field("family3", DataTypes.ROW(DataTypes.FIELD("col1", DataTypes.DOUBLE()), DataTypes.FIELD("col2", DataTypes.BOOLEAN()), DataTypes.FIELD("col3", DataTypes.STRING())))
 				.field("family1", DataTypes.ROW(DataTypes.FIELD("col1", DataTypes.INT()))))
-			.createTemporaryTable("hTable");
+		.createTemporaryTable("hTable");
 		Table table = tEnv.sqlQuery("SELECT * FROM hTable AS h");
 		List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
-		String expected =
-			"1,Hello-1,100,1.01,false,Welt-1,10\n" +
-				"2,Hello-2,200,2.02,true,Welt-2,20\n" +
-				"3,Hello-3,300,3.03,false,Welt-3,30\n" +
-				"4,null,400,4.04,true,Welt-4,40\n" +
-				"5,Hello-5,500,5.05,false,Welt-5,50\n" +
-				"6,Hello-6,600,6.06,true,Welt-6,60\n" +
-				"7,Hello-7,700,7.07,false,Welt-7,70\n" +
-				"8,null,800,8.08,true,Welt-8,80\n";
-
-		TestBaseUtils.compareResultAsText(results, expected);
+		String expected =
+				"1,Hello-1,100,1.01,false,Welt-1,10\n" +
+						"2,Hello-2,200,2.02,true,Welt-2,20\n" +
+						"3,Hello-3,300,3.03,false,Welt-3,30\n" +
+						"4,null,400,4.04,true,Welt-4,40\n" +
+						"5,Hello-5,500,5.05,false,Welt-5,50\n" +
+						"6,Hello-6,600,6.06,true,Welt-6,60\n" +
+						"7,Hello-7,700,7.07,false,Welt-7,70\n" +
+						"8,null,800,8.08,true,Welt-8,80\n";
+
+		TestBaseUtils.compareResultAsText(results, expected);
 	}
 
 	@Test
@@ -288,34 +289,34 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		} else {
 			tEnv.executeSql(
 					"CREATE TABLE hTable (" +
-					" family2 ROW<col1 BYTES, col2 BYTES>," +
-					" rowkey INT" + // no primary key syntax
-					") WITH (" +
-					" 'connector' = 'hbase-1.4'," +
-					" 'table-name' = '" + TEST_TABLE_1 + "'," +
-					" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
+							" family2 ROW<col1 BYTES, col2 BYTES>," +
+							" rowkey INT" + // no primary key syntax
+							") WITH (" +
+							" 'connector' = 'hbase-2.2'," +
+							" 'table-name' = '" + TEST_TABLE_1 + "'," +
+							" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
 					")");
 		}
 		tEnv.registerFunction("toUTF8", new ToUTF8());
 		tEnv.registerFunction("toLong", new ToLong());
 
 		Table table = tEnv.sqlQuery(
-			"SELECT " +
-				"  toUTF8(h.family2.col1), " +
-				"  toLong(h.family2.col2) " +
-				"FROM hTable AS h"
-		);
+				"SELECT " +
+						"  toUTF8(h.family2.col1), " +
+						"  toLong(h.family2.col2) " +
+						"FROM hTable AS h"
+				);
 
 		List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
 		String expected =
-			"Hello-1,100\n" +
-				"Hello-2,200\n" +
-				"Hello-3,300\n" +
-				"null,400\n" +
-				"Hello-5,500\n" +
-				"Hello-6,600\n" +
-				"Hello-7,700\n" +
-				"null,800\n";
+				"Hello-1,100\n" +
+						"Hello-2,200\n" +
+						"Hello-3,300\n" +
+						"null,400\n" +
+						"Hello-5,500\n" +
+						"Hello-6,600\n" +
+						"Hello-7,700\n" +
+						"null,800\n";
 
 		TestBaseUtils.compareResultAsText(results, expected);
 	}
@@ -325,8 +326,8 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
 
 		DataSet<Tuple1<Integer>> result = env
-			.createInput(new InputFormatForTestTable(getConf()))
-			.reduce((ReduceFunction<Tuple1<Integer>>) (v1, v2) -> Tuple1.of(v1.f0 + v2.f0));
+				.createInput(new InputFormatForTestTable(getConf()))
+				.reduce((ReduceFunction<Tuple1<Integer>>) (v1, v2) -> Tuple1.of(v1.f0 + v2.f0));
 
 		List<Tuple1<Integer>> resultSet = result.collect();
 
@@ -347,12 +348,13 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		tEnv.executeSql(table2DDL);
 
 		String query = "INSERT INTO " + TEST_TABLE_2 + " SELECT" +
-			" rowkey," +
-			" family1," +
-			" family2," +
-			" family3" +
-			" FROM " + TEST_TABLE_1;
+				" rowkey," +
+				" family1," +
+				" family2," +
+				" family3" +
+				" FROM " + TEST_TABLE_1;
 
+		// wait for the INSERT job to finish
 		tEnv.executeSql(query).await();
 
 		// start a batch scan job to verify contents in HBase table
@@ -360,26 +362,26 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		batchEnv.executeSql(table2DDL);
 
 		Table table = batchEnv.sqlQuery(
-			"SELECT " +
-				"  h.rowkey, " +
-				"  h.family1.col1, " +
-				"  h.family2.col1, " +
-				"  h.family2.col2, " +
-				"  h.family3.col1, " +
-				"  h.family3.col2, " +
-				"  h.family3.col3 " +
-				"FROM " + TEST_TABLE_2 + " AS h"
-		);
+				"SELECT " +
+						"  h.rowkey, " +
+						"  h.family1.col1, " +
+						"  h.family2.col1, " +
+						"  h.family2.col2, " +
+						"  h.family3.col1, " +
+						"  h.family3.col2, " +
+						"  h.family3.col3 " +
+						"FROM " + TEST_TABLE_2 + " AS h"
+				);
 		List<Row> results = CollectionUtil.iteratorToList(table.execute().collect());
 		String expected =
 				"1,10,Hello-1,100,1.01,false,Welt-1\n" +
-				"2,20,Hello-2,200,2.02,true,Welt-2\n" +
-				"3,30,Hello-3,300,3.03,false,Welt-3\n" +
-				"4,40,null,400,4.04,true,Welt-4\n" +
-				"5,50,Hello-5,500,5.05,false,Welt-5\n" +
-				"6,60,Hello-6,600,6.06,true,Welt-6\n" +
-				"7,70,Hello-7,700,7.07,false,Welt-7\n" +
-				"8,80,null,800,8.08,true,Welt-8\n";
+						"2,20,Hello-2,200,2.02,true,Welt-2\n" +
+						"3,30,Hello-3,300,3.03,false,Welt-3\n" +
+						"4,40,null,400,4.04,true,Welt-4\n" +
+						"5,50,Hello-5,500,5.05,false,Welt-5\n" +
+						"6,60,Hello-6,600,6.06,true,Welt-6\n" +
+						"7,70,Hello-7,700,7.07,false,Welt-7\n" +
+						"8,80,null,800,8.08,true,Welt-8\n";
 
 		TestBaseUtils.compareResultAsText(results, expected);
 	}
@@ -404,12 +406,13 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		tEnv.executeSql(table3DDL);
 
 		String insertStatement = "INSERT INTO " + TEST_TABLE_3 +
-			" SELECT rowkey," +
-			" family1," +
-			" family2," +
-			" family3," +
-			" family4" +
-			" from " + TEST_TABLE_1;
+				" SELECT rowkey," +
+				" family1," +
+				" family2," +
+				" family3," +
+				" family4" +
+				" from " + TEST_TABLE_1;
+		// wait for the INSERT job to finish
 		tEnv.executeSql(insertStatement).await();
 
 		// start a batch scan job to verify contents in HBase table
@@ -429,10 +432,10 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 				"  h.family4.col4 " +
 				" FROM " + TEST_TABLE_3 + " AS h";
 		Iterator<Row> collected = tEnv.executeSql(query).collect();
-		List<String> result = CollectionUtil.iteratorToList(collected).stream()
-			.map(Row::toString)
-			.sorted()
-			.collect(Collectors.toList());
+		List<String> result = Lists.newArrayList(collected).stream()
+				.map(Row::toString)
+				.sorted()
+				.collect(Collectors.toList());
 
 		List<String> expected = new ArrayList<>();
 		expected.add("1,10,Hello-1,100,1.01,false,Welt-1,2019-08-18T19:00,2019-08-18,19:00,12345678.0001");
@@ -458,17 +461,17 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		StreamTableEnvironment tEnv = StreamTableEnvironment.create(execEnv, streamSettings);
 
 		tEnv.executeSql(
-			"CREATE TABLE " + TEST_TABLE_1 + " (" +
-				" family1 ROW<col1 INT>," +
-				" family2 ROW<col1 STRING, col2 BIGINT>," +
-				" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
-				" rowkey INT," +
-				" family4 ROW<col1 TIMESTAMP(3), col2 DATE, col3 TIME(3), col4 DECIMAL(12, 4)>," +
-				" PRIMARY KEY (rowkey) NOT ENFORCED" +
-				") WITH (" +
-				" 'connector' = 'hbase-1.4'," +
-				" 'table-name' = '" + TEST_TABLE_1 + "'," +
-				" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
+				"CREATE TABLE " + TEST_TABLE_1 + " (" +
+						" family1 ROW<col1 INT>," +
+						" family2 ROW<col1 STRING, col2 BIGINT>," +
+						" family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 STRING>," +
+						" rowkey INT," +
+						" family4 ROW<col1 TIMESTAMP(3), col2 DATE, col3 TIME(3), col4 DECIMAL(12, 4)>," +
+						" PRIMARY KEY (rowkey) NOT ENFORCED" +
+						") WITH (" +
+						" 'connector' = 'hbase-2.2'," +
+						" 'table-name' = '" + TEST_TABLE_1 + "'," +
+						" 'zookeeper.quorum' = '" + getZookeeperQuorum() + "'" +
 				")");
 
 		// prepare a source table
@@ -479,24 +482,24 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 
 		// perform a temporal table join query
 		String dimJoinQuery = "SELECT" +
-			" a," +
-			" b," +
-			" h.family1.col1," +
-			" h.family2.col1," +
-			" h.family2.col2," +
-			" h.family3.col1," +
-			" h.family3.col2," +
-			" h.family3.col3," +
-			" h.family4.col1," +
-			" h.family4.col2," +
-			" h.family4.col3," +
-			" h.family4.col4 " +
-			" FROM src JOIN " + TEST_TABLE_1 + " FOR SYSTEM_TIME AS OF src.proc as h ON src.a = h.rowkey";
+				" a," +
+				" b," +
+				" h.family1.col1," +
+				" h.family2.col1," +
+				" h.family2.col2," +
+				" h.family3.col1," +
+				" h.family3.col2," +
+				" h.family3.col3," +
+				" h.family4.col1," +
+				" h.family4.col2," +
+				" h.family4.col3," +
+				" h.family4.col4 " +
+				" FROM src JOIN " + TEST_TABLE_1 + " FOR SYSTEM_TIME AS OF src.proc as h ON src.a = h.rowkey";
 		Iterator<Row> collected = tEnv.executeSql(dimJoinQuery).collect();
-		List<String> result = CollectionUtil.iteratorToList(collected).stream()
-			.map(Row::toString)
-			.sorted()
-			.collect(Collectors.toList());
+		List<String> result = Lists.newArrayList(collected).stream()
+				.map(Row::toString)
+				.sorted()
+				.collect(Collectors.toList());
 
 		List<String> expected = new ArrayList<>();
 		expected.add("1,1,10,Hello-1,100,1.01,false,Welt-1,2019-08-18T19:00,2019-08-18,19:00,12345678.0001");
@@ -532,8 +535,8 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 	// prepare a source collection.
 	private static final List<Row> testData = new ArrayList<>();
 	private static final RowTypeInfo testTypeInfo = new RowTypeInfo(
-		new TypeInformation[]{Types.INT, Types.LONG, Types.STRING},
-		new String[]{"a", "b", "c"});
+			new TypeInformation[]{Types.INT, Types.LONG, Types.STRING},
+			new String[]{"a", "b", "c"});
 
 	static {
 		testData.add(Row.of(1, 1L, "Hi"));
@@ -615,31 +618,31 @@ public class HBaseConnectorITCase extends HBaseTestBase {
 		}
 		if (isLegacyConnector) {
 			return "CREATE TABLE " + tableName + "(\n" +
-				"	rowkey INT,\n" +
-				"   family1 ROW<col1 INT>,\n" +
-				"   family2 ROW<col1 VARCHAR, col2 BIGINT>,\n" +
-				"   family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 VARCHAR>" +
-				family4Statement.toString() +
-				") WITH (\n" +
-				"   'connector.type' = 'hbase',\n" +
-				"   'connector.version' = '1.4.3',\n" +
-				"   'connector.table-name' = '" + tableName + "',\n" +
-				"   'connector.zookeeper.quorum' = '" + getZookeeperQuorum() + "',\n" +
-				"   'connector.zookeeper.znode.parent' = '/hbase' " +
-				")";
+					"	rowkey INT,\n" +
+					"   family1 ROW<col1 INT>,\n" +
+					"   family2 ROW<col1 VARCHAR, col2 BIGINT>,\n" +
+					"   family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 VARCHAR>" +
+					family4Statement.toString() +
+					") WITH (\n" +
+					"   'connector.type' = 'hbase',\n" +
+					"   'connector.version' = '2.2.3',\n" +
+					"   'connector.table-name' = '" + tableName + "',\n" +
+					"   'connector.zookeeper.quorum' = '" + getZookeeperQuorum() + "',\n" +
+					"   'connector.zookeeper.znode.parent' = '/hbase' " +
+					")";
 		} else {
 			return "CREATE TABLE " + tableName + "(\n" +
-				"   rowkey INT," +
-				"   family1 ROW<col1 INT>,\n" +
-				"   family2 ROW<col1 VARCHAR, col2 BIGINT>,\n" +
-				"   family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 VARCHAR>" +
-				family4Statement.toString() +
-				") WITH (\n" +
-				"   'connector' = 'hbase-1.4',\n" +
-				"   'table-name' = '" + tableName + "',\n" +
-				"   'zookeeper.quorum' = '" + getZookeeperQuorum() + "',\n" +
-				"   'zookeeper.znode.parent' = '/hbase' " +
-				")";
+					"   rowkey INT," +
+					"   family1 ROW<col1 INT>,\n" +
+					"   family2 ROW<col1 VARCHAR, col2 BIGINT>,\n" +
+					"   family3 ROW<col1 DOUBLE, col2 BOOLEAN, col3 VARCHAR>" +
+					family4Statement.toString() +
+					") WITH (\n" +
+					"   'connector' = 'hbase-2.2',\n" +
+					"   'table-name' = '" + tableName + "',\n" +
+					"   'zookeeper.quorum' = '" + getZookeeperQuorum() + "',\n" +
+					"   'zookeeper.znode.parent' = '/hbase' " +
+					")";
 		}
 	}
 }
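
Pulled out of the tests above, the new connector is addressed from SQL as shown in this sketch; the table name and quorum are placeholders and the schema is trimmed to a single family:

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    tEnv.executeSql(
        "CREATE TABLE hTable (" +
        "  rowkey INT," +
        "  family1 ROW<col1 INT>," +
        "  PRIMARY KEY (rowkey) NOT ENFORCED" +
        ") WITH (" +
        "  'connector' = 'hbase-2.2'," +
        "  'table-name' = 'my_table'," +
        "  'zookeeper.quorum' = 'localhost:2181'" +
        ")");
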
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDescriptorTest.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDescriptorTest.java
similarity index 88%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDescriptorTest.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDescriptorTest.java
index d715ffb..de5a869 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDescriptorTest.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDescriptorTest.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.table.api.DataTypes;
 import org.apache.flink.table.api.ValidationException;
@@ -29,7 +29,6 @@ import org.apache.flink.table.descriptors.DescriptorTestBase;
 import org.apache.flink.table.descriptors.DescriptorValidator;
 import org.apache.flink.table.descriptors.FormatDescriptor;
 import org.apache.flink.table.descriptors.HBase;
-import org.apache.flink.table.descriptors.HBaseValidator;
 import org.apache.flink.table.descriptors.Rowtime;
 import org.apache.flink.table.descriptors.Schema;
 import org.apache.flink.table.descriptors.StreamTableDescriptor;
@@ -51,13 +50,13 @@ public class HBaseDescriptorTest extends DescriptorTestBase {
 	@Override
 	protected List<Descriptor> descriptors() {
 		HBase hbaseDesc0 = new HBase()
-			.version("1.4.3")
+			.version("2.2.3")
 			.tableName("testNs:table0")
 			.zookeeperQuorum("localhost:2181,localhost:2182,localhost:2183")
 			.zookeeperNodeParent("/hbase/root-dir");
 
 		HBase hbaseDesc1 = new HBase()
-			.version("1.4.3")
+			.version("2.2.3")
 			.tableName("testNs:table1")
 			.zookeeperQuorum("localhost:2181")
 			.zookeeperNodeParent("/hbase/root")
@@ -71,7 +70,7 @@ public class HBaseDescriptorTest extends DescriptorTestBase {
 	@Override
 	protected List<Map<String, String>> properties() {
 		Map<String, String> prop0 = new HashMap<>();
-		prop0.put("connector.version", "1.4.3");
+		prop0.put("connector.version", "2.2.3");
 		prop0.put("connector.type", "hbase");
 		prop0.put("connector.table-name", "testNs:table0");
 		prop0.put("connector.zookeeper.quorum", "localhost:2181,localhost:2182,localhost:2183");
@@ -79,7 +78,7 @@ public class HBaseDescriptorTest extends DescriptorTestBase {
 		prop0.put("connector.property-version", "1");
 
 		Map<String, String> prop1 = new HashMap<>();
-		prop1.put("connector.version", "1.4.3");
+		prop1.put("connector.version", "2.2.3");
 		prop1.put("connector.type", "hbase");
 		prop1.put("connector.table-name", "testNs:table1");
 		prop1.put("connector.zookeeper.quorum", "localhost:2181");
@@ -101,18 +100,11 @@ public class HBaseDescriptorTest extends DescriptorTestBase {
 	public void testRequiredFields() {
 		HBase hbaseDesc0 = new HBase();
 		HBase hbaseDesc1 = new HBase()
-			.version("1.4.3")
+			.version("2.2.3")
 			.zookeeperQuorum("localhost:2181")
 			.zookeeperNodeParent("/hbase/root"); // no table name
-		HBase hbaseDesc2 = new HBase()
-			.version("1.4.3")
-			.tableName("ns:table")
-			.zookeeperNodeParent("/hbase/root"); // no zookeeper quorum
-		HBase hbaseDesc3 = new HBase()
-			.tableName("ns:table")
-			.zookeeperQuorum("localhost:2181"); // no version
 
-		HBase[] testCases = new HBase[]{hbaseDesc0, hbaseDesc1, hbaseDesc2, hbaseDesc3};
+		HBase[] testCases = new HBase[]{hbaseDesc0, hbaseDesc1};
 		for (int i = 0; i < testCases.length; i++) {
 			HBase hbaseDesc = testCases[i];
 			DescriptorProperties properties = new DescriptorProperties();
@@ -129,7 +121,7 @@ public class HBaseDescriptorTest extends DescriptorTestBase {
 
 	@Test
 	public void testFormatNeed(){
-		String expected = "The connector org.apache.flink.table.descriptors.HBase does not require a format description but org.apache.flink.connector.hbase.HBaseDescriptorTest$1 found.";
+		String expected = "The connector org.apache.flink.table.descriptors.HBase does not require a format description but org.apache.flink.connector.hbase2.HBaseDescriptorTest$1 found.";
 		AtomicReference<CatalogTableImpl> reference = new AtomicReference<>();
 		HBase hBase = new HBase();
 		Registration registration = (path, table) -> reference.set((CatalogTableImpl) table);
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactoryTest.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDynamicTableFactoryTest.java
similarity index 94%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactoryTest.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDynamicTableFactoryTest.java
index 47775ac..94530b5 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseDynamicTableFactoryTest.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseDynamicTableFactoryTest.java
@@ -16,16 +16,15 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.api.common.typeinfo.Types;
 import org.apache.flink.configuration.Configuration;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseDynamicTableSink;
-import org.apache.flink.connector.hbase.source.HBaseDynamicTableSource;
 import org.apache.flink.connector.hbase.source.HBaseRowDataLookupFunction;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink;
+import org.apache.flink.connector.hbase2.source.HBaseDynamicTableSource;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.catalog.CatalogTableImpl;
 import org.apache.flink.table.catalog.ObjectIdentifier;
@@ -64,7 +63,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
- * Unit test for {@link HBaseDynamicTableFactory}.
+ * Unit test for {@link HBase2DynamicTableFactory}.
  */
 public class HBaseDynamicTableFactoryTest {
 
@@ -172,14 +171,6 @@ public class HBaseDynamicTableFactoryTest {
 			new DataType[]{DECIMAL(10, 3), TIMESTAMP(3), DATE(), TIME()},
 			hbaseSchema.getQualifierDataTypes("f4"));
 
-		HBaseOptions expectedHBaseOptions = HBaseOptions.builder()
-			.setTableName("testHBastTable")
-			.setZkQuorum("localhost:2181")
-			.setZkNodeParent("/flink")
-			.build();
-		HBaseOptions actualHBaseOptions = hbaseSink.getHBaseOptions();
-		assertEquals(expectedHBaseOptions, actualHBaseOptions);
-
 		HBaseWriteOptions expectedWriteOptions = HBaseWriteOptions.builder()
 			.setBufferFlushMaxRows(1000)
 			.setBufferFlushIntervalMillis(1000)
@@ -321,7 +312,7 @@ public class HBaseDynamicTableFactoryTest {
 
 	private Map<String, String> getAllOptions() {
 		Map<String, String> options = new HashMap<>();
-		options.put("connector", "hbase-1.4");
+		options.put("connector", "hbase-2.2");
 		options.put("table-name", "testHBastTable");
 		options.put("zookeeper.quorum", "localhost:2181");
 		options.put("zookeeper.znode.parent", "/flink");
@@ -334,7 +325,7 @@ public class HBaseDynamicTableFactoryTest {
 			ObjectIdentifier.of("default", "default", "t1"),
 			new CatalogTableImpl(schema, options, "mock source"),
 			new Configuration(),
-			HBaseDynamicTableFactory.class.getClassLoader());
+			HBase2DynamicTableFactory.class.getClassLoader());
 	}
 
 	private static DynamicTableSink createTableSink(TableSchema schema, Map<String, String> options) {
@@ -343,7 +334,7 @@ public class HBaseDynamicTableFactoryTest {
 			ObjectIdentifier.of("default", "default", "t1"),
 			new CatalogTableImpl(schema, options, "mock sink"),
 			new Configuration(),
-			HBaseDynamicTableFactory.class.getClassLoader());
+			HBase2DynamicTableFactory.class.getClassLoader());
 	}
 
 }
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTableFactoryTest.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTableFactoryTest.java
similarity index 91%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTableFactoryTest.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTableFactoryTest.java
index e5c3217..25d3a94 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTableFactoryTest.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTableFactoryTest.java
@@ -18,16 +18,15 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.common.typeinfo.Types;
-import org.apache.flink.connector.hbase.options.HBaseOptions;
 import org.apache.flink.connector.hbase.options.HBaseWriteOptions;
-import org.apache.flink.connector.hbase.sink.HBaseUpsertTableSink;
 import org.apache.flink.connector.hbase.source.HBaseLookupFunction;
-import org.apache.flink.connector.hbase.source.HBaseTableSource;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
+import org.apache.flink.connector.hbase2.sink.HBaseUpsertTableSink;
+import org.apache.flink.connector.hbase2.source.HBaseTableSource;
 import org.apache.flink.table.api.DataTypes;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.descriptors.DescriptorProperties;
@@ -63,7 +62,7 @@ public class HBaseTableFactoryTest {
 	private DescriptorProperties createDescriptor(TableSchema tableSchema) {
 		Map<String, String> tableProperties = new HashMap<>();
 		tableProperties.put("connector.type", "hbase");
-		tableProperties.put("connector.version", "1.4.3");
+		tableProperties.put("connector.version", "2.2.3");
 		tableProperties.put("connector.property-version", "1");
 		tableProperties.put("connector.table-name", "testHBastTable");
 		tableProperties.put("connector.zookeeper.quorum", "localhost:2181");
@@ -97,7 +96,7 @@ public class HBaseTableFactoryTest {
 				DataTypes.FIELD(COL4, DataTypes.TIME())))
 			.build();
 		DescriptorProperties descriptorProperties = createDescriptor(schema);
-		TableSource source = TableFactoryService.find(HBaseTableFactory.class,
+		TableSource source = TableFactoryService.find(HBase2TableFactory.class,
 			descriptorProperties.asMap()).createTableSource(descriptorProperties.asMap());
 		Assert.assertTrue(source instanceof HBaseTableSource);
 		TableFunction<Row> tableFunction = ((HBaseTableSource) source).getLookupFunction(new String[]{ROWKEY});
@@ -145,7 +144,7 @@ public class HBaseTableFactoryTest {
 		DescriptorProperties descriptorProperties = createDescriptor(schema);
 
 		TableSink sink = TableFactoryService
-			.find(HBaseTableFactory.class, descriptorProperties.asMap())
+			.find(HBase2TableFactory.class, descriptorProperties.asMap())
 			.createTableSink(descriptorProperties.asMap());
 
 		Assert.assertTrue(sink instanceof HBaseUpsertTableSink);
@@ -167,14 +166,6 @@ public class HBaseTableFactoryTest {
 			new TypeInformation[]{Types.BIG_DEC, Types.SQL_TIMESTAMP, Types.SQL_DATE, Types.SQL_TIME},
 			hbaseSchema.getQualifierTypes("f4"));
 
-		HBaseOptions expectedHBaseOptions = HBaseOptions.builder()
-			.setTableName("testHBastTable")
-			.setZkQuorum("localhost:2181")
-			.setZkNodeParent("/flink")
-			.build();
-		HBaseOptions actualHBaseOptions = ((HBaseUpsertTableSink) sink).getHBaseOptions();
-		Assert.assertEquals(expectedHBaseOptions, actualHBaseOptions);
-
 		HBaseWriteOptions expectedWriteOptions = HBaseWriteOptions.builder()
 			.setBufferFlushMaxRows(1000)
 			.setBufferFlushIntervalMillis(10 * 1000)
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTablePlanTest.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTablePlanTest.java
similarity index 94%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTablePlanTest.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTablePlanTest.java
index 863c345..0ddbb7e 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/HBaseTablePlanTest.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/HBaseTablePlanTest.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase;
+package org.apache.flink.connector.hbase2;
 
 import org.apache.flink.table.api.TableConfig;
 import org.apache.flink.table.planner.utils.StreamTableTestUtil;
@@ -42,7 +42,7 @@ public class HBaseTablePlanTest extends TableTestBase {
 				" rowkey INT," +
 				" rowkey2 STRING " +
 				") WITH (" +
-				" 'connector' = 'hbase-1.4'," +
+				" 'connector' = 'hbase-2.2'," +
 				" 'table-name' = 'my_table'," +
 				" 'zookeeper.quorum' = 'localhost:2021'" +
 				")");
@@ -57,7 +57,7 @@ public class HBaseTablePlanTest extends TableTestBase {
 				" family1 ROW<col1 INT>," +
 				" family2 ROW<col1 STRING, col2 BIGINT>" +
 				") WITH (" +
-				" 'connector' = 'hbase-1.4'," +
+				" 'connector' = 'hbase-2.2'," +
 				" 'table-name' = 'my_table'," +
 				" 'zookeeper.quorum' = 'localhost:2021'" +
 				")");
@@ -77,7 +77,7 @@ public class HBaseTablePlanTest extends TableTestBase {
 				" rowkey STRING, " +
 				" PRIMARY KEY (family1) NOT ENFORCED " +
 				") WITH (" +
-				" 'connector' = 'hbase-1.4'," +
+				" 'connector' = 'hbase-2.2'," +
 				" 'table-name' = 'my_table'," +
 				" 'zookeeper.quorum' = 'localhost:2021'" +
 				")");
@@ -98,7 +98,7 @@ public class HBaseTablePlanTest extends TableTestBase {
 				" rowkey STRING, " +
 				" PRIMARY KEY (rowkey) NOT ENFORCED " +
 				") WITH (" +
-				" 'connector' = 'hbase-1.4'," +
+				" 'connector' = 'hbase-2.2'," +
 				" 'table-name' = 'my_table'," +
 				" 'zookeeper.quorum' = 'localhost:2021'" +
 				")");
@@ -117,7 +117,7 @@ public class HBaseTablePlanTest extends TableTestBase {
 				" rowkey INT," +
 				" PRIMARY KEY (rowkey) NOT ENFORCED" +
 				") WITH (" +
-				" 'connector' = 'hbase-1.4'," +
+				" 'connector' = 'hbase-2.2'," +
 				" 'table-name' = 'my_table'," +
 				" 'zookeeper.quorum' = 'localhost:2021'" +
 				")");
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteExample.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/example/HBaseWriteExample.java
similarity index 96%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteExample.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/example/HBaseWriteExample.java
index 42fd97d..d54a934 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseWriteExample.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/example/HBaseWriteExample.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.example;
+package org.apache.flink.connector.hbase2.example;
 
 import org.apache.flink.api.common.functions.FlatMapFunction;
 import org.apache.flink.api.common.functions.RichMapFunction;
@@ -26,6 +26,7 @@ import org.apache.flink.api.java.hadoop.mapreduce.HadoopOutputFormat;
 import org.apache.flink.api.java.tuple.Tuple2;
 import org.apache.flink.configuration.ConfigConstants;
 import org.apache.flink.configuration.Configuration;
+import org.apache.flink.connector.hbase.example.HBaseFlinkTestConstants;
 import org.apache.flink.util.Collector;
 
 import org.apache.hadoop.hbase.client.Mutation;
@@ -90,8 +91,7 @@ public class HBaseWriteExample {
 			public Tuple2<Text, Mutation> map(Tuple2<String, Integer> t) throws Exception {
 				reuse.f0 = new Text(t.f0);
 				Put put = new Put(t.f0.getBytes(ConfigConstants.DEFAULT_CHARSET));
-				put.add(HBaseFlinkTestConstants.CF_SOME, HBaseFlinkTestConstants.Q_SOME, Bytes.toBytes(t.f1));
-				reuse.f1 = put;
+				put.addColumn(HBaseFlinkTestConstants.CF_SOME, HBaseFlinkTestConstants.Q_SOME, Bytes.toBytes(t.f1));
+				reuse.f1 = put;
 				return reuse;
 			}
 		}).output(new HadoopOutputFormat<Text, Mutation>(new TableOutputFormat<Text>(), job));
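
The one functional change in this file is the move from Put#add, which is no longer available in the HBase 2 client, to Put#addColumn. A short sketch of the replacement call, with made-up row key, family, qualifier and value:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    Put put = new Put(Bytes.toBytes("row-1"));
    // Put#add(byte[], byte[], byte[]) is gone in HBase 2; addColumn is the equivalent.
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(42));
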
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestBase.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestBase.java
similarity index 97%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestBase.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestBase.java
index c376b95..eb9559c 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestBase.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestBase.java
@@ -16,13 +16,14 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.util;
+package org.apache.flink.connector.hbase2.util;
 
+import org.apache.flink.connector.hbase.util.PlannerType;
 import org.apache.flink.table.api.EnvironmentSettings;
 
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -118,7 +119,7 @@ public abstract class HBaseTestBase extends HBaseTestingClusterAutoStarter {
 		createTable(tableName, FAMILIES, SPLIT_KEYS);
 
 		// get the HTable instance
-		HTable table = openTable(tableName);
+		Table table = openTable(tableName);
 		List<Put> puts = new ArrayList<>();
 		// add some data
 		puts.add(putRow(1, 10, "Hello-1", 100L, 1.01, false, "Welt-1",
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestingClusterAutoStarter.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestingClusterAutoStarter.java
similarity index 60%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestingClusterAutoStarter.java
rename to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestingClusterAutoStarter.java
index 4ba1c17..c6fc111 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseTestingClusterAutoStarter.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/apache/flink/connector/hbase2/util/HBaseTestingClusterAutoStarter.java
@@ -18,9 +18,7 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.util;
-
-import org.apache.flink.test.util.AbstractTestBase;
+package org.apache.flink.connector.hbase2.util;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,11 +31,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.util.VersionUtil;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Table;
 import org.junit.AfterClass;
-import org.junit.Assume;
 import org.junit.BeforeClass;
 
 import java.io.IOException;
@@ -51,32 +47,17 @@ import static org.junit.Assert.assertTrue;
 /**
  * By using this class as the super class of a set of tests you will have a HBase testing
  * cluster available that is very suitable for writing tests for scanning and filtering against.
- * This is usable by any downstream application because the HBase cluster is 'injected' because
- * a dynamically generated hbase-site.xml is added to the classpath.
- * Because of this classpath manipulation it is not possible to start a second testing cluster in the same JVM.
- * So if you have this you should either put all hbase related tests in a single class or force surefire to
- * setup a new JVM for each testclass.
- * See: http://maven.apache.org/surefire/maven-surefire-plugin/examples/fork-options-and-parallel-execution.html
  */
-//
-// NOTE: The code in this file is based on code from the
-// Apache HBase project, licensed under the Apache License v 2.0
-//
-// https://github.com/apache/hbase/blob/master/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java
-//
-public abstract class HBaseTestingClusterAutoStarter extends AbstractTestBase {
-
+public class HBaseTestingClusterAutoStarter {
 	private static final Log LOG = LogFactory.getLog(HBaseTestingClusterAutoStarter.class);
 
 	private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-	private static HBaseAdmin admin = null;
+	private static Admin admin = null;
 	private static List<TableName> createdTables = new ArrayList<>();
 
 	private static Configuration conf;
 
 	protected static void createTable(TableName tableName, byte[][] columnFamilyName, byte[][] splitKeys) {
-		LOG.info("HBase minicluster: Creating table " + tableName.getNameAsString());
-
 		assertNotNull("HBaseAdmin is not initialized successfully.", admin);
 		HTableDescriptor desc = new HTableDescriptor(tableName);
 		for (byte[] fam : columnFamilyName) {
@@ -93,8 +74,8 @@ public abstract class HBaseTestingClusterAutoStarter extends AbstractTestBase {
 		}
 	}
 
-	protected static HTable openTable(TableName tableName) throws IOException {
-		HTable table = (HTable) admin.getConnection().getTable(tableName);
+	protected static Table openTable(TableName tableName) throws IOException {
+		Table table = TEST_UTIL.getConnection().getTable(tableName);
 		assertTrue("Fail to create the table", admin.tableExists(tableName));
 		return table;
 	}
@@ -114,11 +95,19 @@ public abstract class HBaseTestingClusterAutoStarter extends AbstractTestBase {
 		}
 	}
 
-	private static Configuration initialize(Configuration conf) {
-		conf = HBaseConfiguration.create(conf);
+	public static Configuration getConf() {
+		return conf;
+	}
+
+	public static String getZookeeperQuorum() {
+		return "localhost:" + TEST_UTIL.getZkCluster().getClientPort();
+	}
+
+	private static void initialize(Configuration c) {
+		conf = HBaseConfiguration.create(c);
 		conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
 		try {
-			admin = TEST_UTIL.getHBaseAdmin();
+			admin = TEST_UTIL.getAdmin();
 		} catch (MasterNotRunningException e) {
 			assertNull("Master is not running", e);
 		} catch (ZooKeeperConnectionException e) {
@@ -126,40 +115,12 @@ public abstract class HBaseTestingClusterAutoStarter extends AbstractTestBase {
 		} catch (IOException e) {
 			assertNull("IOException", e);
 		}
-		return conf;
 	}
 
 	@BeforeClass
 	public static void setUp() throws Exception {
-		// HBase 1.4 does not work with Hadoop 3
-		// because it uses Guava 12.0.1, Hadoop 3 uses Guava 27.0-jre.
-		// There is no Guava version in between that works with both.
-		Assume.assumeTrue("This test is skipped for Hadoop versions above 3", VersionUtil.compareVersions(System.getProperty("hadoop.version"), "3.0.0") < 0);
-
-		LOG.info("HBase minicluster: Starting");
-
 		TEST_UTIL.startMiniCluster(1);
-
-		// https://issues.apache.org/jira/browse/HBASE-11711
-		TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);
-
-		// Make sure the zookeeper quorum value contains the right port number (varies per run).
-		LOG.info("Hbase minicluster client port: " + TEST_UTIL.getZkCluster().getClientPort());
-		TEST_UTIL.getConfiguration().set("hbase.zookeeper.quorum", "localhost:" + TEST_UTIL.getZkCluster().getClientPort());
-
-		conf = initialize(TEST_UTIL.getConfiguration());
-		LOG.info("HBase minicluster: Running");
-	}
-
-	/**
-	 * Returns zookeeper quorum value contains the right port number (varies per run).
-	 */
-	protected static String getZookeeperQuorum() {
-		return "localhost:" + TEST_UTIL.getZkCluster().getClientPort();
-	}
-
-	public static Configuration getConf() {
-		return conf;
+		initialize(TEST_UTIL.getConfiguration());
 	}
 
 	@AfterClass
@@ -173,5 +134,4 @@ public abstract class HBaseTestingClusterAutoStarter extends AbstractTestBase {
 		TEST_UTIL.shutdownMiniCluster();
 		LOG.info("HBase minicluster: Down");
 	}
-
 }
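
    (Orientation note, not part of the patch: after this change the mini-cluster helper is a plain class whose static createTable/openTable/getConf/getZookeeperQuorum methods are shared by the version-specific tests, and the parent @BeforeClass starts the cluster before any subclass setup runs. A rough, hypothetical sketch of a test using it; the table, family and row names below are made up.)

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.junit.BeforeClass;

    /** Hypothetical test sketch, assumed to live in the same package as HBaseTestingClusterAutoStarter. */
    public class ExampleMiniClusterTest extends HBaseTestingClusterAutoStarter {

    	@BeforeClass
    	public static void prepareTestTable() throws Exception {
    		// The parent @BeforeClass has already started the mini cluster at this point.
    		TableName name = TableName.valueOf("exampleTable");
    		byte[][] families = {Bytes.toBytes("family1")};
    		byte[][] splitKeys = {Bytes.toBytes("row-5")};
    		createTable(name, families, splitKeys);

    		try (Table table = openTable(name)) {
    			Put put = new Put(Bytes.toBytes("row-1"));
    			put.addColumn(Bytes.toBytes("family1"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
    			table.put(put);
    		}
    	}
    }
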
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
similarity index 86%
copy from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
copy to flink-connectors/flink-connector-hbase-2.2/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
index 5bad636..30d49da 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
+++ b/flink-connectors/flink-connector-hbase-2.2/src/test/java/org/slf4j/impl/Log4jLoggerAdapter.java
@@ -16,12 +16,10 @@
  * limitations under the License.
  */
 
-package org.apache.flink.connector.hbase.util;
+package org.slf4j.impl;
 
 /**
- * Planner type to use.
+ * Fake appender to work around HBase referring to it directly.
  */
-public enum PlannerType {
-	BLINK_PLANNER,
-	OLD_PLANNER
+public interface Log4jLoggerAdapter {
 }
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/hbase-site.xml b/flink-connectors/flink-connector-hbase-2.2/src/test/resources/hbase-site.xml
similarity index 100%
copy from flink-connectors/flink-connector-hbase/src/test/resources/hbase-site.xml
copy to flink-connectors/flink-connector-hbase-2.2/src/test/resources/hbase-site.xml
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/log4j2-test.properties b/flink-connectors/flink-connector-hbase-2.2/src/test/resources/log4j2-test.properties
similarity index 100%
copy from flink-connectors/flink-connector-hbase/src/test/resources/log4j2-test.properties
copy to flink-connectors/flink-connector-hbase-2.2/src/test/resources/log4j2-test.properties
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/org/apache/flink/connector/hbase/HBaseTablePlanTest.xml b/flink-connectors/flink-connector-hbase-2.2/src/test/resources/org/apache/flink/connector/hbase2/HBaseTablePlanTest.xml
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/test/resources/org/apache/flink/connector/hbase/HBaseTablePlanTest.xml
rename to flink-connectors/flink-connector-hbase-2.2/src/test/resources/org/apache/flink/connector/hbase2/HBaseTablePlanTest.xml
diff --git a/flink-connectors/flink-connector-hbase/pom.xml b/flink-connectors/flink-connector-hbase-base/pom.xml
similarity index 87%
rename from flink-connectors/flink-connector-hbase/pom.xml
rename to flink-connectors/flink-connector-hbase-base/pom.xml
index 39f81e8..34bdb7d 100644
--- a/flink-connectors/flink-connector-hbase/pom.xml
+++ b/flink-connectors/flink-connector-hbase-base/pom.xml
@@ -29,33 +29,16 @@ under the License.
 		<relativePath>..</relativePath>
 	</parent>
 
-	<artifactId>flink-connector-hbase_${scala.binary.version}</artifactId>
-	<name>Flink : Connectors : HBase</name>
+	<artifactId>flink-connector-hbase-base_${scala.binary.version}</artifactId>
+	<name>Flink : Connectors : HBase base</name>
 	<packaging>jar</packaging>
 
 	<properties>
 		<hbase.version>1.4.3</hbase.version>
 	</properties>
 
-	<build>
-		<plugins>
-			<plugin>
-				<groupId>org.apache.maven.plugins</groupId>
-				<artifactId>maven-surefire-plugin</artifactId>
-				<version>2.19.1</version>
-				<configuration>
-					<argLine>-XX:MaxPermSize=128m -Dmvn.forkNumber=${surefire.forkNumber}</argLine>
-					<!-- Enforce single fork execution due to heavy mini cluster use in the tests -->
-					<forkCount>1</forkCount>
-				</configuration>
-			</plugin>
-		</plugins>
-	</build>
-
 	<dependencies>
 
-		<!-- core dependencies -->
-
 		<dependency>
 			<groupId>org.apache.flink</groupId>
 			<artifactId>flink-core</artifactId>
@@ -113,8 +96,6 @@ under the License.
 			<scope>provided</scope>
 		</dependency>
 
-		<!-- HBase server needed for TableOutputFormat -->
-		<!-- TODO implement bulk output format for HBase -->
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
 			<artifactId>hbase-server</artifactId>
@@ -186,6 +167,10 @@ under the License.
 					<groupId>org.slf4j</groupId>
 					<artifactId>slf4j-log4j12</artifactId>
 				</exclusion>
+				<exclusion>
+					<groupId>org.apache.hadoop</groupId>
+					<artifactId>hadoop-auth</artifactId>
+				</exclusion>
 			</exclusions>
 		</dependency>
 
@@ -225,7 +210,6 @@ under the License.
 			</exclusions>
 		</dependency>
 
-		<!-- Test dependencies are only available for Hadoop-2. -->
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
 			<artifactId>hbase-server</artifactId>
@@ -348,32 +332,24 @@ under the License.
 		</dependency>
 	</dependencies>
 
+	<build>
+		<plugins>
+			<plugin>
+				<groupId>org.apache.maven.plugins</groupId>
+				<artifactId>maven-jar-plugin</artifactId>
+				<executions>
+					<execution>
+						<goals>
+							<goal>test-jar</goal>
+						</goals>
+					</execution>
+				</executions>
+			</plugin>
+		</plugins>
+	</build>
+
 	<profiles>
 		<profile>
-			<id>cdh5.1.3</id>
-			<activation>
-				<property>
-					<name>cdh5.1.3</name>
-				</property>
-			</activation>
-			<properties>
-				<hbase.version>0.98.1-cdh5.1.3</hbase.version>
-				<hadoop.version>2.3.0-cdh5.1.3</hadoop.version>
-				<!-- Cloudera use different versions for hadoop core and commons-->
-				<!-- This profile could be removed if Cloudera fix this mismatch! -->
-				<hadoop.core.version>2.3.0-mr1-cdh5.1.3</hadoop.core.version>
-			</properties>
-			<dependencyManagement>
-				<dependencies>
-					<dependency>
-						<groupId>org.apache.hadoop</groupId>
-						<artifactId>hadoop-core</artifactId>
-						<version>${hadoop.core.version}</version>
-					</dependency>
-				</dependencies>
-			</dependencyManagement>
-		</profile>
-		<profile>
 			<id>java11</id>
 			<activation>
 				<jdk>11</jdk>
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/options/HBaseWriteOptions.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/options/HBaseWriteOptions.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/options/HBaseWriteOptions.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/options/HBaseWriteOptions.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/HBaseMutationConverter.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseSinkFunction.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/HBaseSinkFunction.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/HBaseSinkFunction.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/HBaseSinkFunction.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/LegacyMutationConverter.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/LegacyMutationConverter.java
similarity index 97%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/LegacyMutationConverter.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/LegacyMutationConverter.java
index 3a63a95..24208fd 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/LegacyMutationConverter.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/LegacyMutationConverter.java
@@ -26,7 +26,7 @@ import org.apache.flink.types.Row;
 import org.apache.hadoop.hbase.client.Mutation;
 
 /**
- * Legacy implementation for {@link org.apache.flink.connector.hbase.source.HBaseTableSource}.
+ * Legacy implementation for {@link org.apache.flink.connector.hbase.source.AbstractHBaseTableSource}.
  */
 public class LegacyMutationConverter implements HBaseMutationConverter<Tuple2<Boolean, Row>> {
 	private static final long serialVersionUID = 7358222494016900667L;
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/RowDataToMutationConverter.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/RowDataToMutationConverter.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/sink/RowDataToMutationConverter.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/sink/RowDataToMutationConverter.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseDynamicTableSource.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseDynamicTableSource.java
similarity index 85%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseDynamicTableSource.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseDynamicTableSource.java
index 537d4b2..8fe2391 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseDynamicTableSource.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseDynamicTableSource.java
@@ -20,15 +20,16 @@ package org.apache.flink.connector.hbase.source;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.connector.hbase.util.HBaseTableSchema;
 import org.apache.flink.table.api.TableSchema;
 import org.apache.flink.table.connector.ChangelogMode;
-import org.apache.flink.table.connector.source.DynamicTableSource;
 import org.apache.flink.table.connector.source.InputFormatProvider;
 import org.apache.flink.table.connector.source.LookupTableSource;
 import org.apache.flink.table.connector.source.ScanTableSource;
 import org.apache.flink.table.connector.source.TableFunctionProvider;
 import org.apache.flink.table.connector.source.abilities.SupportsProjectionPushDown;
+import org.apache.flink.table.data.RowData;
 import org.apache.flink.table.utils.TableSchemaUtils;
 
 import org.apache.hadoop.conf.Configuration;
@@ -39,14 +40,14 @@ import static org.apache.flink.util.Preconditions.checkArgument;
  * HBase table source implementation.
  */
 @Internal
-public class HBaseDynamicTableSource implements ScanTableSource, LookupTableSource, SupportsProjectionPushDown {
+public abstract class AbstractHBaseDynamicTableSource implements ScanTableSource, LookupTableSource, SupportsProjectionPushDown {
 
-	private final Configuration conf;
-	private final String tableName;
-	private HBaseTableSchema hbaseSchema;
-	private final String nullStringLiteral;
+	protected final Configuration conf;
+	protected final String tableName;
+	protected HBaseTableSchema hbaseSchema;
+	protected final String nullStringLiteral;
 
-	public HBaseDynamicTableSource(
+	public AbstractHBaseDynamicTableSource(
 			Configuration conf,
 			String tableName,
 			HBaseTableSchema hbaseSchema,
@@ -59,9 +60,11 @@ public class HBaseDynamicTableSource implements ScanTableSource, LookupTableSour
 
 	@Override
 	public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
-		return InputFormatProvider.of(new HBaseRowDataInputFormat(conf, tableName, hbaseSchema, nullStringLiteral));
+		return InputFormatProvider.of(getInputFormat());
 	}
 
+	protected abstract InputFormat<RowData, ?> getInputFormat();
+
 	@Override
 	public LookupRuntimeProvider getLookupRuntimeProvider(LookupContext context) {
 		checkArgument(context.getKeys().length == 1 && context.getKeys()[0].length == 1,
@@ -100,11 +103,6 @@ public class HBaseDynamicTableSource implements ScanTableSource, LookupTableSour
 	}
 
 	@Override
-	public DynamicTableSource copy() {
-		return new HBaseDynamicTableSource(conf, tableName, hbaseSchema, nullStringLiteral);
-	}
-
-	@Override
 	public String asSummaryString() {
 		return "HBase";
 	}
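
    (To make the refactor above concrete; this sketch is not part of the patch. The scan path now goes through the abstract getInputFormat(), and copy() is left to the version-specific subclasses so each can return its own concrete type. Roughly, a subclass in one of the new version modules looks like the following, using the 1.4 module as the example; the actual class added by this commit may differ in detail.)

    package org.apache.flink.connector.hbase1.source;

    import org.apache.flink.api.common.io.InputFormat;
    import org.apache.flink.connector.hbase.source.AbstractHBaseDynamicTableSource;
    import org.apache.flink.connector.hbase.util.HBaseTableSchema;
    import org.apache.flink.table.connector.source.DynamicTableSource;
    import org.apache.flink.table.data.RowData;

    import org.apache.hadoop.conf.Configuration;

    /** Approximate sketch only. */
    public class HBaseDynamicTableSource extends AbstractHBaseDynamicTableSource {

    	public HBaseDynamicTableSource(
    			Configuration conf,
    			String tableName,
    			HBaseTableSchema hbaseSchema,
    			String nullStringLiteral) {
    		super(conf, tableName, hbaseSchema, nullStringLiteral);
    	}

    	@Override
    	public DynamicTableSource copy() {
    		// copy() moves down here so each version module returns its own concrete type.
    		return new HBaseDynamicTableSource(conf, tableName, hbaseSchema, nullStringLiteral);
    	}

    	@Override
    	protected InputFormat<RowData, ?> getInputFormat() {
    		// The HBase-1.4-specific scan input format, with the same arguments as before the refactor.
    		return new HBaseRowDataInputFormat(conf, tableName, hbaseSchema, nullStringLiteral);
    	}
    }
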
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseTableSource.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseTableSource.java
similarity index 85%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseTableSource.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseTableSource.java
index 93af179..7f09c6e 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseTableSource.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/AbstractHBaseTableSource.java
@@ -20,6 +20,7 @@ package org.apache.flink.connector.hbase.source;
 
 import org.apache.flink.annotation.Internal;
 import org.apache.flink.annotation.VisibleForTesting;
+import org.apache.flink.api.common.io.InputFormat;
 import org.apache.flink.api.common.typeinfo.TypeInformation;
 import org.apache.flink.api.java.DataSet;
 import org.apache.flink.api.java.ExecutionEnvironment;
@@ -43,7 +44,7 @@ import java.util.Arrays;
 /**
  * Creates a TableSource to scan an HBase table.
  *
- * <p>The table name and required HBase configuration is passed during {@link HBaseTableSource} construction.
+ * <p>The table name and required HBase configuration is passed during {@link AbstractHBaseTableSource} construction.
  * Use {@link #addColumn(String, String, Class)} to specify the family, qualifier, and type of columns to scan.
  *
  * <p>The TableSource returns {@link Row} with nested Rows for each column family.
@@ -66,24 +67,14 @@ import java.util.Arrays;
  * </pre>
  */
 @Internal
-public class HBaseTableSource implements BatchTableSource<Row>, ProjectableTableSource<Row>, StreamTableSource<Row>, LookupableTableSource<Row> {
+public abstract class AbstractHBaseTableSource implements BatchTableSource<Row>, ProjectableTableSource<Row>, StreamTableSource<Row>, LookupableTableSource<Row> {
 
-	private final Configuration conf;
-	private final String tableName;
-	private final HBaseTableSchema hbaseSchema;
+	protected final Configuration conf;
+	protected final String tableName;
+	protected final HBaseTableSchema hbaseSchema;
 	private final int[] projectFields;
 
-	/**
-	 * The HBase configuration and the name of the table to read.
-	 *
-	 * @param conf      hbase configuration
-	 * @param tableName the tableName
-	 */
-	public HBaseTableSource(Configuration conf, String tableName) {
-		this(conf, tableName, new HBaseTableSchema(), null);
-	}
-
-	public HBaseTableSource(Configuration conf, String tableName, HBaseTableSchema hbaseSchema, int[] projectFields) {
+	public AbstractHBaseTableSource(Configuration conf, String tableName, HBaseTableSchema hbaseSchema, int[] projectFields) {
 		this.conf = conf;
 		this.tableName = Preconditions.checkNotNull(tableName, "Table  name");
 		this.hbaseSchema = hbaseSchema;
@@ -134,16 +125,11 @@ public class HBaseTableSource implements BatchTableSource<Row>, ProjectableTable
 	public DataSet<Row> getDataSet(ExecutionEnvironment execEnv) {
 		HBaseTableSchema projectedSchema = hbaseSchema.getProjectedHBaseTableSchema(projectFields);
 		return execEnv
-			.createInput(new HBaseRowInputFormat(conf, tableName, projectedSchema), getReturnType())
+			.createInput(getInputFormat(projectedSchema), getReturnType())
 			.name(explainSource());
 	}
 
 	@Override
-	public HBaseTableSource projectFields(int[] fields) {
-		return new HBaseTableSource(this.conf, tableName, hbaseSchema, fields);
-	}
-
-	@Override
 	public String explainSource() {
 		return "HBaseTableSource[schema=" + Arrays.toString(getTableSchema().getFieldNames())
 			+ ", projectFields=" + Arrays.toString(projectFields) + "]";
@@ -187,10 +173,12 @@ public class HBaseTableSource implements BatchTableSource<Row>, ProjectableTable
 	public DataStream<Row> getDataStream(StreamExecutionEnvironment execEnv) {
 		HBaseTableSchema projectedSchema = hbaseSchema.getProjectedHBaseTableSchema(projectFields);
 		return execEnv
-			.createInput(new HBaseRowInputFormat(conf, tableName, projectedSchema), getReturnType())
+			.createInput(getInputFormat(projectedSchema), getReturnType())
 			.name(explainSource());
 	}
 
+	protected abstract InputFormat<Row, ?> getInputFormat(HBaseTableSchema projectedSchema);
+
 	@VisibleForTesting
 	public HBaseTableSchema getHBaseTableSchema() {
 		return this.hbaseSchema;
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseLookupFunction.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/HBaseLookupFunction.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseLookupFunction.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/HBaseLookupFunction.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataLookupFunction.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataLookupFunction.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataLookupFunction.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/HBaseRowDataLookupFunction.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java
similarity index 92%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java
index 2790d48..2ef4781 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/source/TableInputSplit.java
@@ -26,7 +26,7 @@ import org.apache.flink.core.io.LocatableInputSplit;
  * references to row below refer to the key of the row.
  */
 @Internal
-class TableInputSplit extends LocatableInputSplit {
+public class TableInputSplit extends LocatableInputSplit {
 
 	private static final long serialVersionUID = 1L;
 
@@ -53,7 +53,7 @@ class TableInputSplit extends LocatableInputSplit {
 	 * @param endRow
 	 *        the end row of the split
 	 */
-	TableInputSplit(final int splitNumber, final String[] hostnames, final byte[] tableName, final byte[] startRow,
+	public TableInputSplit(final int splitNumber, final String[] hostnames, final byte[] tableName, final byte[] startRow,
 			final byte[] endRow) {
 		super(splitNumber, hostnames);
 
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java
similarity index 81%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java
index 165f95a..ca6a3f6 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseConfigurationUtil.java
@@ -42,6 +42,8 @@ public class HBaseConfigurationUtil {
 
 	private static final Logger LOG = LoggerFactory.getLogger(HBaseConfigurationUtil.class);
 
+	public static final String ENV_HBASE_CONF_DIR = "HBASE_CONF_DIR";
+
 	public static Configuration getHBaseConfiguration() {
 
 		// Instantiate an HBaseConfiguration to load the hbase-default.xml and hbase-site.xml from the classpath.
@@ -161,4 +163,35 @@ public class HBaseConfigurationUtil {
 		DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream);
 		writable.readFields(dataInputStream);
 	}
+
+	public static org.apache.hadoop.conf.Configuration createHBaseConf() {
+		org.apache.hadoop.conf.Configuration hbaseClientConf = HBaseConfiguration.create();
+
+		String hbaseConfDir = System.getenv(ENV_HBASE_CONF_DIR);
+
+		if (hbaseConfDir != null) {
+			if (new File(hbaseConfDir).exists()) {
+				String coreSite = hbaseConfDir + "/core-site.xml";
+				String hdfsSite = hbaseConfDir + "/hdfs-site.xml";
+				String hbaseSite = hbaseConfDir + "/hbase-site.xml";
+				if (new File(coreSite).exists()) {
+					hbaseClientConf.addResource(new org.apache.hadoop.fs.Path(coreSite));
+					LOG.info("Adding " + coreSite + " to hbase configuration");
+				}
+				if (new File(hdfsSite).exists()) {
+					hbaseClientConf.addResource(new org.apache.hadoop.fs.Path(hdfsSite));
+					LOG.info("Adding " + hdfsSite + " to hbase configuration");
+				}
+				if (new File(hbaseSite).exists()) {
+					hbaseClientConf.addResource(new org.apache.hadoop.fs.Path(hbaseSite));
+					LOG.info("Adding " + hbaseSite + " to hbase configuration");
+				}
+			} else {
+				LOG.warn("HBase config directory '{}' not found, cannot load HBase configuration.", hbaseConfDir);
+			}
+		} else {
+			LOG.warn("{} env variable not found, cannot load HBase configuration.", ENV_HBASE_CONF_DIR);
+		}
+		return hbaseClientConf;
+	}
 }
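
    (Usage note, example only and not part of the patch: createHBaseConf() starts from the classpath defaults and then layers in core-site.xml, hdfs-site.xml and hbase-site.xml from $HBASE_CONF_DIR when that directory exists, logging a warning otherwise. Callers can still override individual keys afterwards; the host names and znode below are placeholders.)

    org.apache.hadoop.conf.Configuration hbaseConf = HBaseConfigurationUtil.createHBaseConf();
    hbaseConf.set("hbase.zookeeper.quorum", "zk-1:2181,zk-2:2181");
    hbaseConf.set("zookeeper.znode.parent", "/hbase");
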
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseReadWriteHelper.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseReadWriteHelper.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseReadWriteHelper.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseReadWriteHelper.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseSerde.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseSerde.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseSerde.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseSerde.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseTableSchema.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseTableSchema.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseTableSchema.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseTableSchema.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseTypeUtils.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseTypeUtils.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/connector/hbase/util/HBaseTypeUtils.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/connector/hbase/util/HBaseTypeUtils.java
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/table/descriptors/HBaseValidator.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/table/descriptors/AbstractHBaseValidator.java
similarity index 84%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/table/descriptors/HBaseValidator.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/table/descriptors/AbstractHBaseValidator.java
index e08909c..a102943 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/table/descriptors/HBaseValidator.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/table/descriptors/AbstractHBaseValidator.java
@@ -28,10 +28,9 @@ import java.util.List;
  * More features to be supported, e.g., batch read/write, async api(support from hbase version 2.0.0), Caching for LookupFunction.
  */
 @Internal
-public class HBaseValidator extends ConnectorDescriptorValidator {
+public abstract class AbstractHBaseValidator extends ConnectorDescriptorValidator {
 
 	public static final String CONNECTOR_TYPE_VALUE_HBASE = "hbase";
-	public static final String CONNECTOR_VERSION_VALUE_143 = "1.4.3";
 	public static final String CONNECTOR_TABLE_NAME = "connector.table-name";
 	public static final String CONNECTOR_ZK_QUORUM = "connector.zookeeper.quorum";
 	public static final String CONNECTOR_ZK_NODE_PARENT = "connector.zookeeper.znode.parent";
@@ -44,12 +43,14 @@ public class HBaseValidator extends ConnectorDescriptorValidator {
 		super.validate(properties);
 		properties.validateValue(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_HBASE, false);
 		properties.validateString(CONNECTOR_TABLE_NAME, false, 1);
-		properties.validateString(CONNECTOR_ZK_QUORUM, false, 1);
+		properties.validateString(CONNECTOR_ZK_QUORUM, validateZkQuorum(), 1);
 		properties.validateString(CONNECTOR_ZK_NODE_PARENT, true, 1);
 		validateSinkProperties(properties);
 		validateVersion(properties);
 	}
 
+	protected abstract boolean validateZkQuorum();
+
 	private void validateSinkProperties(DescriptorProperties properties) {
 		properties.validateMemorySize(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE, true, 1024 * 1024); // only allow MB precision
 		properties.validateInt(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS, true, 1);
@@ -57,7 +58,11 @@ public class HBaseValidator extends ConnectorDescriptorValidator {
 	}
 
 	private void validateVersion(DescriptorProperties properties) {
-		final List<String> versions = Arrays.asList(CONNECTOR_VERSION_VALUE_143);
-		properties.validateEnumValues(CONNECTOR_VERSION, false, versions);
+		final List<String> versions = Arrays.asList(getConnectorVersion());
+		properties.validateEnumValues(CONNECTOR_VERSION, zkQuorumIsOptional(), versions);
 	}
+
+	protected abstract String getConnectorVersion();
+
+	protected abstract boolean zkQuorumIsOptional();
 }
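
    (Context sketch, not taken from the patch: each version module is expected to supply a thin concrete validator that fixes the supported connector.version value and decides whether the ZooKeeper quorum is optional. Assuming the 1.4 module keeps the pre-refactor behaviour, i.e. the quorum stays mandatory and the version is "1.4.3", such a subclass could look roughly like this; the actual return values are assumptions.)

    package org.apache.flink.connector.hbase1;

    import org.apache.flink.table.descriptors.AbstractHBaseValidator;

    /** Rough sketch; the real version-specific HBaseValidator may differ. */
    public class HBaseValidator extends AbstractHBaseValidator {

    	public static final String CONNECTOR_VERSION_VALUE_143 = "1.4.3";

    	@Override
    	protected boolean validateZkQuorum() {
    		return false; // false = quorum is mandatory, matching the behaviour before the refactor
    	}

    	@Override
    	protected String getConnectorVersion() {
    		return CONNECTOR_VERSION_VALUE_143;
    	}

    	@Override
    	protected boolean zkQuorumIsOptional() {
    		return false; // also used as the "optional" flag when validating connector.version
    	}
    }
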
diff --git a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/table/descriptors/HBase.java b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/table/descriptors/HBase.java
similarity index 85%
rename from flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/table/descriptors/HBase.java
rename to flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/table/descriptors/HBase.java
index 471ec73..47e167e 100644
--- a/flink-connectors/flink-connector-hbase/src/main/java/org/apache/flink/table/descriptors/HBase.java
+++ b/flink-connectors/flink-connector-hbase-base/src/main/java/org/apache/flink/table/descriptors/HBase.java
@@ -24,14 +24,14 @@ import org.apache.flink.util.TimeUtils;
 
 import java.util.Map;
 
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_TABLE_NAME;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_TYPE_VALUE_HBASE;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_ZK_NODE_PARENT;
+import static org.apache.flink.table.descriptors.AbstractHBaseValidator.CONNECTOR_ZK_QUORUM;
 import static org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR_VERSION;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_TABLE_NAME;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_TYPE_VALUE_HBASE;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_ZK_NODE_PARENT;
-import static org.apache.flink.table.descriptors.HBaseValidator.CONNECTOR_ZK_QUORUM;
 
 /**
  * Connector descriptor for Apache HBase.
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseFlinkTestConstants.java b/flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/example/HBaseFlinkTestConstants.java
similarity index 72%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseFlinkTestConstants.java
rename to flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/example/HBaseFlinkTestConstants.java
index 160eb21..f34f8b1 100644
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseFlinkTestConstants.java
+++ b/flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/example/HBaseFlinkTestConstants.java
@@ -20,11 +20,14 @@ package org.apache.flink.connector.hbase.example;
 
 import org.apache.hadoop.hbase.util.Bytes;
 
-class HBaseFlinkTestConstants {
+/**
+ * Constants used during the hbase connector tests.
+ */
+public class HBaseFlinkTestConstants {
 
-	static final byte[] CF_SOME = Bytes.toBytes("someCf");
-	static final byte[] Q_SOME = Bytes.toBytes("someQual");
-	static final String TEST_TABLE_NAME = "test-table";
-	static final String TMP_DIR = "/tmp/test";
+	public static final byte[] CF_SOME = Bytes.toBytes("someCf");
+	public static final byte[] Q_SOME = Bytes.toBytes("someQual");
+	public static final String TEST_TABLE_NAME = "test-table";
+	public static final String TMP_DIR = "/tmp/test";
 
 }
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseConfigLoadingTest.java b/flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/util/HBaseConfigLoadingTest.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/HBaseConfigLoadingTest.java
rename to flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/util/HBaseConfigLoadingTest.java
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java b/flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
rename to flink-connectors/flink-connector-hbase-base/src/test/java/org/apache/flink/connector/hbase/util/PlannerType.java
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/hbase-site.xml b/flink-connectors/flink-connector-hbase-base/src/test/resources/hbase-site.xml
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/test/resources/hbase-site.xml
rename to flink-connectors/flink-connector-hbase-base/src/test/resources/hbase-site.xml
diff --git a/flink-connectors/flink-connector-hbase/src/test/resources/log4j2-test.properties b/flink-connectors/flink-connector-hbase-base/src/test/resources/log4j2-test.properties
similarity index 100%
rename from flink-connectors/flink-connector-hbase/src/test/resources/log4j2-test.properties
rename to flink-connectors/flink-connector-hbase-base/src/test/resources/log4j2-test.properties
diff --git a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseReadExample.java b/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseReadExample.java
deleted file mode 100644
index 026c6d0..0000000
--- a/flink-connectors/flink-connector-hbase/src/test/java/org/apache/flink/connector/hbase/example/HBaseReadExample.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.flink.connector.hbase.example;
-
-import org.apache.flink.api.common.functions.FilterFunction;
-import org.apache.flink.api.java.DataSet;
-import org.apache.flink.api.java.ExecutionEnvironment;
-import org.apache.flink.api.java.tuple.Tuple2;
-import org.apache.flink.connector.hbase.source.HBaseInputFormat;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Simple stub for HBase DataSet read
- *
- * <p>To run the test first create the test table with hbase shell.
- *
- * <p>Use the following commands:
- * <ul>
- *     <li>create 'test-table', 'someCf'</li>
- *     <li>put 'test-table', '1', 'someCf:someQual', 'someString'</li>
- *     <li>put 'test-table', '2', 'someCf:someQual', 'anotherString'</li>
- * </ul>
- *
- * <p>The test should return just the first entry.
- *
- */
-public class HBaseReadExample {
-	public static void main(String[] args) throws Exception {
-		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
-		@SuppressWarnings("serial")
-		DataSet<Tuple2<String, String>> hbaseDs = env.createInput(new HBaseInputFormat<Tuple2<String, String>>(HBaseConfiguration.create()) {
-
-				@Override
-				public String getTableName() {
-					return HBaseFlinkTestConstants.TEST_TABLE_NAME;
-				}
-
-				@Override
-				protected Scan getScanner() {
-					Scan scan = new Scan();
-					scan.addColumn(HBaseFlinkTestConstants.CF_SOME, HBaseFlinkTestConstants.Q_SOME);
-					return scan;
-				}
-
-				private Tuple2<String, String> reuse = new Tuple2<>();
-
-				@Override
-				protected Tuple2<String, String> mapResultToTuple(Result r) {
-					String key = Bytes.toString(r.getRow());
-					String val = Bytes.toString(r.getValue(HBaseFlinkTestConstants.CF_SOME, HBaseFlinkTestConstants.Q_SOME));
-					reuse.setField(key, 0);
-					reuse.setField(val, 1);
-					return reuse;
-				}
-		})
-		.filter((FilterFunction<Tuple2<String, String>>) t -> {
-			String val = t.getField(1);
-			return val.startsWith("someStr");
-		});
-
-		hbaseDs.print();
-
-	}
-
-}
diff --git a/flink-connectors/flink-sql-connector-hbase/pom.xml b/flink-connectors/flink-sql-connector-hbase-1.4/pom.xml
similarity index 92%
copy from flink-connectors/flink-sql-connector-hbase/pom.xml
copy to flink-connectors/flink-sql-connector-hbase-1.4/pom.xml
index 38b6d77..08e74e3 100644
--- a/flink-connectors/flink-sql-connector-hbase/pom.xml
+++ b/flink-connectors/flink-sql-connector-hbase-1.4/pom.xml
@@ -28,15 +28,15 @@ under the License.
 	</parent>
 	<modelVersion>4.0.0</modelVersion>
 
-	<artifactId>flink-sql-connector-hbase_${scala.binary.version}</artifactId>
-	<name>Flink : Connectors : SQL : HBase</name>
+	<artifactId>flink-sql-connector-hbase-1.4_${scala.binary.version}</artifactId>
+	<name>Flink : Connectors : SQL : HBase 1.4</name>
 
 	<packaging>jar</packaging>
 
 	<dependencies>
 		<dependency>
 			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-connector-hbase_${scala.binary.version}</artifactId>
+			<artifactId>flink-connector-hbase-1.4_${scala.binary.version}</artifactId>
 			<version>${project.version}</version>
 		</dependency>
 	</dependencies>
@@ -55,7 +55,7 @@ under the License.
 						</goals>
 						<configuration>
 							<!--
-							Make the file hbase-default.xml under flink-sql-connector-hbase/src/main/resources as the
+							Make the file hbase-default.xml under flink-sql-connector-hbase-1.4/src/main/resources as the
 							hbase-default.xml in the shaded target jar here, because we don't want to check the hbase
 							version at client side. Also we don't need the extra default configs keys.
 							-->
@@ -69,7 +69,8 @@ under the License.
 							<shadeTestJar>false</shadeTestJar>
 							<artifactSet>
 								<includes>
-									<include>org.apache.flink:flink-connector-hbase_${scala.binary.version}</include>
+									<include>org.apache.flink:flink-connector-hbase-base_${scala.binary.version}</include>
+									<include>org.apache.flink:flink-connector-hbase-1.4_${scala.binary.version}</include>
 									<include>org.apache.hbase:hbase-*</include>
 									<include>org.apache.zookeeper:zookeeper</include>
 									<include>org.apache.htrace:htrace-core</include>
diff --git a/flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/NOTICE b/flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/META-INF/NOTICE
similarity index 97%
rename from flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/NOTICE
rename to flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/META-INF/NOTICE
index a550c8b..dd35463 100644
--- a/flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/NOTICE
+++ b/flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/META-INF/NOTICE
@@ -1,11 +1,10 @@
-flink-sql-connector-hbase
+flink-sql-connector-hbase-1.4
 Copyright 2014-2020 The Apache Software Foundation
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).
 
 This project bundles the following dependencies under the Apache Software License 2.0. (http://www.apache.org/licenses/LICENSE-2.0.txt)
-
 - com.google.guava:guava:12.0.1
 - com.yammer.metrics:metrics-core:2.2.0
 - commons-codec:commons-codec:1.10
diff --git a/flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/licenses/LICENSE.protobuf b/flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/META-INF/licenses/LICENSE.protobuf
similarity index 100%
copy from flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/licenses/LICENSE.protobuf
copy to flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/META-INF/licenses/LICENSE.protobuf
diff --git a/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml b/flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/hbase-default.xml
similarity index 100%
copy from flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml
copy to flink-connectors/flink-sql-connector-hbase-1.4/src/main/resources/hbase-default.xml
diff --git a/flink-connectors/flink-sql-connector-hbase/pom.xml b/flink-connectors/flink-sql-connector-hbase-2.2/pom.xml
similarity index 86%
rename from flink-connectors/flink-sql-connector-hbase/pom.xml
rename to flink-connectors/flink-sql-connector-hbase-2.2/pom.xml
index 38b6d77..521b9dd 100644
--- a/flink-connectors/flink-sql-connector-hbase/pom.xml
+++ b/flink-connectors/flink-sql-connector-hbase-2.2/pom.xml
@@ -28,15 +28,15 @@ under the License.
 	</parent>
 	<modelVersion>4.0.0</modelVersion>
 
-	<artifactId>flink-sql-connector-hbase_${scala.binary.version}</artifactId>
-	<name>Flink : Connectors : SQL : HBase</name>
+	<artifactId>flink-sql-connector-hbase-2.2_${scala.binary.version}</artifactId>
+	<name>Flink : Connectors : SQL : HBase 2.2</name>
 
 	<packaging>jar</packaging>
 
 	<dependencies>
 		<dependency>
 			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-connector-hbase_${scala.binary.version}</artifactId>
+			<artifactId>flink-connector-hbase-2.2_${scala.binary.version}</artifactId>
 			<version>${project.version}</version>
 		</dependency>
 	</dependencies>
@@ -55,7 +55,7 @@ under the License.
 						</goals>
 						<configuration>
 							<!--
-							Make the file hbase-default.xml under flink-sql-connector-hbase/src/main/resources as the
+							Make the file hbase-default.xml under flink-sql-connector-hbase-2.2/src/main/resources as the
 							hbase-default.xml in the shaded target jar here, because we don't want to check the hbase
 							version at client side. Also we don't need the extra default configs keys.
 							-->
@@ -69,18 +69,18 @@ under the License.
 							<shadeTestJar>false</shadeTestJar>
 							<artifactSet>
 								<includes>
-									<include>org.apache.flink:flink-connector-hbase_${scala.binary.version}</include>
+									<include>org.apache.flink:flink-connector-hbase-base_${scala.binary.version}</include>
+									<include>org.apache.flink:flink-connector-hbase-2.2_${scala.binary.version}</include>
 									<include>org.apache.hbase:hbase-*</include>
+									<include>org.apache.hbase.thirdparty:hbase-shaded-*</include>
 									<include>org.apache.zookeeper:zookeeper</include>
-									<include>org.apache.htrace:htrace-core</include>
-									<include>com.google.guava:guava</include>
+									<include>org.apache.htrace:htrace-core4</include>
 									<include>com.google.protobuf:protobuf-java</include>
-									<include>com.yammer.metrics:metrics-core</include>
-									<include>commons-logging:commons-logging</include>
-									<include>commons-lang:commons-lang</include>
-									<include>commons-configuration:commons-configuration</include>
 									<include>commons-codec:commons-codec</include>
+									<include>org.apache.commons:commons-crypto</include>
+									<include>org.apache.commons:commons-lang3</include>
 									<include>io.netty:netty-all</include>
+									<include>io.dropwizard.metrics:metrics-core</include>
 								</includes>
 								<excludes>
 									<exclude>org.apache.hbase:hbase-metrics*</exclude>
@@ -100,6 +100,8 @@ under the License.
 										<exclude>properties.dtd</exclude>
 										<exclude>PropertyList-1.0.dtd</exclude>
 										<exclude>LICENSE.txt</exclude>
+										<exclude>*.proto</exclude>
+										<exclude>protobuf/*</exclude>
 									</excludes>
 								</filter>
 							</filters>
diff --git a/flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/META-INF/NOTICE b/flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/META-INF/NOTICE
new file mode 100644
index 0000000..a8098b7
--- /dev/null
+++ b/flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/META-INF/NOTICE
@@ -0,0 +1,35 @@
+flink-sql-connector-hbase-2.2
+Copyright 2014-2020 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+This project bundles the following dependencies under the Apache Software License 2.0. (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+ - commons-codec:commons-codec:jar:1.10
+ - commons-lang:commons-lang:2.6
+ - commons-logging:commons-logging:1.2
+ - io.netty:netty-all:jar:4.1.44.Final
+ - org.apache.commons:commons-crypto:jar:1.0.0
+ - org.apache.commons:commons-lang3:jar:3.3.2
+ - org.apache.hbase:hbase-client:jar:2.2.3
+ - org.apache.hbase:hbase-common:jar:2.2.3
+ - org.apache.hbase:hbase-hadoop-compat:jar:2.2.3
+ - org.apache.hbase:hbase-hadoop2-compat:jar:2.2.3
+ - org.apache.hbase:hbase-protocol:jar:2.2.3
+ - org.apache.hbase:hbase-protocol-shaded:2.2.3
+ - org.apache.htrace:htrace-core4:jar:4.2.0-incubating
+ - org.apache.zookeeper:zookeeper:pom:3.4.14
+
+This project bundles the following dependencies under the BSD license.
+See bundled license files for details.
+
+ - com.google.protobuf:protobuf-java:2.5.0
+
+The bundled Apache HTrace org.apache.htrace:htrace-core4 dependency bundles the following dependencies under
+the Apache Software License 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+ - com.fasterxml.jackson.core:jackson-annotations:2.4.0
+ - com.fasterxml.jackson.core:jackson-core:2.4.0
+ - com.fasterxml.jackson.core:jackson-databind:2.4.0
+ - commons-logging:commons-logging:1.1.1
diff --git a/flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/licenses/LICENSE.protobuf b/flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/META-INF/licenses/LICENSE.protobuf
similarity index 100%
rename from flink-connectors/flink-sql-connector-hbase/src/main/resources/META-INF/licenses/LICENSE.protobuf
rename to flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/META-INF/licenses/LICENSE.protobuf
diff --git a/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml b/flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/hbase-default.xml
similarity index 72%
rename from flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml
rename to flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/hbase-default.xml
index cd2c15e..5251f12 100644
--- a/flink-connectors/flink-sql-connector-hbase/src/main/resources/hbase-default.xml
+++ b/flink-connectors/flink-sql-connector-hbase-2.2/src/main/resources/hbase-default.xml
@@ -1,31 +1,34 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 -->
 
 <!--
 OVERVIEW
+
 The important configs. are listed near the top.  You should change
 at least the setting for hbase.tmp.dir.  Other settings will change
 dependent on whether you are running hbase in standalone mode or
 distributed.  See the hbase reference guide for requirements and
 guidance making configuration.
+
 This file does not contain all possible configurations.  The file would be
 much larger if it carried everything. The absent configurations will only be
 found through source code reading.  The idea is that such configurations are
@@ -37,8 +40,8 @@ possible configurations would overwhelm and obscure the important.
 
 <configuration>
 	<!--Configs you will likely change are listed here at the top of the file.
-	-->
-	<property>
+    -->
+	<property >
 		<name>hbase.tmp.dir</name>
 		<value>${java.io.tmpdir}/hbase-${user.name}</value>
 		<description>Temporary directory on the local filesystem.
@@ -46,7 +49,7 @@ possible configurations would overwhelm and obscure the important.
 			than '/tmp', the usual resolve for java.io.tmpdir, as the
 			'/tmp' directory is cleared on machine restart.</description>
 	</property>
-	<property>
+	<property >
 		<name>hbase.rootdir</name>
 		<value>${hbase.tmp.dir}/hbase</value>
 		<description>The directory shared by region servers and into
@@ -60,20 +63,6 @@ possible configurations would overwhelm and obscure the important.
 			machine restart.</description>
 	</property>
 	<property >
-		<name>hbase.fs.tmp.dir</name>
-		<value>/user/${user.name}/hbase-staging</value>
-		<description>A staging directory in default file system (HDFS)
-			for keeping temporary data.
-		</description>
-	</property>
-	<property >
-		<name>hbase.bulkload.staging.dir</name>
-		<value>${hbase.fs.tmp.dir}</value>
-		<description>A staging directory in default file system (HDFS)
-			for bulk loading.
-		</description>
-	</property>
-	<property >
 		<name>hbase.cluster.distributed</name>
 		<value>false</value>
 		<description>The mode the cluster will be in. Possible values are
@@ -92,12 +81,12 @@ possible configurations would overwhelm and obscure the important.
 			list of ZooKeeper ensemble servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
 			this is the list of servers which hbase will start/stop ZooKeeper on as
 			part of cluster start/stop.  Client-side, we will take this list of
-			ensemble members and put it together with the hbase.zookeeper.clientPort
+			ensemble members and put it together with the hbase.zookeeper.property.clientPort
 			config. and pass it into zookeeper constructor as the connectString
 			parameter.</description>
 	</property>
 	<!--The above are the important configurations for getting hbase up
-	  and running -->
+      and running -->
 
 	<property>
 		<name>zookeeper.recovery.retry.maxsleeptime</name>
@@ -134,15 +123,34 @@ possible configurations would overwhelm and obscure the important.
 	<property>
 		<name>hbase.master.logcleaner.ttl</name>
 		<value>600000</value>
-		<description>Maximum time a WAL can stay in the .oldlogdir directory,
-			after which it will be cleaned by a Master thread.</description>
+		<description>How long a WAL remain in the archive ({hbase.rootdir}/oldWALs) directory,
+			after which it will be cleaned by a Master thread. The value is in milliseconds.</description>
+	</property>
+	<property>
+		<name>hbase.master.procedurewalcleaner.ttl</name>
+		<value>604800000</value>
+		<description>How long a Procedure WAL will remain in the
+			archive directory, after which it will be cleaned
+			by a Master thread. The value is in milliseconds.</description>
 	</property>
 	<property>
 		<name>hbase.master.infoserver.redirect</name>
 		<value>true</value>
 		<description>Whether or not the Master listens to the Master web
 			UI port (hbase.master.info.port) and redirects requests to the web
-			UI server shared by the Master and RegionServer.</description>
+			UI server shared by the Master and RegionServer. Config. makes
+			sense when Master is serving Regions (not the default).</description>
+	</property>
+	<property>
+		<name>hbase.master.fileSplitTimeout</name>
+		<value>600000</value>
+		<description>Splitting a region, how long to wait on the file-splitting
+			step before aborting the attempt. Default: 600000. This setting used
+			to be known as hbase.regionserver.fileSplitTimeout in hbase-1.x.
+			Split is now run master-side hence the rename (If a
+			'hbase.master.fileSplitTimeout' setting found, will use it to
+			prime the current 'hbase.master.fileSplitTimeout'
+			Configuration.</description>
 	</property>
 
 	<!--RegionServer configurations-->
@@ -174,7 +182,10 @@ possible configurations would overwhelm and obscure the important.
 		<name>hbase.regionserver.handler.count</name>
 		<value>30</value>
 		<description>Count of RPC Listener instances spun up on RegionServers.
-			Same property is used by the Master for count of master handlers.</description>
+			Same property is used by the Master for count of master handlers.
+			Too many handlers can be counter-productive. Make it a multiple of
+			CPU count. If mostly read-only, handlers count close to cpu count
+			does well. Start with twice the CPU count and tune from there.</description>
 	</property>
 	<property>
 		<name>hbase.ipc.server.callqueue.handler.factor</name>
@@ -258,24 +269,37 @@ possible configurations would overwhelm and obscure the important.
 			Updates are blocked and flushes are forced until size of all memstores
 			in a region server hits hbase.regionserver.global.memstore.size.lower.limit.
 			The default value in this configuration has been intentionally left empty in order to
-			honor the old hbase.regionserver.global.memstore.upperLimit property if present.</description>
+			honor the old hbase.regionserver.global.memstore.upperLimit property if present.
+		</description>
 	</property>
 	<property>
 		<name>hbase.regionserver.global.memstore.size.lower.limit</name>
 		<value></value>
-		<description>Maximum size of all memstores in a region server before flushes are forced.
-			Defaults to 95% of hbase.regionserver.global.memstore.size (0.95).
-			A 100% value for this value causes the minimum possible flushing to occur when updates are
-			blocked due to memstore limiting.
-			The default value in this configuration has been intentionally left empty in order to
-			honor the old hbase.regionserver.global.memstore.lowerLimit property if present.</description>
+		<description>Maximum size of all memstores in a region server before flushes
+			are forced. Defaults to 95% of hbase.regionserver.global.memstore.size
+			(0.95). A 100% value for this value causes the minimum possible flushing
+			to occur when updates are blocked due to memstore limiting. The default
+			value in this configuration has been intentionally left empty in order to
+			honor the old hbase.regionserver.global.memstore.lowerLimit property if
+			present.
+		</description>
+	</property>
+	<property>
+		<name>hbase.systemtables.compacting.memstore.type</name>
+		<value>NONE</value>
+		<description>Determines the type of memstore to be used for system tables like
+			META, namespace tables etc. By default NONE is the type and hence we use the
+			default memstore for all the system tables. If we need to use compacting
+			memstore for system tables, then set this property to BASIC/EAGER.
+		</description>
 	</property>
 	<property>
 		<name>hbase.regionserver.optionalcacheflushinterval</name>
 		<value>3600000</value>
 		<description>
 			Maximum amount of time an edit lives in memory before being automatically flushed.
-			Default 1 hour. Set it to 0 to disable automatic flushing.</description>
+			Default 1 hour. Set it to 0 to disable automatic flushing.
+		</description>
 	</property>
 	<property>
 		<name>hbase.regionserver.dns.interface</name>
@@ -294,9 +318,10 @@ possible configurations would overwhelm and obscure the important.
 		<name>hbase.regionserver.regionSplitLimit</name>
 		<value>1000</value>
 		<description>
-			Limit for the number of regions after which no more region splitting should take place.
-			This is not hard limit for the number of regions but acts as a guideline for the regionserver
-			to stop splitting after a certain limit. Default is set to 1000.
+			Limit for the number of regions after which no more region splitting
+			should take place. This is not a hard limit for the number of regions
+			but acts as a guideline for the regionserver to stop splitting after
+			a certain limit. Default is set to 1000.
 		</description>
 	</property>
 
@@ -306,13 +331,15 @@ possible configurations would overwhelm and obscure the important.
 		<value>90000</value>
 		<description>ZooKeeper session timeout in milliseconds. It is used in two different ways.
 			First, this value is used in the ZK client that HBase uses to connect to the ensemble.
-			It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'. See
-			http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions.
-			For example, if a HBase region server connects to a ZK ensemble that's also managed by HBase, then the
-			session timeout will be the one specified by this configuration. But, a region server that connects
-			to an ensemble managed with a different configuration will be subjected that ensemble's maxSessionTimeout. So,
-			even though HBase might propose using 90 seconds, the ensemble can have a max timeout lower than this and
-			it will take precedence. The current default that ZK ships with is 40 seconds, which is lower than HBase's.
+			It is also used by HBase when it starts a ZK server and it is passed as the 'maxSessionTimeout'.
+			See https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#ch_zkSessions.
+			For example, if an HBase region server connects to a ZK ensemble that's also managed
+			by HBase, then the session timeout will be the one specified by this configuration.
+			But, a region server that connects to an ensemble managed with a different configuration
+			will be subject to that ensemble's maxSessionTimeout. So, even though HBase might propose
+			using 90 seconds, the ensemble can have a max timeout lower than this and it will take
+			precedence. The current default maxSessionTimeout that ZK ships with is 40 seconds, which is lower than
+			HBase's.
 		</description>
 	</property>
 	<property>
@@ -320,8 +347,9 @@ possible configurations would overwhelm and obscure the important.
 		<value>/hbase</value>
 		<description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
 			files that are configured with a relative path will go under this node.
-			By default, all of HBase's ZooKeeper file path are configured with a
-			relative path, so they will all go under this directory unless changed.</description>
+			By default, all of HBase's ZooKeeper file paths are configured with a
+			relative path, so they will all go under this directory unless changed.
+		</description>
 	</property>
 	<property>
 		<name>zookeeper.znode.acl.parent</name>
@@ -342,53 +370,35 @@ possible configurations would overwhelm and obscure the important.
 			master for communication and display purposes.</description>
 	</property>
 	<!--
-	The following three properties are used together to create the list of
-	host:peer_port:leader_port quorum servers for ZooKeeper.
-	-->
+    The following three properties are used together to create the list of
+    host:peer_port:leader_port quorum servers for ZooKeeper.
+    -->
 	<property>
 		<name>hbase.zookeeper.peerport</name>
 		<value>2888</value>
 		<description>Port used by ZooKeeper peers to talk to each other.
-			See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+			See https://zookeeper.apache.org/doc/r3.3.3/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
 			for more information.</description>
 	</property>
 	<property>
 		<name>hbase.zookeeper.leaderport</name>
 		<value>3888</value>
 		<description>Port used by ZooKeeper for leader election.
-			See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
+			See https://zookeeper.apache.org/doc/r3.3.3/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
 			for more information.</description>
 	</property>
 	<!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-	<property>
-		<name>hbase.zookeeper.useMulti</name>
-		<value>true</value>
-		<description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-			This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-			with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-			IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-			and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and
-			will not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).</description>
-	</property>
-	<property>
-		<name>hbase.config.read.zookeeper.config</name>
-		<value>false</value>
-		<description>
-			Set to true to allow HBaseConfiguration to read the
-			zoo.cfg file for ZooKeeper properties. Switching this to true
-			is not recommended, since the functionality of reading ZK
-			properties from a zoo.cfg file has been deprecated.</description>
-	</property>
+
 	<!--
-	Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
-	All properties with an "hbase.zookeeper.property." prefix are converted for
-	ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
-	e.g.  "initLimit=10" you would append the following to your configuration:
-	  <property>
-		<name>hbase.zookeeper.property.initLimit</name>
-		<value>10</value>
-	  </property>
-	-->
+    Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
+    All properties with an "hbase.zookeeper.property." prefix are converted for
+    ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
+    e.g.  "initLimit=10" you would append the following to your configuration:
+      <property>
+        <name>hbase.zookeeper.property.initLimit</name>
+        <value>10</value>
+      </property>
+    -->
 	<property>
 		<name>hbase.zookeeper.property.initLimit</name>
 		<value>10</value>
@@ -429,7 +439,7 @@ possible configurations would overwhelm and obscure the important.
 	<property>
 		<name>hbase.client.write.buffer</name>
 		<value>2097152</value>
-		<description>Default size of the HTable client write buffer in bytes.
+		<description>Default size of the BufferedMutator write buffer in bytes.
 			A bigger buffer takes more memory -- on both the client and server
 			side since server instantiates the passed write buffer to process
 			it -- but a larger buffer size reduces the number of RPCs made.
@@ -454,7 +464,7 @@ possible configurations would overwhelm and obscure the important.
 	</property>
 	<property>
 		<name>hbase.client.retries.number</name>
-		<value>35</value>
+		<value>15</value>
 		<description>Maximum retries.  Used as maximum for all retryable
 			operations such as the getting of a cell's value, starting a row update,
 			etc.  Retry interval is a rough function based on hbase.client.pause.  At
@@ -470,7 +480,7 @@ possible configurations would overwhelm and obscure the important.
 	</property>
 	<property>
 		<name>hbase.client.max.perserver.tasks</name>
-		<value>5</value>
+		<value>2</value>
 		<description>The maximum number of concurrent mutation tasks a single HTable instance will
 			send to a single region server.</description>
 	</property>
@@ -561,7 +571,7 @@ possible configurations would overwhelm and obscure the important.
 	</property>
 	<property>
 		<name>hbase.normalizer.period</name>
-		<value>1800000</value>
+		<value>300000</value>
 		<description>Period at which the region normalizer runs in the Master.</description>
 	</property>
 	<property>
@@ -581,8 +591,8 @@ possible configurations would overwhelm and obscure the important.
 		<name>hbase.server.versionfile.writeattempts</name>
 		<value>3</value>
 		<description>
-			How many time to retry attempting to write a version file
-			before just aborting. Each attempt is seperated by the
+			How many times to retry attempting to write a version file
+			before just aborting. Each attempt is separated by the
 			hbase.server.thread.wakefrequency milliseconds.</description>
 	</property>
 	<property>
@@ -594,16 +604,17 @@ possible configurations would overwhelm and obscure the important.
 			every hbase.server.thread.wakefrequency.</description>
 	</property>
 	<property>
-		<name>hbase.hregion.percolumnfamilyflush.size.lower.bound</name>
+		<name>hbase.hregion.percolumnfamilyflush.size.lower.bound.min</name>
 		<value>16777216</value>
 		<description>
-			If FlushLargeStoresPolicy is used, then every time that we hit the
-			total memstore limit, we find out all the column families whose memstores
-			exceed this value, and only flush them, while retaining the others whose
-			memstores are lower than this limit. If none of the families have their
-			memstore size more than this, all the memstores will be flushed
-			(just as usual). This value should be less than half of the total memstore
-			threshold (hbase.hregion.memstore.flush.size).
+			If FlushLargeStoresPolicy is used and there are multiple column families,
+			then every time that we hit the total memstore limit, we find out all the
+			column families whose memstores exceed a "lower bound" and only flush them
+			while retaining the others in memory. The "lower bound" will be
+			"hbase.hregion.memstore.flush.size / column_family_number" by default
+			unless value of this property is larger than that. If none of the families
+			have their memstore size more than lower bound, all the memstores will be
+			flushed (just as usual).
 		</description>
 	</property>
 	<property>
@@ -644,77 +655,133 @@ possible configurations would overwhelm and obscure the important.
 		<name>hbase.hregion.max.filesize</name>
 		<value>10737418240</value>
 		<description>
-			Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-			grown to exceed this value, the hosting HRegion is split in two.</description>
+			Maximum HFile size. If the sum of the sizes of a region's HFiles has grown to exceed this
+			value, the region is split in two.</description>
 	</property>
 	<property>
 		<name>hbase.hregion.majorcompaction</name>
 		<value>604800000</value>
-		<description>The time (in miliseconds) between 'major' compactions of all
-			HStoreFiles in a region.  Default: Set to 7 days.  Major compactions tend to
-			happen exactly when you need them least so enable them such that they run at
-			off-peak for your deploy; or, since this setting is on a periodicity that is
-			unlikely to match your loading, run the compactions via an external
-			invocation out of a cron job or some such.</description>
+		<description>Time between major compactions, expressed in milliseconds. Set to 0 to disable
+			time-based automatic major compactions. User-requested and size-based major compactions will
+			still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause
+			compaction to start at a somewhat-random time during a given window of time. The default value
+			is 7 days, expressed in milliseconds. If major compactions are causing disruption in your
+			environment, you can configure them to run at off-peak times for your deployment, or disable
+			time-based major compactions by setting this parameter to 0, and run major compactions in a
+			cron job or by another external mechanism.</description>
 	</property>
 	<property>
 		<name>hbase.hregion.majorcompaction.jitter</name>
 		<value>0.50</value>
-		<description>Jitter outer bound for major compactions.
-			On each regionserver, we multiply the hbase.region.majorcompaction
-			interval by some random fraction that is inside the bounds of this
-			maximum.  We then add this + or - product to when the next
-			major compaction is to run.  The idea is that major compaction
-			does happen on every regionserver at exactly the same time.  The
-			smaller this number, the closer the compactions come together.</description>
+		<description>A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur
+			a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number,
+			the closer the compactions will happen to the hbase.hregion.majorcompaction
+			interval.</description>
 	</property>
 	<property>
 		<name>hbase.hstore.compactionThreshold</name>
 		<value>3</value>
-		<description>
-			If more than this number of HStoreFiles in any one HStore
-			(one HStoreFile is written per flush of memstore) then a compaction
-			is run to rewrite all HStoreFiles files as one.  Larger numbers
-			put off compaction but when it runs, it takes longer to complete.</description>
+		<description> If more than this number of StoreFiles exist in any one Store
+			(one StoreFile is written per flush of MemStore), a compaction is run to rewrite all
+			StoreFiles into a single StoreFile. Larger values delay compaction, but when compaction does
+			occur, it takes longer to complete.</description>
+	</property>
+	<property>
+		<name>hbase.regionserver.compaction.enabled</name>
+		<value>true</value>
+		<description>Enable/disable compactions by setting true/false.
+			We can further switch compactions dynamically with the
+			compaction_switch shell command.</description>
 	</property>
 	<property>
 		<name>hbase.hstore.flusher.count</name>
 		<value>2</value>
-		<description>
-			The number of flush threads. With less threads, the memstore flushes will be queued. With
-			more threads, the flush will be executed in parallel, increasing the hdfs load. This can
-			lead as well to more compactions.
-		</description>
+		<description> The number of flush threads. With fewer threads, the MemStore flushes will be
+			queued. With more threads, the flushes will be executed in parallel, increasing the load on
+			HDFS, and potentially causing more compactions. </description>
 	</property>
 	<property>
 		<name>hbase.hstore.blockingStoreFiles</name>
-		<value>10</value>
-		<description>
-			If more than this number of StoreFiles in any one Store
-			(one StoreFile is written per flush of MemStore) then updates are
-			blocked for this HRegion until a compaction is completed, or
-			until hbase.hstore.blockingWaitTime has been exceeded.</description>
+		<value>16</value>
+		<description> If more than this number of StoreFiles exist in any one Store (one StoreFile
+			is written per flush of MemStore), updates are blocked for this region until a compaction is
+			completed, or until hbase.hstore.blockingWaitTime has been exceeded.</description>
 	</property>
 	<property>
 		<name>hbase.hstore.blockingWaitTime</name>
 		<value>90000</value>
-		<description>
-			The time an HRegion will block updates for after hitting the StoreFile
-			limit defined by hbase.hstore.blockingStoreFiles.
-			After this time has elapsed, the HRegion will stop blocking updates even
-			if a compaction has not been completed.</description>
+		<description> The time for which a region will block updates after reaching the StoreFile limit
+			defined by hbase.hstore.blockingStoreFiles. After this time has elapsed, the region will stop
+			blocking updates even if a compaction has not been completed.</description>
+	</property>
+	<property>
+		<name>hbase.hstore.compaction.min</name>
+		<value>3</value>
+		<description>The minimum number of StoreFiles which must be eligible for compaction before
+			compaction can run. The goal of tuning hbase.hstore.compaction.min is to avoid ending up with
+			too many tiny StoreFiles to compact. Setting this value to 2 would cause a minor compaction
+			each time you have two StoreFiles in a Store, and this is probably not appropriate. If you
+			set this value too high, all the other values will need to be adjusted accordingly. For most
+			cases, the default value is appropriate. In previous versions of HBase, the parameter
+			hbase.hstore.compaction.min was named hbase.hstore.compactionThreshold.</description>
 	</property>
 	<property>
 		<name>hbase.hstore.compaction.max</name>
 		<value>10</value>
-		<description>Max number of HStoreFiles to compact per 'minor' compaction.</description>
+		<description>The maximum number of StoreFiles which will be selected for a single minor
+			compaction, regardless of the number of eligible StoreFiles. Effectively, the value of
+			hbase.hstore.compaction.max controls the length of time it takes a single compaction to
+			complete. Setting it larger means that more StoreFiles are included in a compaction. For most
+			cases, the default value is appropriate.</description>
 	</property>
 	<property>
-		<name>hbase.hstore.compaction.kv.max</name>
-		<value>10</value>
-		<description>How many KeyValues to read and then write in a batch when flushing
-			or compacting.  Do less if big KeyValues and problems with OOME.
-			Do more if wide, small rows.</description>
+		<name>hbase.hstore.compaction.min.size</name>
+		<value>134217728</value>
+		<description>A StoreFile (or a selection of StoreFiles, when using ExploringCompactionPolicy)
+			smaller than this size will always be eligible for minor compaction.
+			HFiles this size or larger are evaluated by hbase.hstore.compaction.ratio to determine if
+			they are eligible. Because this limit represents the "automatic include" limit for all
+			StoreFiles smaller than this value, this value may need to be reduced in write-heavy
+			environments where many StoreFiles in the 1-2 MB range are being flushed, because every
+			StoreFile will be targeted for compaction and the resulting StoreFiles may still be under the
+			minimum size and require further compaction. If this parameter is lowered, the ratio check is
+			triggered more quickly. This addressed some issues seen in earlier versions of HBase but
+			changing this parameter is no longer necessary in most situations. Default: 128 MB expressed
+			in bytes.</description>
+	</property>
+	<property>
+		<name>hbase.hstore.compaction.max.size</name>
+		<value>9223372036854775807</value>
+		<description>A StoreFile (or a selection of StoreFiles, when using ExploringCompactionPolicy)
+			larger than this size will be excluded from compaction. The effect of
+			raising hbase.hstore.compaction.max.size is fewer, larger StoreFiles that do not get
+			compacted often. If you feel that compaction is happening too often without much benefit, you
+			can try raising this value. Default: the value of LONG.MAX_VALUE, expressed in bytes.</description>
+	</property>
+	<property>
+		<name>hbase.hstore.compaction.ratio</name>
+		<value>1.2F</value>
+		<description>For minor compaction, this ratio is used to determine whether a given StoreFile
+			which is larger than hbase.hstore.compaction.min.size is eligible for compaction. Its
+			effect is to limit compaction of large StoreFiles. The value of hbase.hstore.compaction.ratio
+			is expressed as a floating-point decimal. A large ratio, such as 10, will produce a single
+			giant StoreFile. Conversely, a low value, such as .25, will produce behavior similar to the
+			BigTable compaction algorithm, producing four StoreFiles. A moderate value of between 1.0 and
+			1.4 is recommended. When tuning this value, you are balancing write costs with read costs.
+			Raising the value (to something like 1.4) will have more write costs, because you will
+			compact larger StoreFiles. However, during reads, HBase will need to seek through fewer
+			StoreFiles to accomplish the read. Consider this approach if you cannot take advantage of
+			Bloom filters. Otherwise, you can lower this value to something like 1.0 to reduce the
+			background cost of writes, and use Bloom filters to control the number of StoreFiles touched
+			during reads. For most cases, the default value is appropriate.</description>
+	</property>
+	<property>
+		<name>hbase.hstore.compaction.ratio.offpeak</name>
+		<value>5.0F</value>
+		<description>Allows you to set a different (by default, more aggressive) ratio for determining
+			whether larger StoreFiles are included in compactions during off-peak hours. Works in the
+			same way as hbase.hstore.compaction.ratio. Only applies if hbase.offpeak.start.hour and
+			hbase.offpeak.end.hour are also enabled.</description>
 	</property>
 	<property>
 		<name>hbase.hstore.time.to.purge.deletes</name>
@@ -726,6 +793,29 @@ possible configurations would overwhelm and obscure the important.
 		</description>
 	</property>
 	<property>
+		<name>hbase.offpeak.start.hour</name>
+		<value>-1</value>
+		<description>The start of off-peak hours, expressed as an integer between 0 and 23, inclusive.
+			Set to -1 to disable off-peak.</description>
+	</property>
+	<property>
+		<name>hbase.offpeak.end.hour</name>
+		<value>-1</value>
+		<description>The end of off-peak hours, expressed as an integer between 0 and 23, inclusive. Set
+			to -1 to disable off-peak.</description>
+	</property>
+	<property>
+		<name>hbase.regionserver.thread.compaction.throttle</name>
+		<value>2684354560</value>
+		<description>There are two different thread pools for compactions, one for large compactions and
+			the other for small compactions. This helps to keep compaction of lean tables (such as
+			hbase:meta) fast. If a compaction is larger than this threshold, it
+			goes into the large compaction pool. In most cases, the default value is appropriate. Default:
+			2 x hbase.hstore.compaction.max x hbase.hregion.memstore.flush.size (which defaults to 128MB).
+			The value field assumes that the value of hbase.hregion.memstore.flush.size is unchanged from
+			the default.</description>
+	</property>
+	<property>
 		<name>hbase.regionserver.majorcompaction.pagecache.drop</name>
 		<value>true</value>
 		<description>Specifies whether to drop pages read/written into the system page cache by
@@ -744,6 +834,13 @@ possible configurations would overwhelm and obscure the important.
 			on the most recently written data.</description>
 	</property>
 	<property>
+		<name>hbase.hstore.compaction.kv.max</name>
+		<value>10</value>
+		<description>The maximum number of KeyValues to read and then write in a batch when flushing or
+			compacting. Set this lower if you have big KeyValues and problems with Out Of Memory
+			Exceptions. Set this higher if you have wide, small rows.</description>
+	</property>
+	<property>
 		<name>hbase.storescanner.parallel.seek.enable</name>
 		<value>false</value>
 		<description>
@@ -760,7 +857,7 @@ possible configurations would overwhelm and obscure the important.
 		<name>hfile.block.cache.size</name>
 		<value>0.4</value>
 		<description>Percentage of maximum heap (-Xmx setting) to allocate to block cache
-			used by HFile/StoreFile. Default of 0.4 means allocate 40%.
+			used by a StoreFile. Default of 0.4 means allocate 40%.
 			Set to 0 to disable but it's not recommended; you need at least
 			enough cache to hold the storefile indices.</description>
 	</property>
@@ -780,19 +877,13 @@ possible configurations would overwhelm and obscure the important.
 	<property>
 		<name>hbase.bucketcache.ioengine</name>
 		<value></value>
-		<description>Where to store the contents of the bucketcache. One of: heap,
-			offheap, or file. If a file, set it to file:PATH_TO_FILE. See
-			http://hbase.apache.org/book.html#offheap.blockcache for more information.
+		<description>Where to store the contents of the bucketcache. One of: offheap,
+			file, files or mmap. If a file or files, set it to file(s):PATH_TO_FILE.
+			mmap means the content will be in an mmaped file. Use mmap:PATH_TO_FILE.
+			See http://hbase.apache.org/book.html#offheap.blockcache for more information.
 		</description>
 	</property>
 	<property>
-		<name>hbase.bucketcache.combinedcache.enabled</name>
-		<value>true</value>
-		<description>Whether or not the bucketcache is used in league with the LRU
-			on-heap block cache. In this mode, indices and blooms are kept in the LRU
-			blockcache and the data blocks are kept in the bucketcache.</description>
-	</property>
-	<property>
 		<name>hbase.bucketcache.size</name>
 		<value></value>
 		<description>A float that EITHER represents a percentage of total heap memory
@@ -816,8 +907,7 @@ possible configurations would overwhelm and obscure the important.
 		<value>3</value>
 		<description>The HFile format version to use for new files.
 			Version 3 adds support for tags in hfiles (See http://hbase.apache.org/book.html#hbase.tags).
-			Distributed Log Replay requires that tags are enabled. Also see the configuration
-			'hbase.replication.rpc.codec'.
+			Also see the configuration 'hbase.replication.rpc.codec'.
 		</description>
 	</property>
 	<property>
@@ -898,8 +988,8 @@ possible configurations would overwhelm and obscure the important.
 			for more details.</description>
 	</property>
 	<!-- The following properties configure authentication information for
-		 HBase processes when using Kerberos security.  There are no default
-		 values, included here for documentation purposes -->
+         HBase processes when using Kerberos security.  There are no default
+         values, included here for documentation purposes -->
 	<property>
 		<name>hbase.master.keytab.file</name>
 		<value></value>
@@ -980,6 +1070,13 @@ possible configurations would overwhelm and obscure the important.
 			MUST BE DISABLED for secure operation.</description>
 	</property>
 	<property>
+		<name>hbase.display.keys</name>
+		<value>true</value>
+		<description>When this is set to true the webUI and such will display all start/end keys
+			as part of the table details, region names, etc. When this is set to false,
+			the keys are hidden.</description>
+	</property>
+	<property>
 		<name>hbase.coprocessor.enabled</name>
 		<value>true</value>
 		<description>Enables or disables coprocessor loading. If 'false'
@@ -1005,6 +1102,26 @@ possible configurations would overwhelm and obscure the important.
 			A coprocessor can also be loaded on demand by setting HTableDescriptor.</description>
 	</property>
 	<property>
+		<name>hbase.coprocessor.master.classes</name>
+		<value></value>
+		<description>A comma-separated list of
+			org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+			loaded by default on the active HMaster process. For any implemented
+			coprocessor methods, the listed classes will be called in order. After
+			implementing your own MasterObserver, just put it in HBase's classpath
+			and add the fully qualified class name here.</description>
+	</property>
+	<property>
+		<name>hbase.coprocessor.abortonerror</name>
+		<value>true</value>
+		<description>Set to true to cause the hosting server (master or regionserver)
+			to abort if a coprocessor fails to load, fails to initialize, or throws an
+			unexpected Throwable object. Setting this to false will allow the server to
+			continue execution but the system wide state of the coprocessor in question
+			will become inconsistent as it will be properly executing in only a subset
+			of servers, so this is most useful for debugging only.</description>
+	</property>
+	<property>
 		<name>hbase.rest.port</name>
 		<value>8080</value>
 		<description>The port for the HBase REST server.</description>
@@ -1038,33 +1155,25 @@ possible configurations would overwhelm and obscure the important.
 		<value>false</value>
 		<description>Enables running the REST server to support proxy-user mode.</description>
 	</property>
+	<property skipInDoc="true">
+		<name>hbase.defaults.for.version</name>
+		<value>2.2.3</value>
+		<description>This defaults file was compiled for version ${project.version}. This variable is used
+			to make sure that a user doesn't have an old version of hbase-default.xml on the
+			classpath.</description>
+	</property>
 	<property>
 		<name>hbase.defaults.for.version.skip</name>
-		<value>true</value>
+		<value>false</value>
 		<description>Set to true to skip the 'hbase.defaults.for.version' check.
 			Setting this to true can be useful in contexts other than
 			the other side of a maven generation; i.e. running in an
-			ide.  You'll want to set this boolean to true to avoid
-			seeing the RuntimException complaint: "hbase-default.xml file
+			IDE.  You'll want to set this boolean to true to avoid
+			seeing the RuntimeException complaint: "hbase-default.xml file
 			seems to be for an old version of HBase (\${hbase.version}), this
 			version is X.X.X-SNAPSHOT"</description>
 	</property>
 	<property>
-		<name>hbase.coprocessor.abortonerror</name>
-		<value>true</value>
-		<description>Set to true to cause the hosting server (master or regionserver)
-			to abort if a coprocessor fails to load, fails to initialize, or throws an
-			unexpected Throwable object. Setting this to false will allow the server to
-			continue execution but the system wide state of the coprocessor in question
-			will become inconsistent as it will be properly executing in only a subset
-			of servers, so this is most useful for debugging only.</description>
-	</property>
-	<property>
-		<name>hbase.online.schema.update.enable</name>
-		<value>true</value>
-		<description>Set true to enable online schema changes.</description>
-	</property>
-	<property>
 		<name>hbase.table.lock.enable</name>
 		<value>true</value>
 		<description>Set to true to enable locking the table in zookeeper for schema change operations.
@@ -1113,7 +1222,7 @@ possible configurations would overwhelm and obscure the important.
 	<property>
 		<name>hbase.regionserver.thrift.framed.max_frame_size_in_mb</name>
 		<value>2</value>
-		<description>Default frame size when using framed transport</description>
+		<description>Default frame size when using framed transport, in MB</description>
 	</property>
 	<property>
 		<name>hbase.regionserver.thrift.compact</name>
@@ -1123,7 +1232,7 @@ possible configurations would overwhelm and obscure the important.
 	<property>
 		<name>hbase.rootdir.perms</name>
 		<value>700</value>
-		<description>FS Permissions for the root directory in a secure(kerberos) setup.
+		<description>FS Permissions for the root data subdirectory in a secure (kerberos) setup.
 			When master starts, it creates the rootdir with this permissions or sets the permissions
 			if it does not match.</description>
 	</property>
@@ -1166,6 +1275,15 @@ possible configurations would overwhelm and obscure the important.
 			to create a name based on what you are restoring.</description>
 	</property>
 	<property>
+		<name>hbase.snapshot.working.dir</name>
+		<value></value>
+		<description>Location where the snapshotting process will occur. The location of the
+			completed snapshots will not change, but the temporary directory where the snapshot
+			process occurs will be set to this location. This can be a different filesystem from
+			the root directory, which can help performance. See HBASE-21098 for more
+			information.</description>
+	</property>
+	<property>
 		<name>hbase.server.compactchecker.interval.multiplier</name>
 		<value>1000</value>
 		<description>The number that determines how often we scan to see if compaction is necessary.
@@ -1196,6 +1314,26 @@ possible configurations would overwhelm and obscure the important.
 			to keep.</description>
 	</property>
 	<property>
+		<name>dfs.client.read.shortcircuit</name>
+		<value>false</value>
+		<description>
+			If set to true, this configuration parameter enables short-circuit local
+			reads.
+		</description>
+	</property>
+	<property>
+		<name>dfs.domain.socket.path</name>
+		<value>none</value>
+		<description>
+			This is a path to a UNIX domain socket that will be used for
+			communication between the DataNode and local HDFS clients, if
+			dfs.client.read.shortcircuit is set to true. If the string "_PORT" is
+			present in this path, it will be replaced by the TCP port of the DataNode.
+			Be careful about permissions for the directory that hosts the shared
+			domain socket; dfsclient will complain if it is open to users other than the HBase user.
+		</description>
+	</property>
+	<property>
 		<name>hbase.dfs.client.read.shortcircuit.buffer.size</name>
 		<value>131072</value>
 		<description>If the DFSClient configuration
@@ -1241,7 +1379,6 @@ possible configurations would overwhelm and obscure the important.
 			are NULL, CRC32, CRC32C.
 		</description>
 	</property>
-
 	<property>
 		<name>hbase.client.scanner.max.result.size</name>
 		<value>2097152</value>
@@ -1251,7 +1388,6 @@ possible configurations would overwhelm and obscure the important.
 			With faster and/or high latency networks this value should be increased.
 		</description>
 	</property>
-
 	<property>
 		<name>hbase.server.scanner.max.result.size</name>
 		<value>104857600</value>
@@ -1261,7 +1397,6 @@ possible configurations would overwhelm and obscure the important.
 			This is a safety setting to protect the server from OOM situations.
 		</description>
 	</property>
-
 	<property>
 		<name>hbase.status.published</name>
 		<value>false</value>
@@ -1286,15 +1421,16 @@ possible configurations would overwhelm and obscure the important.
 			Multicast port to use for the status publication by multicast.
 		</description>
 	</property>
-
 	<property>
 		<name>hbase.dynamic.jars.dir</name>
 		<value>${hbase.rootdir}/lib</value>
 		<description>
-			The directory from which the custom filter/co-processor jars can be loaded
+			The directory from which the custom filter JARs can be loaded
 			dynamically by the region server without the need to restart. However,
 			an already loaded filter/co-processor class would not be un-loaded. See
 			HBASE-1936 for more details.
+
+			Does not apply to coprocessors.
 		</description>
 	</property>
 	<property>
@@ -1306,6 +1442,13 @@ possible configurations would overwhelm and obscure the important.
 		</description>
 	</property>
 	<property>
+		<name>hbase.master.loadbalance.bytable</name>
+		<value>false</value>
+		<description>Whether to factor in the table name when the balancer runs.
+			Default: false.
+		</description>
+	</property>
+	<property>
 		<name>hbase.rest.csrf.enabled</name>
 		<value>false</value>
 		<description>
@@ -1347,6 +1490,27 @@ possible configurations would overwhelm and obscure the important.
 		</description>
 	</property>
 	<property>
+		<name>hbase.procedure.regionserver.classes</name>
+		<value></value>
+		<description>A comma-separated list of
+			org.apache.hadoop.hbase.procedure.RegionServerProcedureManager procedure managers that are
+			loaded by default on the active HRegionServer process. The lifecycle methods (init/start/stop)
+			will be called by the active HRegionServer process to perform the specific globally barriered
+			procedure. After implementing your own RegionServerProcedureManager, just put it in
+			HBase's classpath and add the fully qualified class name here.
+		</description>
+	</property>
+	<property>
+		<name>hbase.procedure.master.classes</name>
+		<value></value>
+		<description>A comma-separated list of
+			org.apache.hadoop.hbase.procedure.MasterProcedureManager procedure managers that are
+			loaded by default on the active HMaster process. A procedure is identified by its signature and
+			users can use the signature and an instant name to trigger an execution of a globally barriered
+			procedure. After implementing your own MasterProcedureManager, just put it in HBase's classpath
+			and add the fully qualified class name here.</description>
+	</property>
+	<property>
 		<name>hbase.regionserver.storefile.refresh.period</name>
 		<value>0</value>
 		<description>
@@ -1365,25 +1529,23 @@ possible configurations would overwhelm and obscure the important.
 		<description>
 			Whether asynchronous WAL replication to the secondary region replicas is enabled or not.
 			If this is enabled, a replication peer named "region_replica_replication" will be created
-			which will tail the logs and replicate the mutatations to region replicas for tables that
+			which will tail the logs and replicate the mutations to region replicas for tables that
 			have region replication > 1. If this is enabled once, disabling this replication also
-			requires disabling the replication peer using shell or ReplicationAdmin java class.
+			requires disabling the replication peer using shell or Admin java class.
 			Replication to secondary region replicas works over standard inter-cluster replication.
-			So replication, if disabled explicitly, also has to be enabled by setting "hbase.replication"
-			to true for this feature to work.
 		</description>
 	</property>
 	<property>
 		<name>hbase.security.visibility.mutations.checkauths</name>
 		<value>false</value>
 		<description>
-			This property if enabled, will check whether the labels in the visibility expression are associated
-			with the user issuing the mutation
+			This property if enabled, will check whether the labels in the visibility
+			expression are associated with the user issuing the mutation
 		</description>
 	</property>
 	<property>
 		<name>hbase.http.max.threads</name>
-		<value>10</value>
+		<value>16</value>
 		<description>
 			The maximum number of threads that the HTTP Server will create in its
 			ThreadPool.
@@ -1395,20 +1557,20 @@ possible configurations would overwhelm and obscure the important.
 		<description>
 			The maximum number of threads any replication source will use for
 			shipping edits to the sinks in parallel. This also limits the number of
-			chunks each replication batch is broken into.
-			Larger values can improve the replication throughput between the master and
-			slave clusters. The default of 10 will rarely need to be changed.
+			chunks each replication batch is broken into. Larger values can improve
+			the replication throughput between the master and slave clusters. The
+			default of 10 will rarely need to be changed.
 		</description>
 	</property>
 	<!-- Static Web User Filter properties. -->
 	<property>
+		<name>hbase.http.staticuser.user</name>
+		<value>dr.stack</value>
 		<description>
 			The user name to filter as, on static web filters
 			while rendering content. An example use is the HDFS
 			web UI (user to be used for browsing files).
 		</description>
-		<name>hbase.http.staticuser.user</name>
-		<value>dr.stack</value>
 	</property>
 	<property>
 		<name>hbase.regionserver.handler.abort.on.error.percent</name>
@@ -1418,18 +1580,102 @@ possible configurations would overwhelm and obscure the important.
 			0.x Abort only when this percent of handlers have died;
 			1 Abort only when all of the handlers have died.</description>
 	</property>
+	<!-- Mob properties. -->
+	<property>
+		<name>hbase.mob.file.cache.size</name>
+		<value>1000</value>
+		<description>
+			Number of opened file handlers to cache.
+			A larger value will benefit reads by providing more file handlers per mob
+			file cache and would reduce frequent file opening and closing.
+			However, if this is set too high, this could lead to a "too many opened file handlers" error.
+			The default value is 1000.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.cache.evict.period</name>
+		<value>3600</value>
+		<description>
+			The amount of time in seconds before the mob cache evicts cached mob files.
+			The default value is 3600 seconds.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.cache.evict.remain.ratio</name>
+		<value>0.5f</value>
+		<description>
+			The ratio (between 0.0 and 1.0) of files that remains cached after an eviction
+			is triggered when the number of cached mob files exceeds the hbase.mob.file.cache.size.
+			The default value is 0.5f.
+		</description>
+	</property>
+	<property>
+		<name>hbase.master.mob.ttl.cleaner.period</name>
+		<value>86400</value>
+		<description>
+			The period that ExpiredMobFileCleanerChore runs. The unit is second.
+			The default value is one day. The MOB file name uses only the date part of
+			the file creation time in it. We use this time for deciding TTL expiry of
+			the files. So the removal of TTL expired files might be delayed. The max
+			delay might be 24 hrs.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.compaction.mergeable.threshold</name>
+		<value>1342177280</value>
+		<description>
+			If the size of a mob file is less than this value, it's regarded as a small
+			file and needs to be merged in mob compaction. The default value is 1280MB.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.delfile.max.count</name>
+		<value>3</value>
+		<description>
+			The max number of del files allowed in the mob compaction.
+			In the mob compaction, when the number of existing del files is larger than
+			this value, they are merged until the number of del files is not larger than this value.
+			The default value is 3.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.compaction.batch.size</name>
+		<value>100</value>
+		<description>
+			The max number of mob files allowed in a batch of the mob compaction.
+			The mob compaction merges the small mob files to bigger ones. If the number of the
+			small files is very large, it could lead to a "too many opened file handlers" error in the merge.
+			And the merge has to be split into batches. This value limits the number of mob files
+			that are selected in a batch of the mob compaction. The default value is 100.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.compaction.chore.period</name>
+		<value>604800</value>
+		<description>
+			The period that MobCompactionChore runs. The unit is second.
+			The default value is one week.
+		</description>
+	</property>
+	<property>
+		<name>hbase.mob.compaction.threads.max</name>
+		<value>1</value>
+		<description>
+			The max number of threads used in MobCompactor.
+		</description>
+	</property>
 	<property>
 		<name>hbase.snapshot.master.timeout.millis</name>
 		<value>300000</value>
 		<description>
-			Timeout for master for the snapshot procedure execution
+			Timeout for master for the snapshot procedure execution.
 		</description>
 	</property>
 	<property>
 		<name>hbase.snapshot.region.timeout</name>
 		<value>300000</value>
 		<description>
-			Timeout for regionservers to keep threads in snapshot request pool waiting
+			Timeout for regionservers to keep threads in snapshot request pool waiting.
 		</description>
 	</property>
 	<property>
@@ -1439,20 +1685,25 @@ possible configurations would overwhelm and obscure the important.
 			Number of rows in a batch operation above which a warning will be logged.
 		</description>
 	</property>
+	<property>
+		<name>hbase.master.wait.on.service.seconds</name>
+		<value>30</value>
+		<description>Default is 5 minutes. Make it 30 seconds for tests. See
+			HBASE-19794 for some context.</description>
+	</property>
 
 	<!--NOTE: HBase client try to load the class that configured in hbase-default.xml. -->
 	<!--But actually all these classes were already shaded and can't be loaded by those default name, -->
 	<!--so the following classes are Flink shaded classes.-->
 	<property>
 		<name>hbase.master.logcleaner.plugins</name>
-		<value>org.apache.flink.hbase.shaded.org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
+		<value>org.apache.flink.hbase.shaded.org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,org.apache.flink.hbase.shaded.org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner</value>
 		<description>A comma-separated list of BaseLogCleanerDelegate invoked by
 			the LogsCleaner service. These WAL cleaners are called in order,
 			so put the cleaner that prunes the most files in front. To
 			implement your own BaseLogCleanerDelegate, just put it in HBase's classpath
 			and add the fully qualified class name here. Always add the above
-			default log cleaners in the list.
-		</description>
+			default log cleaners in the list.</description>
 	</property>
 	<property>
 		<name>hbase.master.hfilecleaner.plugins</name>
@@ -1463,8 +1714,7 @@ possible configurations would overwhelm and obscure the important.
 			implement your own BaseHFileCleanerDelegate, just put it in HBase's classpath
 			and add the fully qualified class name here. Always add the above
 			default log cleaners in the list as they will be overwritten in
-			hbase-site.xml.
-		</description>
+			hbase-site.xml.</description>
 	</property>
 	<property>
 		<name>hbase.regionserver.hlog.reader.impl</name>
@@ -1478,11 +1728,13 @@ possible configurations would overwhelm and obscure the important.
 	</property>
 	<property>
 		<name>hbase.regionserver.region.split.policy</name>
-		<value>org.apache.flink.hbase.shaded.org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy</value>
+		<value>org.apache.flink.hbase.shaded.org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy</value>
 		<description>
-			A split policy determines when a region should be split. The various other split policies that
-			are available currently are BusyRegionSplitPolicy, ConstantSizeRegionSplitPolicy,
-			DisabledRegionSplitPolicy, DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy etc.
+			A split policy determines when a region should be split. The various
+			other split policies that are available currently are BusyRegionSplitPolicy,
+			ConstantSizeRegionSplitPolicy, DisabledRegionSplitPolicy,
+			DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy, and
+			SteppingSplitPolicy. DisabledRegionSplitPolicy blocks manual region splitting.
 		</description>
 	</property>
 	<property>
@@ -1541,7 +1793,7 @@ possible configurations would overwhelm and obscure the important.
 		<description>
 			The codec that is to be used when replication is enabled so that
 			the tags are also replicated. This is used along with HFileV3 which
-			supports tags in them. If tags are not used or if the hfile version used
+			supports tags in them.  If tags are not used or if the hfile version used
 			is HFileV2 then KeyValueCodec can be used as the replication codec. Note that
 			using KeyValueCodecWithTags for replication when there are no tags causes no harm.
 		</description>
@@ -1555,4 +1807,11 @@ possible configurations would overwhelm and obscure the important.
 			http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.html
 		</description>
 	</property>
-</configuration>
+	<property>
+		<name>hbase.mob.compactor.class</name>
+		<value>org.apache.flink.hbase.shaded.org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor</value>
+		<description>
+			Implementation of mob compactor, the default one is PartitionedMobCompactor.
+		</description>
+	</property>
+</configuration>
\ No newline at end of file
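
The NOTE block near the end of the defaults file above explains why the cleaner, split-policy and
mob-compactor classes are configured under the org.apache.flink.hbase.shaded prefix: the shaded
HBase client can only load them by their relocated names. A minimal sketch of how that wiring can
be checked from Java, assuming the patched hbase-default.xml and the shaded connector jar are on
the classpath (with only the shaded jar present, the HBaseConfiguration import itself would also
carry the org.apache.flink.hbase.shaded prefix):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ShadedDefaultsCheck {
        public static void main(String[] args) throws Exception {
            // Layers hbase-default.xml and hbase-site.xml found on the classpath,
            // i.e. the defaults patched in this commit.
            Configuration conf = HBaseConfiguration.create();

            String splitPolicy = conf.get("hbase.regionserver.region.split.policy");
            System.out.println("configured split policy: " + splitPolicy);

            // With the shaded connector jar this resolves to the relocated
            // ...hbase.shaded...SteppingSplitPolicy; a ClassNotFoundException here
            // would point at a mismatch between the defaults file and the jar.
            Class<?> policyClass = Class.forName(splitPolicy);
            System.out.println("resolved: " + policyClass.getName());
        }
    }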
diff --git a/flink-connectors/pom.xml b/flink-connectors/pom.xml
index 9401351..1a97972 100644
--- a/flink-connectors/pom.xml
+++ b/flink-connectors/pom.xml
@@ -43,7 +43,9 @@ under the License.
 		<module>flink-connector-elasticsearch5</module>
 		<module>flink-connector-elasticsearch6</module>
 		<module>flink-connector-elasticsearch7</module>
-		<module>flink-connector-hbase</module>
+		<module>flink-connector-hbase-base</module>
+		<module>flink-connector-hbase-1.4</module>
+		<module>flink-connector-hbase-2.2</module>
 		<module>flink-connector-hive</module>
 		<module>flink-connector-jdbc</module>
 		<module>flink-connector-rabbitmq</module>
@@ -94,7 +96,8 @@ under the License.
 			<modules>
 				<module>flink-sql-connector-elasticsearch6</module>
 				<module>flink-sql-connector-elasticsearch7</module>
-				<module>flink-sql-connector-hbase</module>
+				<module>flink-sql-connector-hbase-1.4</module>
+				<module>flink-sql-connector-hbase-2.2</module>
 				<module>flink-sql-connector-hive-1.2.2</module>
 				<module>flink-sql-connector-hive-2.2.0</module>
 				<module>flink-sql-connector-hive-2.3.6</module>
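
With the connector split into flink-connector-hbase-1.4 and flink-connector-hbase-2.2 (plus the
matching SQL connector jars below), a table chooses its implementation through the connector
identifier in the DDL. A short sketch of how a job might target the HBase 2 connector; the schema,
table name and ZooKeeper quorum below are placeholders, and the identifiers mirror the module
names added here:

    import org.apache.flink.table.api.EnvironmentSettings;
    import org.apache.flink.table.api.TableEnvironment;

    public class HBase2ConnectorDdl {
        public static void main(String[] args) {
            TableEnvironment tEnv = TableEnvironment.create(
                    EnvironmentSettings.newInstance().inStreamingMode().build());

            // 'connector' = 'hbase-2.2' picks the new HBase 2 connector;
            // 'hbase-1.4' would pick the HBase 1 connector instead.
            tEnv.executeSql(
                "CREATE TABLE hTable (" +
                "  rowkey STRING," +
                "  family1 ROW<f1c1 STRING>," +
                "  PRIMARY KEY (rowkey) NOT ENFORCED" +
                ") WITH (" +
                "  'connector' = 'hbase-2.2'," +
                "  'table-name' = 'source'," +
                "  'zookeeper.quorum' = 'localhost:2181'" +
                ")");
        }
    }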
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/pom.xml b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/pom.xml
index c280259..4875e6f 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/pom.xml
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/pom.xml
@@ -49,7 +49,13 @@ under the License.
 		<!--using hbase shade jar to execute end-to-end test-->
 		<dependency>
 			<groupId>org.apache.flink</groupId>
-			<artifactId>flink-sql-connector-hbase_${scala.binary.version}</artifactId>
+			<artifactId>flink-sql-connector-hbase-1.4_${scala.binary.version}</artifactId>
+			<version>${project.version}</version>
+			<scope>test</scope>
+		</dependency>
+		<dependency>
+			<groupId>org.apache.flink</groupId>
+			<artifactId>flink-sql-connector-hbase-2.2_${scala.binary.version}</artifactId>
 			<version>${project.version}</version>
 			<scope>test</scope>
 		</dependency>
@@ -186,9 +192,17 @@ under the License.
 								</artifactItem>
 								<artifactItem>
 									<groupId>org.apache.flink</groupId>
-									<artifactId>flink-sql-connector-hbase_${scala.binary.version}</artifactId>
+									<artifactId>flink-sql-connector-hbase-1.4_${scala.binary.version}</artifactId>
+									<version>${project.version}</version>
+									<destFileName>sql-hbase-1.4.jar</destFileName>
+									<type>jar</type>
+									<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
+								</artifactItem>
+								<artifactItem>
+									<groupId>org.apache.flink</groupId>
+									<artifactId>flink-sql-connector-hbase-2.2_${scala.binary.version}</artifactId>
 									<version>${project.version}</version>
-									<destFileName>sql-hbase.jar</destFileName>
+									<destFileName>sql-hbase-2.2.jar</destFileName>
 									<type>jar</type>
 									<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
 								</artifactItem>
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResource.java b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResource.java
index 4b9e091..83bf249 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResource.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResource.java
@@ -61,12 +61,13 @@ public interface HBaseResource extends ExternalResource {
 	/**
 	 * Returns the configured HBaseResource implementation, or a {@link LocalStandaloneHBaseResource} if none is configured.
 	 *
+	 * @param version The hbase version
 	 * @return configured HbaseResource, or {@link LocalStandaloneHBaseResource} if none is configured
 	 */
-	static HBaseResource get() {
+	static HBaseResource get(String version) {
 		return FactoryUtils.loadAndInvokeFactory(
 			HBaseResourceFactory.class,
-			HBaseResourceFactory::create,
+			factory -> factory.create(version),
 			LocalStandaloneHBaseResourceFactory::new);
 	}
 }
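
HBaseResource.get now takes the HBase version, so a single end-to-end test module can cover both
connector versions. A minimal sketch of how a test could use the changed API, mirroring what
SQLClientHBaseITCase does further below; the version string and table/column names are examples,
and createTable is assumed to be one of the resource's helper methods:

    import org.apache.flink.tests.util.hbase.HBaseResource;
    import org.junit.Rule;
    import org.junit.Test;

    public class MyHBaseSmokeTest {
        // Downloads and starts a standalone HBase 2.2.3 unless a different
        // HBaseResourceFactory is configured.
        @Rule
        public final HBaseResource hbase = HBaseResource.get("2.2.3");

        @Test
        public void writesOneCell() throws Exception {
            hbase.createTable("source", "family1", "family2"); // assumed helper
            hbase.putData("source", "row1", "family1", "f1c1", "v1");
        }
    }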
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResourceFactory.java b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResourceFactory.java
index bd873fa..27b3864 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResourceFactory.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/HBaseResourceFactory.java
@@ -30,8 +30,9 @@ public interface HBaseResourceFactory {
 	 * Returns a {@link HBaseResource} instance. If the instance could not be instantiated (for example, because a
 	 * mandatory parameter was missing), then an empty {@link Optional} should be returned.
 	 *
+	 * @param version The hbase version
 	 * @return HBaseResource instance
 	 * @throws Exception if the instance could not be instantiated
 	 */
-	HBaseResource create() throws Exception;
+	HBaseResource create(String version) throws Exception;
 }
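
The factory contract changes accordingly: implementations now receive the requested version. A
hypothetical factory, sketched here only to illustrate the new create(String) signature; the class
below is not part of this commit and sits in the org.apache.flink.tests.util.hbase package because
LocalStandaloneHBaseResource's constructor is package-private:

    package org.apache.flink.tests.util.hbase;

    /**
     * Hypothetical factory that ignores the requested version and always pins the
     * standalone cluster to HBase 2.2.3. Shown only to illustrate the new
     * create(String) contract; not part of this commit.
     */
    public class PinnedVersionHBaseResourceFactory implements HBaseResourceFactory {

        @Override
        public HBaseResource create(String version) {
            // The version argument could be inspected or validated here; this
            // sketch simply overrides it.
            return new LocalStandaloneHBaseResource("2.2.3");
        }
    }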
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResource.java b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResource.java
index c1b6b7b..2478432 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResource.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResource.java
@@ -49,16 +49,18 @@ public class LocalStandaloneHBaseResource implements HBaseResource {
 	private final TemporaryFolder tmp = new TemporaryFolder();
 
 	private final DownloadCache downloadCache = DownloadCache.get();
+	private final String hbaseVersion;
 	private Path hbaseDir;
 
-	LocalStandaloneHBaseResource() {
+	LocalStandaloneHBaseResource(String hbaseVersion) {
 		OperatingSystemRestriction.forbid(
 			String.format("The %s relies on UNIX utils and shell scripts.", getClass().getSimpleName()),
 			OperatingSystem.WINDOWS);
+		this.hbaseVersion = hbaseVersion;
 	}
 
-	private static String getHBaseDownloadUrl() {
-		return "https://archive.apache.org/dist/hbase/1.4.3/hbase-1.4.3-bin.tar.gz";
+	private String getHBaseDownloadUrl() {
+		return String.format("https://archive.apache.org/dist/hbase/%1$s/hbase-%1$s-bin.tar.gz", hbaseVersion);
 	}
 
 	@Override
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResourceFactory.java b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResourceFactory.java
index 9eb1ab9..2e57f0f 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResourceFactory.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/main/java/org/apache/flink/tests/util/hbase/LocalStandaloneHBaseResourceFactory.java
@@ -24,7 +24,7 @@ package org.apache.flink.tests.util.hbase;
 public class LocalStandaloneHBaseResourceFactory implements HBaseResourceFactory {
 
 	@Override
-	public HBaseResource create() {
-		return new LocalStandaloneHBaseResource();
+	public HBaseResource create(String version) {
+		return new LocalStandaloneHBaseResource(version);
 	}
 }
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/java/org/apache/flink/tests/util/hbase/SQLClientHBaseITCase.java b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/java/org/apache/flink/tests/util/hbase/SQLClientHBaseITCase.java
index ad5f4f7..ca603f4 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/java/org/apache/flink/tests/util/hbase/SQLClientHBaseITCase.java
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/java/org/apache/flink/tests/util/hbase/SQLClientHBaseITCase.java
@@ -40,6 +40,8 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TemporaryFolder;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,8 +53,12 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.time.Duration;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 
 import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
@@ -62,6 +68,7 @@ import static org.junit.Assert.assertThat;
 /**
  * End-to-end test for the HBase connectors.
  */
+@RunWith(Parameterized.class)
 @Category(value = {TravisGroup1.class, PreCommit.class, FailsOnJava11.class})
 public class SQLClientHBaseITCase extends TestLogger {
 
@@ -69,6 +76,14 @@ public class SQLClientHBaseITCase extends TestLogger {
 
 	private static final String HBASE_E2E_SQL = "hbase_e2e.sql";
 
+	@Parameterized.Parameters(name = "{index}: hbase-version:{0}")
+	public static Collection<Object[]> data() {
+		return Arrays.asList(new Object[][]{
+				{"1.4.3", "hbase-1.4"},
+				{"2.2.3", "hbase-2.2"}
+		});
+	}
+
 	@Rule
 	public final HBaseResource hbase;
 
@@ -79,16 +94,20 @@ public class SQLClientHBaseITCase extends TestLogger {
 	@Rule
 	public final TemporaryFolder tmp = new TemporaryFolder();
 
+	private final String hbaseConnector;
+	private final Path sqlConnectorHBaseJar;
+
 	@ClassRule
 	public static final DownloadCache DOWNLOAD_CACHE = DownloadCache.get();
 
 	private static final Path sqlToolBoxJar = TestUtils.getResource(".*SqlToolbox.jar");
-	private static final Path sqlConnectorHBaseJar = TestUtils.getResource(".*hbase.jar");
 	private static final Path hadoopClasspath = TestUtils.getResource(".*hadoop.classpath");
 	private List<Path> hadoopClasspathJars;
 
-	public SQLClientHBaseITCase() {
-		this.hbase = HBaseResource.get();
+	public SQLClientHBaseITCase(String hbaseVersion, String hbaseConnector) {
+		this.hbase = HBaseResource.get(hbaseVersion);
+		this.hbaseConnector = hbaseConnector;
+		this.sqlConnectorHBaseJar = TestUtils.getResource(".*sql-" + hbaseConnector + ".jar");
 	}
 
 	@Before
@@ -125,7 +144,9 @@ public class SQLClientHBaseITCase extends TestLogger {
 			hbase.putData("source", "row2", "family2", "f2c2", "v6");
 
 			// Initialize the SQL statements from "hbase_e2e.sql" file
-			List<String> sqlLines = initializeSqlLines();
+			Map<String, String> varsMap = new HashMap<>();
+			varsMap.put("$HBASE_CONNECTOR", hbaseConnector);
+			List<String> sqlLines = initializeSqlLines(varsMap);
 
 			// Execute SQL statements in "hbase_e2e.sql" file
 			executeSqlStatements(clusterController, sqlLines);
@@ -172,12 +193,21 @@ public class SQLClientHBaseITCase extends TestLogger {
 		Assert.assertTrue("Did not get expected results before timeout.", success);
 	}
 
-	private List<String> initializeSqlLines() throws IOException {
+	private List<String> initializeSqlLines(Map<String, String> vars) throws IOException {
 		URL url = SQLClientHBaseITCase.class.getClassLoader().getResource(HBASE_E2E_SQL);
 		if (url == null) {
 			throw new FileNotFoundException(HBASE_E2E_SQL);
 		}
-		return Files.readAllLines(new File(url.getFile()).toPath());
+		List<String> lines = Files.readAllLines(new File(url.getFile()).toPath());
+		List<String> result = new ArrayList<>();
+		for (String line : lines) {
+			for (Map.Entry<String, String> var : vars.entrySet()) {
+				line = line.replace(var.getKey(), var.getValue());
+			}
+			result.add(line);
+		}
+
+		return result;
 	}
 
 	private void executeSqlStatements(ClusterController clusterController, List<String> sqlLines) throws IOException {
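
    (Note, not part of the commit: the Parameterized runner executes every test twice, once per
    {version, connector} pair from data(), and initializeSqlLines(Map) rewrites the hbase_e2e.sql
    template below with a plain String.replace over each line. A self-contained sketch of that
    substitution, with the template line taken from the SQL file in this diff:)

        import java.util.HashMap;
        import java.util.Map;

        // Illustrative sketch only: reproduces the placeholder substitution that
        // initializeSqlLines(Map) applies to every line of hbase_e2e.sql.
        public class SqlTemplateSubstitutionDemo {
            public static void main(String[] args) {
                Map<String, String> vars = new HashMap<>();
                vars.put("$HBASE_CONNECTOR", "hbase-2.2");

                String line = "  'connector' = '$HBASE_CONNECTOR',";
                for (Map.Entry<String, String> var : vars.entrySet()) {
                    line = line.replace(var.getKey(), var.getValue());
                }
                // Prints:   'connector' = 'hbase-2.2',
                System.out.println(line);
            }
        }
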
diff --git a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/resources/hbase_e2e.sql b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/resources/hbase_e2e.sql
index a5a8ec1..5ae3845 100644
--- a/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/resources/hbase_e2e.sql
+++ b/flink-end-to-end-tests/flink-end-to-end-tests-hbase/src/test/resources/hbase_e2e.sql
@@ -19,7 +19,7 @@ CREATE TABLE MyHBaseSource (
   family1 ROW<f1c1 STRING>,
   family2 ROW<f2c1 STRING, f2c2 STRING>
 ) WITH (
-  'connector' = 'hbase-1.4',
+  'connector' = '$HBASE_CONNECTOR',
   'table-name' = 'source',
   'zookeeper.quorum' = 'localhost:2181',
   'zookeeper.znode.parent' = '/hbase'
@@ -30,7 +30,7 @@ CREATE TABLE MyHBaseSink (
   family1 ROW<f1c1 STRING>,
   family2 ROW<f2c1 STRING, f2c2 STRING>
 ) WITH (
-  'connector' = 'hbase-1.4',
+  'connector' = '$HBASE_CONNECTOR',
   'table-name' = 'sink',
   'zookeeper.quorum' = 'localhost:2181',
   'zookeeper.znode.parent' = '/hbase',
diff --git a/flink-python/pyflink/table/tests/test_descriptor.py b/flink-python/pyflink/table/tests/test_descriptor.py
index d1f9342..d81c688 100644
--- a/flink-python/pyflink/table/tests/test_descriptor.py
+++ b/flink-python/pyflink/table/tests/test_descriptor.py
@@ -399,7 +399,7 @@ class HBaseDescriptorTests(PyFlinkTestCase):
     def setUpClass(cls):
         super(HBaseDescriptorTests, cls).setUpClass()
         cls._cxt_clz_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
-        _load_specific_flink_module_jars('/flink-connectors/flink-connector-hbase')
+        _load_specific_flink_module_jars('/flink-connectors/flink-connector-hbase-base')
 
     def test_version(self):
         hbase = HBase().version("1.4.3")
diff --git a/tools/ci/stage.sh b/tools/ci/stage.sh
index b2c9e5a..a064d86 100755
--- a/tools/ci/stage.sh
+++ b/tools/ci/stage.sh
@@ -83,7 +83,9 @@ flink-formats/flink-json,\
 flink-formats/flink-csv,\
 flink-formats/flink-orc,\
 flink-formats/flink-orc-nohive,\
-flink-connectors/flink-connector-hbase,\
+flink-connectors/flink-connector-hbase-base,\
+flink-connectors/flink-connector-hbase-1.4,\
+flink-connectors/flink-connector-hbase-2.2,\
 flink-connectors/flink-hcatalog,\
 flink-connectors/flink-hadoop-compatibility,\
 flink-connectors,\