Posted to issues@kylin.apache.org by GitBox <gi...@apache.org> on 2018/12/17 15:53:09 UTC

[GitHub] xiaoyunancy closed pull request #400: Kylin on druid

xiaoyunancy closed pull request #400: Kylin on druid
URL: https://github.com/apache/kylin/pull/400

This is a PR merged from a forked repository. As GitHub hides the original
diff on merge, it is displayed below for the sake of provenance:

diff --git a/assembly/pom.xml b/assembly/pom.xml
index 8a1a035926..dd3211a7cb 100644
--- a/assembly/pom.xml
+++ b/assembly/pom.xml
@@ -38,6 +38,10 @@
             <groupId>org.apache.kylin</groupId>
             <artifactId>kylin-source-hive</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.kylin</groupId>
+            <artifactId>kylin-source-jdbc</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.apache.kylin</groupId>
             <artifactId>kylin-source-kafka</artifactId>
diff --git a/assembly/src/test/java/org/apache/kylin/job/DeployUtil.java b/assembly/src/test/java/org/apache/kylin/job/DeployUtil.java
index cda850fab4..d79caf8fab 100644
--- a/assembly/src/test/java/org/apache/kylin/job/DeployUtil.java
+++ b/assembly/src/test/java/org/apache/kylin/job/DeployUtil.java
@@ -70,8 +70,8 @@ public static void initCliWorkDir() throws IOException {
 
     public static void deployMetadata(String localMetaData) throws IOException {
         // install metadata to hbase
-        ResourceTool.reset(config());
-        ResourceTool.copy(KylinConfig.createInstanceFromUri(localMetaData), config());
+        new ResourceTool().reset(config());
+        new ResourceTool().copy(KylinConfig.createInstanceFromUri(localMetaData), config());
 
         // update cube desc signature.
         for (CubeInstance cube : CubeManager.getInstance(config()).listAllCubes()) {
@@ -177,7 +177,7 @@ public static void appendFactTableData(String factTableContent, String factTable
         InputStream tempIn = null;
         try {
             if (store.exists(factTablePath)) {
-                InputStream oldContent = store.getResource(factTablePath).inputStream;
+                InputStream oldContent = store.getResource(factTablePath).content();
                 IOUtils.copy(oldContent, out);
             }
             IOUtils.copy(in, out);
@@ -226,7 +226,7 @@ private static void deployTables(String modelName) throws Exception {
             localBufferFile.createNewFile();
 
             logger.info(String.format(Locale.ROOT, "get resource from hbase:/data/%s.csv", tablename));
-            InputStream hbaseDataStream = metaMgr.getStore().getResource("/data/" + tablename + ".csv").inputStream;
+            InputStream hbaseDataStream = metaMgr.getStore().getResource("/data/" + tablename + ".csv").content();
             FileOutputStream localFileStream = new FileOutputStream(localBufferFile);
             IOUtils.copy(hbaseDataStream, localFileStream);
 
diff --git a/atopcalcite/.settings/org.eclipse.core.resources.prefs b/atopcalcite/.settings/org.eclipse.core.resources.prefs
deleted file mode 100644
index 29abf99956..0000000000
--- a/atopcalcite/.settings/org.eclipse.core.resources.prefs
+++ /dev/null
@@ -1,6 +0,0 @@
-eclipse.preferences.version=1
-encoding//src/main/java=UTF-8
-encoding//src/main/resources=UTF-8
-encoding//src/test/java=UTF-8
-encoding//src/test/resources=UTF-8
-encoding/<project>=UTF-8
diff --git a/atopcalcite/.settings/org.eclipse.jdt.core.prefs b/atopcalcite/.settings/org.eclipse.jdt.core.prefs
deleted file mode 100644
index 500de29f7f..0000000000
--- a/atopcalcite/.settings/org.eclipse.jdt.core.prefs
+++ /dev/null
@@ -1,386 +0,0 @@
-eclipse.preferences.version=1
-org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=disabled
-org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
-org.eclipse.jdt.core.compiler.annotation.nonnull=org.eclipse.jdt.annotation.NonNull
-org.eclipse.jdt.core.compiler.annotation.nonnull.secondary=
-org.eclipse.jdt.core.compiler.annotation.nonnullbydefault=org.eclipse.jdt.annotation.NonNullByDefault
-org.eclipse.jdt.core.compiler.annotation.nonnullbydefault.secondary=
-org.eclipse.jdt.core.compiler.annotation.nullable=org.eclipse.jdt.annotation.Nullable
-org.eclipse.jdt.core.compiler.annotation.nullable.secondary=
-org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
-org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
-org.eclipse.jdt.core.compiler.codegen.methodParameters=do not generate
-org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
-org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
-org.eclipse.jdt.core.compiler.compliance=1.7
-org.eclipse.jdt.core.compiler.debug.lineNumber=generate
-org.eclipse.jdt.core.compiler.debug.localVariable=generate
-org.eclipse.jdt.core.compiler.debug.sourceFile=generate
-org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=warning
-org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
-org.eclipse.jdt.core.compiler.problem.autoboxing=ignore
-org.eclipse.jdt.core.compiler.problem.comparingIdentical=warning
-org.eclipse.jdt.core.compiler.problem.deadCode=warning
-org.eclipse.jdt.core.compiler.problem.deprecation=warning
-org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled
-org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled
-org.eclipse.jdt.core.compiler.problem.discouragedReference=ignore
-org.eclipse.jdt.core.compiler.problem.emptyStatement=ignore
-org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
-org.eclipse.jdt.core.compiler.problem.explicitlyClosedAutoCloseable=ignore
-org.eclipse.jdt.core.compiler.problem.fallthroughCase=ignore
-org.eclipse.jdt.core.compiler.problem.fatalOptionalError=disabled
-org.eclipse.jdt.core.compiler.problem.fieldHiding=ignore
-org.eclipse.jdt.core.compiler.problem.finalParameterBound=warning
-org.eclipse.jdt.core.compiler.problem.finallyBlockNotCompletingNormally=warning
-org.eclipse.jdt.core.compiler.problem.forbiddenReference=ignore
-org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=warning
-org.eclipse.jdt.core.compiler.problem.includeNullInfoFromAsserts=disabled
-org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=warning
-org.eclipse.jdt.core.compiler.problem.incompleteEnumSwitch=warning
-org.eclipse.jdt.core.compiler.problem.indirectStaticAccess=ignore
-org.eclipse.jdt.core.compiler.problem.localVariableHiding=ignore
-org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=warning
-org.eclipse.jdt.core.compiler.problem.missingDefaultCase=ignore
-org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore
-org.eclipse.jdt.core.compiler.problem.missingEnumCaseDespiteDefault=disabled
-org.eclipse.jdt.core.compiler.problem.missingHashCodeMethod=ignore
-org.eclipse.jdt.core.compiler.problem.missingOverrideAnnotation=ignore
-org.eclipse.jdt.core.compiler.problem.missingOverrideAnnotationForInterfaceMethodImplementation=enabled
-org.eclipse.jdt.core.compiler.problem.missingSerialVersion=warning
-org.eclipse.jdt.core.compiler.problem.missingSynchronizedOnInheritedMethod=ignore
-org.eclipse.jdt.core.compiler.problem.noEffectAssignment=warning
-org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=warning
-org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore
-org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=warning
-org.eclipse.jdt.core.compiler.problem.nonnullTypeVariableFromLegacyInvocation=warning
-org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=error
-org.eclipse.jdt.core.compiler.problem.nullReference=warning
-org.eclipse.jdt.core.compiler.problem.nullSpecViolation=error
-org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=warning
-org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=warning
-org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore
-org.eclipse.jdt.core.compiler.problem.pessimisticNullAnalysisForFreeTypeVariables=warning
-org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=ignore
-org.eclipse.jdt.core.compiler.problem.potentialNullReference=ignore
-org.eclipse.jdt.core.compiler.problem.potentiallyUnclosedCloseable=ignore
-org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
-org.eclipse.jdt.core.compiler.problem.redundantNullAnnotation=warning
-org.eclipse.jdt.core.compiler.problem.redundantNullCheck=ignore
-org.eclipse.jdt.core.compiler.problem.redundantSpecificationOfTypeArguments=ignore
-org.eclipse.jdt.core.compiler.problem.redundantSuperinterface=ignore
-org.eclipse.jdt.core.compiler.problem.reportMethodCanBePotentiallyStatic=ignore
-org.eclipse.jdt.core.compiler.problem.reportMethodCanBeStatic=ignore
-org.eclipse.jdt.core.compiler.problem.specialParameterHidingField=disabled
-org.eclipse.jdt.core.compiler.problem.staticAccessReceiver=warning
-org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=disabled
-org.eclipse.jdt.core.compiler.problem.suppressWarnings=enabled
-org.eclipse.jdt.core.compiler.problem.syntacticNullAnalysisForFields=disabled
-org.eclipse.jdt.core.compiler.problem.syntheticAccessEmulation=ignore
-org.eclipse.jdt.core.compiler.problem.typeParameterHiding=warning
-org.eclipse.jdt.core.compiler.problem.unavoidableGenericTypeProblems=enabled
-org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=ignore
-org.eclipse.jdt.core.compiler.problem.unclosedCloseable=warning
-org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=ignore
-org.eclipse.jdt.core.compiler.problem.unhandledWarningToken=warning
-org.eclipse.jdt.core.compiler.problem.unnecessaryElse=ignore
-org.eclipse.jdt.core.compiler.problem.unnecessaryTypeCheck=ignore
-org.eclipse.jdt.core.compiler.problem.unqualifiedFieldAccess=ignore
-org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=ignore
-org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionExemptExceptionAndThrowable=enabled
-org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionIncludeDocCommentReference=enabled
-org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled
-org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=ignore
-org.eclipse.jdt.core.compiler.problem.unusedImport=warning
-org.eclipse.jdt.core.compiler.problem.unusedLabel=warning
-org.eclipse.jdt.core.compiler.problem.unusedLocal=warning
-org.eclipse.jdt.core.compiler.problem.unusedObjectAllocation=ignore
-org.eclipse.jdt.core.compiler.problem.unusedParameter=ignore
-org.eclipse.jdt.core.compiler.problem.unusedParameterIncludeDocCommentReference=enabled
-org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled
-org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
-org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=warning
-org.eclipse.jdt.core.compiler.problem.unusedTypeParameter=ignore
-org.eclipse.jdt.core.compiler.problem.unusedWarningToken=warning
-org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=warning
-org.eclipse.jdt.core.compiler.source=1.7
-org.eclipse.jdt.core.formatter.align_type_members_on_columns=false
-org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
-org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation=0
-org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
-org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
-org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation=16
-org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression=16
-org.eclipse.jdt.core.formatter.alignment_for_assignment=0
-org.eclipse.jdt.core.formatter.alignment_for_binary_expression=16
-org.eclipse.jdt.core.formatter.alignment_for_compact_if=16
-org.eclipse.jdt.core.formatter.alignment_for_conditional_expression=80
-org.eclipse.jdt.core.formatter.alignment_for_enum_constants=0
-org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer=16
-org.eclipse.jdt.core.formatter.alignment_for_method_declaration=0
-org.eclipse.jdt.core.formatter.alignment_for_multiple_fields=16
-org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_resources_in_try=80
-org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation=16
-org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration=16
-org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch=16
-org.eclipse.jdt.core.formatter.blank_lines_after_imports=1
-org.eclipse.jdt.core.formatter.blank_lines_after_package=1
-org.eclipse.jdt.core.formatter.blank_lines_before_field=0
-org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration=0
-org.eclipse.jdt.core.formatter.blank_lines_before_imports=1
-org.eclipse.jdt.core.formatter.blank_lines_before_member_type=1
-org.eclipse.jdt.core.formatter.blank_lines_before_method=1
-org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk=1
-org.eclipse.jdt.core.formatter.blank_lines_before_package=0
-org.eclipse.jdt.core.formatter.blank_lines_between_import_groups=1
-org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations=1
-org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_array_initializer=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_block=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_block_in_case=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_enum_constant=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_method_declaration=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_switch=end_of_line
-org.eclipse.jdt.core.formatter.brace_position_for_type_declaration=end_of_line
-org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment=false
-org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment=false
-org.eclipse.jdt.core.formatter.comment.format_block_comments=false
-org.eclipse.jdt.core.formatter.comment.format_header=false
-org.eclipse.jdt.core.formatter.comment.format_html=true
-org.eclipse.jdt.core.formatter.comment.format_javadoc_comments=false
-org.eclipse.jdt.core.formatter.comment.format_line_comments=false
-org.eclipse.jdt.core.formatter.comment.format_source_code=true
-org.eclipse.jdt.core.formatter.comment.indent_parameter_description=true
-org.eclipse.jdt.core.formatter.comment.indent_root_tags=true
-org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags=insert
-org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter=insert
-org.eclipse.jdt.core.formatter.comment.line_length=80
-org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries=true
-org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries=true
-org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments=false
-org.eclipse.jdt.core.formatter.compact_else_if=true
-org.eclipse.jdt.core.formatter.continuation_indentation=2
-org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer=2
-org.eclipse.jdt.core.formatter.disabling_tag=@formatter\:off
-org.eclipse.jdt.core.formatter.enabling_tag=@formatter\:on
-org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line=false
-org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column=true
-org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header=true
-org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header=true
-org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header=true
-org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header=true
-org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases=true
-org.eclipse.jdt.core.formatter.indent_empty_lines=false
-org.eclipse.jdt.core.formatter.indent_statements_compare_to_block=true
-org.eclipse.jdt.core.formatter.indent_statements_compare_to_body=true
-org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases=true
-org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch=false
-org.eclipse.jdt.core.formatter.indentation.size=4
-org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field=insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable=insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method=insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package=insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type=insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_label=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement=do not insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration=insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration=insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block=insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant=insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration=insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body=insert
-org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter=insert
-org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator=insert
-org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_binary_operator=insert
-org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments=insert
-org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters=insert
-org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block=insert
-org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast=insert
-org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert=insert
-org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case=insert
-org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional=insert
-org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for=insert
-org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments=insert
-org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters=insert
-org.eclipse.jdt.core.formatter.insert_space_after_ellipsis=insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer=insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional=insert
-org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard=do not insert
-org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for=insert
-org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources=insert
-org.eclipse.jdt.core.formatter.insert_space_after_unary_operator=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter=insert
-org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator=insert
-org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_binary_operator=insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer=insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert=insert
-org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional=insert
-org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for=insert
-org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_ellipsis=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try=insert
-org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while=insert
-org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return=insert
-org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw=insert
-org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional=insert
-org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_semicolon=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources=do not insert
-org.eclipse.jdt.core.formatter.insert_space_before_unary_operator=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration=do not insert
-org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation=do not insert
-org.eclipse.jdt.core.formatter.join_lines_in_comments=true
-org.eclipse.jdt.core.formatter.join_wrapped_lines=true
-org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line=false
-org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line=false
-org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line=false
-org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line=false
-org.eclipse.jdt.core.formatter.lineSplit=120
-org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column=false
-org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column=false
-org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body=0
-org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve=1
-org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line=true
-org.eclipse.jdt.core.formatter.tabulation.char=space
-org.eclipse.jdt.core.formatter.tabulation.size=4
-org.eclipse.jdt.core.formatter.use_on_off_tags=false
-org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations=false
-org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
-org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch=true
-org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested=true
diff --git a/atopcalcite/.settings/org.eclipse.jdt.ui.prefs b/atopcalcite/.settings/org.eclipse.jdt.ui.prefs
deleted file mode 100644
index d521bab91b..0000000000
--- a/atopcalcite/.settings/org.eclipse.jdt.ui.prefs
+++ /dev/null
@@ -1,7 +0,0 @@
-eclipse.preferences.version=1
-formatter_profile=_Space Indent & Long Lines
-formatter_settings_version=12
-org.eclipse.jdt.ui.ignorelowercasenames=true
-org.eclipse.jdt.ui.importorder=java;javax;org;com;
-org.eclipse.jdt.ui.ondemandthreshold=99
-org.eclipse.jdt.ui.staticondemandthreshold=99
diff --git a/atopcalcite/pom.xml b/atopcalcite/pom.xml
deleted file mode 100644
index 58e40ef822..0000000000
--- a/atopcalcite/pom.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
- 
-     http://www.apache.org/licenses/LICENSE-2.0
- 
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>atopcalcite</artifactId>
-    <packaging>jar</packaging>
-    <name>Apache Kylin - Calcite Overrides</name>
-    <description>Apache Kylin - Calcite Overrides</description>
-
-    <parent>
-        <groupId>org.apache.kylin</groupId>
-        <artifactId>kylin</artifactId>
-        <version>2.6.0-SNAPSHOT</version>
-    </parent>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.calcite</groupId>
-            <artifactId>calcite-core</artifactId>
-            <exclusions>
-                <exclusion>
-                    <groupId>org.apache.calcite.avatica</groupId>
-                    <artifactId>avatica-core</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <!-- It should be avatica(the shaded one), not avatica-core, since the inconsistency protobuf dependency with Hadoop -->
-        <dependency>
-            <groupId>org.apache.calcite.avatica</groupId>
-            <artifactId>avatica</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/atopcalcite/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableWindowBridge.java b/atopcalcite/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableWindowBridge.java
deleted file mode 100644
index 13a33e3059..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/adapter/enumerable/EnumerableWindowBridge.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-
-package org.apache.calcite.adapter.enumerable;
-
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Window;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rex.RexLiteral;
-
-import java.util.List;
-
-/**
- * EnumerableWindow cant'be created out of package, here's hack of workaround
- */
-public class EnumerableWindowBridge {
-
-    public static EnumerableWindow createEnumerableWindow(RelOptCluster cluster, RelTraitSet traits, RelNode child,
-                                                   List<RexLiteral> constants, RelDataType rowType, List<Window.Group> groups) {
-        return new EnumerableWindow(cluster, traits, child, constants, rowType, groups);
-    }
-}
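
The deleted EnumerableWindowBridge above used a common same-package escape hatch: EnumerableWindow's constructor is package-private, so a small public factory compiled into org.apache.calcite.adapter.enumerable could re-export it to the rest of Kylin. A generic sketch of that pattern with hypothetical names (LibraryBridge and Widget are illustrative, not real Calcite classes):

    // Compiled into the library's own package so that package-private
    // members become visible; external callers go through the public factory.
    package com.example.library;

    public final class LibraryBridge {
        private LibraryBridge() {
        }

        // Widget's constructor is assumed package-private, so only classes
        // inside com.example.library may invoke it directly.
        public static Widget newWidget(int size) {
            return new Widget(size);
        }
    }

Splitting a library's package across jars this way is fragile (among other things, it violates the Java module system's split-package rule), which is presumably why the original source comment itself calls it a hack.
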
diff --git a/atopcalcite/src/main/java/org/apache/calcite/prepare/CalcitePrepareImpl.java b/atopcalcite/src/main/java/org/apache/calcite/prepare/CalcitePrepareImpl.java
deleted file mode 100644
index b63beeeb49..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/prepare/CalcitePrepareImpl.java
+++ /dev/null
@@ -1,1518 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-package org.apache.calcite.prepare;
-
-import static org.apache.calcite.util.Static.RESOURCE;
-
-import java.lang.reflect.Type;
-import java.math.BigDecimal;
-import java.sql.DatabaseMetaData;
-import java.sql.Types;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.calcite.DataContext;
-import org.apache.calcite.adapter.enumerable.EnumerableBindable;
-import org.apache.calcite.adapter.enumerable.EnumerableCalc;
-import org.apache.calcite.adapter.enumerable.EnumerableConvention;
-import org.apache.calcite.adapter.enumerable.EnumerableInterpretable;
-import org.apache.calcite.adapter.enumerable.EnumerableInterpreterRule;
-import org.apache.calcite.adapter.enumerable.EnumerableRel;
-import org.apache.calcite.adapter.enumerable.EnumerableRules;
-import org.apache.calcite.adapter.enumerable.RexToLixTranslator;
-import org.apache.calcite.adapter.java.JavaTypeFactory;
-import org.apache.calcite.avatica.AvaticaParameter;
-import org.apache.calcite.avatica.ColumnMetaData;
-import org.apache.calcite.avatica.Meta;
-import org.apache.calcite.config.CalciteConnectionConfig;
-import org.apache.calcite.interpreter.BindableConvention;
-import org.apache.calcite.interpreter.Bindables;
-import org.apache.calcite.interpreter.Interpreters;
-import org.apache.calcite.jdbc.CalcitePrepare;
-import org.apache.calcite.jdbc.CalciteSchema;
-import org.apache.calcite.jdbc.CalciteSchema.LatticeEntry;
-import org.apache.calcite.linq4j.Enumerable;
-import org.apache.calcite.linq4j.Linq4j;
-import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.linq4j.Queryable;
-import org.apache.calcite.linq4j.function.Function1;
-import org.apache.calcite.linq4j.tree.BinaryExpression;
-import org.apache.calcite.linq4j.tree.BlockStatement;
-import org.apache.calcite.linq4j.tree.Blocks;
-import org.apache.calcite.linq4j.tree.ConstantExpression;
-import org.apache.calcite.linq4j.tree.Expression;
-import org.apache.calcite.linq4j.tree.Expressions;
-import org.apache.calcite.linq4j.tree.MemberExpression;
-import org.apache.calcite.linq4j.tree.MethodCallExpression;
-import org.apache.calcite.linq4j.tree.NewExpression;
-import org.apache.calcite.linq4j.tree.ParameterExpression;
-import org.apache.calcite.materialize.MaterializationService;
-import org.apache.calcite.plan.Contexts;
-import org.apache.calcite.plan.Convention;
-import org.apache.calcite.plan.ConventionTraitDef;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptCostFactory;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptTable;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.plan.hep.HepPlanner;
-import org.apache.calcite.plan.hep.HepProgramBuilder;
-import org.apache.calcite.plan.volcano.VolcanoPlanner;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollationTraitDef;
-import org.apache.calcite.rel.RelCollations;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.RelRoot;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.Sort;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.rules.AbstractMaterializedViewRule;
-import org.apache.calcite.rel.rules.AggregateExpandDistinctAggregatesRule;
-import org.apache.calcite.rel.rules.AggregateReduceFunctionsRule;
-import org.apache.calcite.rel.rules.AggregateStarTableRule;
-import org.apache.calcite.rel.rules.AggregateValuesRule;
-import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
-import org.apache.calcite.rel.rules.FilterJoinRule;
-import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
-import org.apache.calcite.rel.rules.FilterTableScanRule;
-import org.apache.calcite.rel.rules.JoinAssociateRule;
-import org.apache.calcite.rel.rules.JoinCommuteRule;
-import org.apache.calcite.rel.rules.JoinPushExpressionsRule;
-import org.apache.calcite.rel.rules.JoinPushThroughJoinRule;
-import org.apache.calcite.rel.rules.MaterializedViewFilterScanRule;
-import org.apache.calcite.rel.rules.ProjectFilterTransposeRule;
-import org.apache.calcite.rel.rules.ProjectMergeRule;
-import org.apache.calcite.rel.rules.ProjectTableScanRule;
-import org.apache.calcite.rel.rules.ProjectWindowTransposeRule;
-import org.apache.calcite.rel.rules.ReduceExpressionsRule;
-import org.apache.calcite.rel.rules.SortJoinTransposeRule;
-import org.apache.calcite.rel.rules.SortProjectTransposeRule;
-import org.apache.calcite.rel.rules.SortUnionTransposeRule;
-import org.apache.calcite.rel.rules.TableScanRule;
-import org.apache.calcite.rel.rules.ValuesReduceRule;
-import org.apache.calcite.rel.stream.StreamRules;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexProgram;
-import org.apache.calcite.runtime.Bindable;
-import org.apache.calcite.runtime.Hook;
-import org.apache.calcite.runtime.Typed;
-import org.apache.calcite.schema.Schemas;
-import org.apache.calcite.schema.Table;
-import org.apache.calcite.server.CalciteServerStatement;
-import org.apache.calcite.sql.SqlBinaryOperator;
-import org.apache.calcite.sql.SqlExecutableStatement;
-import org.apache.calcite.sql.SqlExplainFormat;
-import org.apache.calcite.sql.SqlExplainLevel;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.SqlOperatorTable;
-import org.apache.calcite.sql.SqlUtil;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.parser.SqlParseException;
-import org.apache.calcite.sql.parser.SqlParser;
-import org.apache.calcite.sql.parser.SqlParserImplFactory;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.sql.util.ChainedSqlOperatorTable;
-import org.apache.calcite.sql.validate.SqlConformance;
-import org.apache.calcite.sql.validate.SqlValidator;
-import org.apache.calcite.sql2rel.SqlRexConvertletTable;
-import org.apache.calcite.sql2rel.SqlToRelConverter;
-import org.apache.calcite.sql2rel.StandardConvertletTable;
-import org.apache.calcite.tools.Frameworks;
-import org.apache.calcite.tools.RelUtils;
-import org.apache.calcite.util.ImmutableIntList;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Maps;
-
-/*
- * OVERRIDE POINT:
- * - grep KYLIN_ONLY_PREPARE
- */
-
-/**
- * Shit just got real.
- *
- * <p>This class is public so that projects that create their own JDBC driver
- * and server can fine-tune preferences. However, this class and its methods are
- * subject to change without notice.</p>
- */
-public class CalcitePrepareImpl implements CalcitePrepare {
-
-  public static final ThreadLocal<Boolean> KYLIN_ONLY_PREPARE = new ThreadLocal<>();
-
-  public static final boolean DEBUG = Util.getBooleanProperty("calcite.debug");
-
-  public static final boolean COMMUTE =
-      Util.getBooleanProperty("calcite.enable.join.commute");
-
-  /** Whether to enable the collation trait. Some extra optimizations are
-   * possible if enabled, but queries should work either way. At some point
-   * this will become a preference, or we will run multiple phases: first
-   * disabled, then enabled. */
-  private static final boolean ENABLE_COLLATION_TRAIT = true;
-
-  /** Whether the bindable convention should be the root convention of any
-   * plan. If not, enumerable convention is the default. */
-  public final boolean enableBindable = Hook.ENABLE_BINDABLE.get(false);
-
-  /** Whether the enumerable convention is enabled. */
-  public static final boolean ENABLE_ENUMERABLE = true;
-
-  /** Whether the streaming is enabled. */
-  public static final boolean ENABLE_STREAM = true;
-
-  private static final Set<String> SIMPLE_SQLS =
-      ImmutableSet.of(
-          "SELECT 1",
-          "select 1",
-          "SELECT 1 FROM DUAL",
-          "select 1 from dual",
-          "values 1",
-          "VALUES 1");
-
-  public static final List<RelOptRule> ENUMERABLE_RULES =
-      ImmutableList.of(
-          EnumerableRules.ENUMERABLE_JOIN_RULE,
-          EnumerableRules.ENUMERABLE_MERGE_JOIN_RULE,
-          EnumerableRules.ENUMERABLE_SEMI_JOIN_RULE,
-          EnumerableRules.ENUMERABLE_CORRELATE_RULE,
-          EnumerableRules.ENUMERABLE_PROJECT_RULE,
-          EnumerableRules.ENUMERABLE_FILTER_RULE,
-          EnumerableRules.ENUMERABLE_AGGREGATE_RULE,
-          EnumerableRules.ENUMERABLE_SORT_RULE,
-          EnumerableRules.ENUMERABLE_LIMIT_RULE,
-          EnumerableRules.ENUMERABLE_COLLECT_RULE,
-          EnumerableRules.ENUMERABLE_UNCOLLECT_RULE,
-          EnumerableRules.ENUMERABLE_UNION_RULE,
-          EnumerableRules.ENUMERABLE_INTERSECT_RULE,
-          EnumerableRules.ENUMERABLE_MINUS_RULE,
-          EnumerableRules.ENUMERABLE_TABLE_MODIFICATION_RULE,
-          EnumerableRules.ENUMERABLE_VALUES_RULE,
-          EnumerableRules.ENUMERABLE_WINDOW_RULE,
-          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE,
-          EnumerableRules.ENUMERABLE_TABLE_FUNCTION_SCAN_RULE);
-
-  private static final List<RelOptRule> DEFAULT_RULES =
-      ImmutableList.of(
-          AggregateStarTableRule.INSTANCE,
-          AggregateStarTableRule.INSTANCE2,
-          TableScanRule.INSTANCE,
-          COMMUTE
-              ? JoinAssociateRule.INSTANCE
-              : ProjectMergeRule.INSTANCE,
-          FilterTableScanRule.INSTANCE,
-          ProjectFilterTransposeRule.INSTANCE,
-          FilterProjectTransposeRule.INSTANCE,
-          FilterJoinRule.FILTER_ON_JOIN,
-          JoinPushExpressionsRule.INSTANCE,
-          AggregateExpandDistinctAggregatesRule.INSTANCE,
-          AggregateReduceFunctionsRule.INSTANCE,
-          FilterAggregateTransposeRule.INSTANCE,
-          ProjectWindowTransposeRule.INSTANCE,
-          JoinCommuteRule.INSTANCE,
-          JoinPushThroughJoinRule.RIGHT,
-          JoinPushThroughJoinRule.LEFT,
-          SortProjectTransposeRule.INSTANCE,
-          SortJoinTransposeRule.INSTANCE,
-          SortUnionTransposeRule.INSTANCE);
-
-  private static final List<RelOptRule> CONSTANT_REDUCTION_RULES =
-      ImmutableList.of(
-          ReduceExpressionsRule.PROJECT_INSTANCE,
-          ReduceExpressionsRule.FILTER_INSTANCE,
-          ReduceExpressionsRule.CALC_INSTANCE,
-          ReduceExpressionsRule.JOIN_INSTANCE,
-          ValuesReduceRule.FILTER_INSTANCE,
-          ValuesReduceRule.PROJECT_FILTER_INSTANCE,
-          ValuesReduceRule.PROJECT_INSTANCE,
-          AggregateValuesRule.INSTANCE);
-
-  public CalcitePrepareImpl() {
-  }
-
-  public ParseResult parse(
-          Context context, String sql) {
-    return parse_(context, sql, false, false, false);
-  }
-
-  public ConvertResult convert(Context context, String sql) {
-    return (ConvertResult) parse_(context, sql, true, false, false);
-  }
-
-  public AnalyzeViewResult analyzeView(Context context, String sql, boolean fail) {
-    return (AnalyzeViewResult) parse_(context, sql, true, true, fail);
-  }
-
-  /** Shared implementation for {@link #parse}, {@link #convert} and
-   * {@link #analyzeView}. */
-  private ParseResult parse_(Context context, String sql, boolean convert,
-                             boolean analyze, boolean fail) {
-    final JavaTypeFactory typeFactory = context.getTypeFactory();
-    CalciteCatalogReader catalogReader =
-        new CalciteCatalogReader(
-            context.getRootSchema(),
-            context.config().caseSensitive(),
-            context.getDefaultSchemaPath(),
-            typeFactory);
-    SqlParser parser = createParser(sql);
-    SqlNode sqlNode;
-    try {
-      sqlNode = parser.parseStmt();
-    } catch (SqlParseException e) {
-      throw new RuntimeException("parse failed", e);
-    }
-    final SqlValidator validator = createSqlValidator(context, catalogReader);
-    SqlNode sqlNode1 = validator.validate(sqlNode);
-    if (convert) {
-      return convert_(
-          context, sql, analyze, fail, catalogReader, validator, sqlNode1);
-    }
-    return new ParseResult(this, validator, sql, sqlNode1,
-        validator.getValidatedNodeType(sqlNode1));
-  }
-
-  private ParseResult convert_(Context context, String sql, boolean analyze,
-                               boolean fail, CalciteCatalogReader catalogReader, SqlValidator validator,
-                               SqlNode sqlNode1) {
-    final JavaTypeFactory typeFactory = context.getTypeFactory();
-    final Convention resultConvention =
-        enableBindable ? BindableConvention.INSTANCE
-            : EnumerableConvention.INSTANCE;
-    final HepPlanner planner = new HepPlanner(new HepProgramBuilder().build());
-    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
-
-    final SqlToRelConverter.ConfigBuilder configBuilder =
-        SqlToRelConverter.configBuilder().withTrimUnusedFields(true);
-    if (analyze) {
-      configBuilder.withConvertTableAccess(false);
-    }
-
-    final CalcitePreparingStmt preparingStmt =
-        new CalcitePreparingStmt(this, context, catalogReader, typeFactory,
-            context.getRootSchema(), null, planner, resultConvention,
-            createConvertletTable());
-    final SqlToRelConverter converter =
-        preparingStmt.getSqlToRelConverter(validator, catalogReader,
-            configBuilder.build());
-
-    final RelRoot root = converter.convertQuery(sqlNode1, false, true);
-    if (analyze) {
-      return analyze_(validator, sql, sqlNode1, root, fail);
-    }
-    return new ConvertResult(this, validator, sql, sqlNode1,
-        validator.getValidatedNodeType(sqlNode1), root);
-  }
-
-  private AnalyzeViewResult analyze_(SqlValidator validator, String sql,
-                                     SqlNode sqlNode, RelRoot root, boolean fail) {
-    final RexBuilder rexBuilder = root.rel.getCluster().getRexBuilder();
-    RelNode rel = root.rel;
-    final RelNode viewRel = rel;
-    Project project;
-    if (rel instanceof Project) {
-      project = (Project) rel;
-      rel = project.getInput();
-    } else {
-      project = null;
-    }
-    Filter filter;
-    if (rel instanceof Filter) {
-      filter = (Filter) rel;
-      rel = filter.getInput();
-    } else {
-      filter = null;
-    }
-    TableScan scan;
-    if (rel instanceof TableScan) {
-      scan = (TableScan) rel;
-    } else {
-      scan = null;
-    }
-    if (scan == null) {
-      if (fail) {
-        throw validator.newValidationError(sqlNode,
-            RESOURCE.modifiableViewMustBeBasedOnSingleTable());
-      }
-      return new AnalyzeViewResult(this, validator, sql, sqlNode,
-          validator.getValidatedNodeType(sqlNode), root, null, null, null,
-          null, false);
-    }
-    final RelOptTable targetRelTable = scan.getTable();
-    final RelDataType targetRowType = targetRelTable.getRowType();
-    final Table table = targetRelTable.unwrap(Table.class);
-    final List<String> tablePath = targetRelTable.getQualifiedName();
-    assert table != null;
-    List<Integer> columnMapping;
-    final Map<Integer, RexNode> projectMap = new HashMap<>();
-    if (project == null) {
-      columnMapping = ImmutableIntList.range(0, targetRowType.getFieldCount());
-    } else {
-      columnMapping = new ArrayList<>();
-      for (Ord<RexNode> node : Ord.zip(project.getProjects())) {
-        if (node.e instanceof RexInputRef) {
-          RexInputRef rexInputRef = (RexInputRef) node.e;
-          int index = rexInputRef.getIndex();
-          if (projectMap.get(index) != null) {
-            if (fail) {
-              throw validator.newValidationError(sqlNode,
-                  RESOURCE.moreThanOneMappedColumn(
-                      targetRowType.getFieldList().get(index).getName(),
-                      Util.last(tablePath)));
-            }
-            return new AnalyzeViewResult(this, validator, sql, sqlNode,
-                validator.getValidatedNodeType(sqlNode), root, null, null, null,
-                null, false);
-          }
-          projectMap.put(index, rexBuilder.makeInputRef(viewRel, node.i));
-          columnMapping.add(index);
-        } else {
-          columnMapping.add(-1);
-        }
-      }
-    }
-    final RexNode constraint;
-    if (filter != null) {
-      constraint = filter.getCondition();
-    } else {
-      constraint = rexBuilder.makeLiteral(true);
-    }
-    final List<RexNode> filters = new ArrayList<>();
-    // If we put a constraint in projectMap above, then filters will not be empty despite
-    // being a modifiable view.
-    final List<RexNode> filters2 = new ArrayList<>();
-    boolean retry = false;
-    RelOptUtil.inferViewPredicates(projectMap, filters, constraint);
-    if (fail && !filters.isEmpty()) {
-      final Map<Integer, RexNode> projectMap2 = new HashMap<>();
-      RelOptUtil.inferViewPredicates(projectMap2, filters2, constraint);
-      if (!filters2.isEmpty()) {
-        throw validator.newValidationError(sqlNode,
-            RESOURCE.modifiableViewMustHaveOnlyEqualityPredicates());
-      }
-      retry = true;
-    }
-
-    // Check that all columns that are not projected have a constant value
-    for (RelDataTypeField field : targetRowType.getFieldList()) {
-      final int x = columnMapping.indexOf(field.getIndex());
-      if (x >= 0) {
-        assert Util.skip(columnMapping, x + 1).indexOf(field.getIndex()) < 0
-            : "column projected more than once; should have checked above";
-        continue; // target column is projected
-      }
-      if (projectMap.get(field.getIndex()) != null) {
-        continue; // constant expression
-      }
-      if (field.getType().isNullable()) {
-        continue; // don't need expression for nullable columns; NULL suffices
-      }
-      if (fail) {
-        throw validator.newValidationError(sqlNode,
-            RESOURCE.noValueSuppliedForViewColumn(field.getName(),
-                Util.last(tablePath)));
-      }
-      return new AnalyzeViewResult(this, validator, sql, sqlNode,
-          validator.getValidatedNodeType(sqlNode), root, null, null, null,
-          null, false);
-    }
-
-    final boolean modifiable = filters.isEmpty() || retry && filters2.isEmpty();
-    return new AnalyzeViewResult(this, validator, sql, sqlNode,
-        validator.getValidatedNodeType(sqlNode), root, modifiable ? table : null,
-        ImmutableList.copyOf(tablePath),
-        constraint, ImmutableIntList.copyOf(columnMapping),
-        modifiable);
-  }
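
A note for readers of analyze_ above: it decides whether a view is modifiable by peeling an optional Project, then an optional Filter, then a mandatory single TableScan off the view's plan. As a hedged illustration (table and column names invented), a view defined as "SELECT empid, name FROM emps WHERE deptno = 20" passes, because a single table remains and the constraint is equality-only; the same view with "WHERE deptno > 20" trips modifiableViewMustHaveOnlyEqualityPredicates, and any view joining two tables trips modifiableViewMustBeBasedOnSingleTable, since no lone TableScan survives the peeling.
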
-
-  @Override public void executeDdl(Context context, SqlNode node) {
-    if (node instanceof SqlExecutableStatement) {
-      SqlExecutableStatement statement = (SqlExecutableStatement) node;
-      statement.execute(context);
-      return;
-    }
-    throw new UnsupportedOperationException();
-  }
-
-  /** Factory method for default SQL parser. */
-  protected SqlParser createParser(String sql) {
-    return createParser(sql, createParserConfig());
-  }
-
-  /** Factory method for SQL parser with a given configuration. */
-  protected SqlParser createParser(String sql,
-                                   SqlParser.ConfigBuilder parserConfig) {
-    return SqlParser.create(sql, parserConfig.build());
-  }
-
-  /** Factory method for SQL parser configuration. */
-  protected SqlParser.ConfigBuilder createParserConfig() {
-    return SqlParser.configBuilder();
-  }
-
-  /** Factory method for default convertlet table. */
-  protected SqlRexConvertletTable createConvertletTable() {
-    return StandardConvertletTable.INSTANCE;
-  }
-
-  /** Factory method for cluster. */
-  protected RelOptCluster createCluster(RelOptPlanner planner,
-                                        RexBuilder rexBuilder) {
-    return RelOptCluster.create(planner, rexBuilder);
-  }
-
-  /** Creates a collection of planner factories.
-   *
-   * <p>The collection must have at least one factory, and each factory must
-   * create a planner. If the collection has more than one factory, Calcite
-   * will try each resulting planner in turn.</p>
-   *
-   * <p>One of the things you can do with this mechanism is to try a simpler,
-   * faster, planner with a smaller rule set first, then fall back to a more
-   * complex planner for complex and costly queries.</p>
-   *
-   * <p>The default implementation returns a factory that calls
-   * {@link #createPlanner(org.apache.calcite.jdbc.CalcitePrepare.Context)}.</p>
-   */
-  protected List<Function1<Context, RelOptPlanner>> createPlannerFactories() {
-    return Collections.<Function1<Context, RelOptPlanner>>singletonList(
-        new Function1<Context, RelOptPlanner>() {
-          public RelOptPlanner apply(Context context) {
-            return createPlanner(context, null, null);
-          }
-        });
-  }
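
As a concrete reading of the javadoc above: prepare_ iterates over these factories and falls through to the next one whenever a planner throws CannotPlanException. A minimal sketch of a two-tier subclass follows (TieredPrepare and its tier choices are illustrative assumptions, not part of this patch):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.calcite.linq4j.function.Function1;
    import org.apache.calcite.plan.RelOptPlanner;
    import org.apache.calcite.prepare.CalcitePrepareImpl;

    public class TieredPrepare extends CalcitePrepareImpl {
      @Override
      protected List<Function1<Context, RelOptPlanner>> createPlannerFactories() {
        return Arrays.asList(
            new Function1<Context, RelOptPlanner>() {
              public RelOptPlanner apply(Context context) {
                // Tier 1: the default planner; if it throws
                // CannotPlanException, prepare_ moves on to the next factory.
                return createPlanner(context);
              }
            },
            new Function1<Context, RelOptPlanner>() {
              public RelOptPlanner apply(Context context) {
                // Tier 2: same entry point, but an external context or custom
                // cost factory could be supplied here for the expensive retry.
                return createPlanner(context, null, null);
              }
            });
      }
    }
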
-
-  /** Creates a query planner and initializes it with a default set of
-   * rules. */
-  protected RelOptPlanner createPlanner(CalcitePrepare.Context prepareContext) {
-    return createPlanner(prepareContext, null, null);
-  }
-
-  /** Creates a query planner and initializes it with a default set of
-   * rules. */
-  protected RelOptPlanner createPlanner(
-      final CalcitePrepare.Context prepareContext,
-      org.apache.calcite.plan.Context externalContext,
-      RelOptCostFactory costFactory) {
-    if (externalContext == null) {
-      externalContext = Contexts.of(prepareContext.config());
-    }
-    final VolcanoPlanner planner =
-        new VolcanoPlanner(costFactory, externalContext);
-    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
-    if (ENABLE_COLLATION_TRAIT) {
-      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
-      planner.registerAbstractRelationalRules();
-    }
-    RelOptUtil.registerAbstractRels(planner);
-    for (RelOptRule rule : DEFAULT_RULES) {
-      planner.addRule(rule);
-    }
-    if (prepareContext.config().materializationsEnabled()) {
-      planner.addRule(MaterializedViewFilterScanRule.INSTANCE);
-      planner.addRule(AbstractMaterializedViewRule.INSTANCE_PROJECT_FILTER);
-      planner.addRule(AbstractMaterializedViewRule.INSTANCE_FILTER);
-      planner.addRule(AbstractMaterializedViewRule.INSTANCE_PROJECT_JOIN);
-      planner.addRule(AbstractMaterializedViewRule.INSTANCE_JOIN);
-      planner.addRule(AbstractMaterializedViewRule.INSTANCE_PROJECT_AGGREGATE);
-      planner.addRule(AbstractMaterializedViewRule.INSTANCE_AGGREGATE);
-    }
-    if (enableBindable) {
-      for (RelOptRule rule : Bindables.RULES) {
-        planner.addRule(rule);
-      }
-    }
-    planner.addRule(Bindables.BINDABLE_TABLE_SCAN_RULE);
-    planner.addRule(ProjectTableScanRule.INSTANCE);
-    planner.addRule(ProjectTableScanRule.INTERPRETER);
-
-    if (ENABLE_ENUMERABLE) {
-      for (RelOptRule rule : ENUMERABLE_RULES) {
-        planner.addRule(rule);
-      }
-      planner.addRule(EnumerableInterpreterRule.INSTANCE);
-    }
-
-    if (enableBindable && ENABLE_ENUMERABLE) {
-      planner.addRule(
-          EnumerableBindable.EnumerableToBindableConverterRule.INSTANCE);
-    }
-
-    if (ENABLE_STREAM) {
-      for (RelOptRule rule : StreamRules.RULES) {
-        planner.addRule(rule);
-      }
-    }
-
-    // Change the below to enable constant-reduction.
-    if (false) {
-      for (RelOptRule rule : CONSTANT_REDUCTION_RULES) {
-        planner.addRule(rule);
-      }
-    }
-
-    final SparkHandler spark = prepareContext.spark();
-    if (spark.enabled()) {
-      spark.registerRules(
-          new SparkHandler.RuleSetBuilder() {
-          public void addRule(RelOptRule rule) {
-            // TODO:
-          }
-
-          public void removeRule(RelOptRule rule) {
-            // TODO:
-          }
-        });
-    }
-
-    Hook.PLANNER.run(planner); // allow test to add or remove rules
-
-    return planner;
-  }
-
-  public <T> CalciteSignature<T> prepareQueryable(
-      Context context,
-      Queryable<T> queryable) {
-    return prepare_(context, Query.of(queryable), queryable.getElementType(),
-        -1);
-  }
-
-  public <T> CalciteSignature<T> prepareSql(
-      Context context,
-      Query<T> query,
-      Type elementType,
-      long maxRowCount) {
-    return prepare_(context, query, elementType, maxRowCount);
-  }
-
-  <T> CalciteSignature<T> prepare_(
-      Context context,
-      Query<T> query,
-      Type elementType,
-      long maxRowCount) {
-    if (SIMPLE_SQLS.contains(query.sql)) {
-      return simplePrepare(context, query.sql);
-    }
-
-    if (KYLIN_ONLY_PREPARE.get() != null && KYLIN_ONLY_PREPARE.get()) {
-      // Kylin-specific early abort: callers that only want parse/validate set
-      // this flag and catch the exception to retrieve the ParseResult.
-      ParseResult parseResult = parse(context, query.sql);
-      throw new OnlyPrepareEarlyAbortException(context, parseResult);
-    }
-
-    final JavaTypeFactory typeFactory = context.getTypeFactory();
-    CalciteCatalogReader catalogReader =
-        new CalciteCatalogReader(
-            context.getRootSchema(),
-            context.config().caseSensitive(),
-            context.getDefaultSchemaPath(),
-            typeFactory);
-    final List<Function1<Context, RelOptPlanner>> plannerFactories =
-        createPlannerFactories();
-    if (plannerFactories.isEmpty()) {
-      throw new AssertionError("no planner factories");
-    }
-    RuntimeException exception = Util.FoundOne.NULL;
-    for (Function1<Context, RelOptPlanner> plannerFactory : plannerFactories) {
-      final RelOptPlanner planner = plannerFactory.apply(context);
-      if (planner == null) {
-        throw new AssertionError("factory returned null planner");
-      }
-      try {
-        return prepare2_(context, query, elementType, maxRowCount,
-            catalogReader, planner);
-      } catch (RelOptPlanner.CannotPlanException e) {
-        exception = e;
-      }
-    }
-    throw exception;
-  }
-
-  /** Quickly prepares a simple SQL statement, circumventing the usual
-   * preparation process. */
-  private <T> CalciteSignature<T> simplePrepare(Context context, String sql) {
-    final JavaTypeFactory typeFactory = context.getTypeFactory();
-    final RelDataType x =
-        typeFactory.builder()
-            .add(SqlUtil.deriveAliasFromOrdinal(0), SqlTypeName.INTEGER)
-            .build();
-    @SuppressWarnings("unchecked")
-    final List<T> list = (List) ImmutableList.of(1);
-    final List<String> origin = null;
-    final List<List<String>> origins =
-        Collections.nCopies(x.getFieldCount(), origin);
-    final List<ColumnMetaData> columns =
-        getColumnMetaDataList(typeFactory, x, x, origins);
-    final Meta.CursorFactory cursorFactory =
-        Meta.CursorFactory.deduce(columns, null);
-    return new CalciteSignature<>(
-        sql,
-        ImmutableList.<AvaticaParameter>of(),
-        ImmutableMap.<String, Object>of(),
-        x,
-        columns,
-        cursorFactory,
-        context.getRootSchema(),
-        ImmutableList.<RelCollation>of(),
-        -1,
-        new Bindable<T>() {
-          public Enumerable<T> bind(DataContext dataContext) {
-            return Linq4j.asEnumerable(list);
-          }
-        },
-        Meta.StatementType.SELECT);
-  }
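
In practice this fast path serves connection liveness probes: a statement whose text appears in the SIMPLE_SQLS whitelist referenced above never touches the planner. A sketch (that "SELECT 1" is in SIMPLE_SQLS, and the bare Calcite JDBC URL, are both assumptions here):

    try (java.sql.Connection conn =
             java.sql.DriverManager.getConnection("jdbc:calcite:");
         java.sql.Statement stmt = conn.createStatement();
         java.sql.ResultSet rs = stmt.executeQuery("SELECT 1")) {
      rs.next(); // single INTEGER column, constant value 1, planner bypassed
    }
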
-
-  /**
-   * Deduces the broad type of statement.
-   * Currently returns SELECT for most statement types, but this may change.
-   *
-   * @param kind Kind of statement
-   */
-  private Meta.StatementType getStatementType(SqlKind kind) {
-    switch (kind) {
-    case INSERT:
-    case DELETE:
-    case UPDATE:
-      return Meta.StatementType.IS_DML;
-    default:
-      return Meta.StatementType.SELECT;
-    }
-  }
-
-  /**
-   * Deduces the broad type of statement for a prepare result.
-   * Currently returns SELECT for most statement types, but this may change.
-   *
-   * @param preparedResult Prepare result
-   */
-  private Meta.StatementType getStatementType(Prepare.PreparedResult preparedResult) {
-    if (preparedResult.isDml()) {
-      return Meta.StatementType.IS_DML;
-    } else {
-      return Meta.StatementType.SELECT;
-    }
-  }
-
-  <T> CalciteSignature<T> prepare2_(
-      Context context,
-      Query<T> query,
-      Type elementType,
-      long maxRowCount,
-      CalciteCatalogReader catalogReader,
-      RelOptPlanner planner) {
-    final JavaTypeFactory typeFactory = context.getTypeFactory();
-    final EnumerableRel.Prefer prefer;
-    if (elementType == Object[].class) {
-      prefer = EnumerableRel.Prefer.ARRAY;
-    } else {
-      prefer = EnumerableRel.Prefer.CUSTOM;
-    }
-    final Convention resultConvention =
-        enableBindable ? BindableConvention.INSTANCE
-            : EnumerableConvention.INSTANCE;
-    final CalcitePreparingStmt preparingStmt =
-        new CalcitePreparingStmt(this, context, catalogReader, typeFactory,
-            context.getRootSchema(), prefer, planner, resultConvention,
-            createConvertletTable());
-
-    final RelDataType x;
-    final Prepare.PreparedResult preparedResult;
-    final Meta.StatementType statementType;
-    if (query.sql != null) {
-      final CalciteConnectionConfig config = context.config();
-      final SqlParser.ConfigBuilder parserConfig = createParserConfig()
-          .setQuotedCasing(config.quotedCasing())
-          .setUnquotedCasing(config.unquotedCasing())
-          .setQuoting(config.quoting())
-          .setConformance(config.conformance());
-      final SqlParserImplFactory parserFactory =
-          config.parserFactory(SqlParserImplFactory.class, null);
-      if (parserFactory != null) {
-        parserConfig.setParserFactory(parserFactory);
-      }
-      SqlParser parser = createParser(query.sql, parserConfig);
-      SqlNode sqlNode;
-      try {
-        sqlNode = parser.parseStmt();
-        statementType = getStatementType(sqlNode.getKind());
-      } catch (SqlParseException e) {
-        throw new RuntimeException(
-            "parse failed: " + e.getMessage(), e);
-      }
-
-      Hook.PARSE_TREE.run(new Object[] {query.sql, sqlNode});
-
-      if (sqlNode.getKind().belongsTo(SqlKind.DDL)) {
-        executeDdl(context, sqlNode);
-
-        // Return a dummy signature that contains no rows
-        final Bindable<T> bindable = new Bindable<T>() {
-          public Enumerable<T> bind(DataContext dataContext) {
-            return Linq4j.emptyEnumerable();
-          }
-        };
-        return new CalciteSignature<>(query.sql,
-            ImmutableList.<AvaticaParameter>of(),
-            ImmutableMap.<String, Object>of(), null,
-            ImmutableList.<ColumnMetaData>of(), Meta.CursorFactory.OBJECT,
-            null, ImmutableList.<RelCollation>of(), -1, bindable);
-      }
-
-      final SqlValidator validator =
-          createSqlValidator(context, catalogReader);
-      validator.setIdentifierExpansion(true);
-      validator.setDefaultNullCollation(config.defaultNullCollation());
-
-      preparedResult = preparingStmt.prepareSql(
-          sqlNode, Object.class, validator, true);
-      switch (sqlNode.getKind()) {
-      case INSERT:
-      case DELETE:
-      case UPDATE:
-      case EXPLAIN:
-        // FIXME: getValidatedNodeType is wrong for DML
-        x = RelOptUtil.createDmlRowType(sqlNode.getKind(), typeFactory);
-        break;
-      default:
-        x = validator.getValidatedNodeType(sqlNode);
-      }
-    } else if (query.queryable != null) {
-      x = context.getTypeFactory().createType(elementType);
-      preparedResult =
-          preparingStmt.prepareQueryable(query.queryable, x);
-      statementType = getStatementType(preparedResult);
-    } else {
-      assert query.rel != null;
-      x = query.rel.getRowType();
-      preparedResult = preparingStmt.prepareRel(query.rel);
-      statementType = getStatementType(preparedResult);
-    }
-
-    final List<AvaticaParameter> parameters = new ArrayList<>();
-    final RelDataType parameterRowType = preparedResult.getParameterRowType();
-    for (RelDataTypeField field : parameterRowType.getFieldList()) {
-      RelDataType type = field.getType();
-      parameters.add(
-          new AvaticaParameter(
-              false,
-              getPrecision(type),
-              getScale(type),
-              getTypeOrdinal(type),
-              getTypeName(type),
-              getClassName(type),
-              field.getName()));
-    }
-
-    RelDataType jdbcType = makeStruct(typeFactory, x);
-    final List<List<String>> originList = preparedResult.getFieldOrigins();
-    final List<ColumnMetaData> columns =
-        getColumnMetaDataList(typeFactory, x, jdbcType, originList);
-    Class resultClazz = null;
-    if (preparedResult instanceof Typed) {
-      resultClazz = (Class) ((Typed) preparedResult).getElementType();
-    }
-    final Meta.CursorFactory cursorFactory =
-        preparingStmt.resultConvention == BindableConvention.INSTANCE
-            ? Meta.CursorFactory.ARRAY
-            : Meta.CursorFactory.deduce(columns, resultClazz);
-    //noinspection unchecked
-    final Bindable<T> bindable = preparedResult.getBindable(cursorFactory);
-    return new CalciteSignature<>(
-        query.sql,
-        parameters,
-        preparingStmt.internalParameters,
-        jdbcType,
-        columns,
-        cursorFactory,
-        context.getRootSchema(),
-        preparedResult instanceof Prepare.PreparedResultImpl
-            ? ((Prepare.PreparedResultImpl) preparedResult).collations
-            : ImmutableList.<RelCollation>of(),
-        maxRowCount,
-        bindable,
-        statementType);
-  }
-
-  private SqlValidator createSqlValidator(Context context,
-                                          CalciteCatalogReader catalogReader) {
-    final SqlOperatorTable opTab0 =
-        context.config().fun(SqlOperatorTable.class,
-            SqlStdOperatorTable.instance());
-    final SqlOperatorTable opTab =
-        ChainedSqlOperatorTable.of(opTab0, catalogReader);
-    final JavaTypeFactory typeFactory = context.getTypeFactory();
-    final SqlConformance conformance = context.config().conformance();
-    return new CalciteSqlValidator(opTab, catalogReader, typeFactory,
-        conformance);
-  }
-
-  private List<ColumnMetaData> getColumnMetaDataList(
-          JavaTypeFactory typeFactory, RelDataType x, RelDataType jdbcType,
-          List<List<String>> originList) {
-    final List<ColumnMetaData> columns = new ArrayList<>();
-    for (Ord<RelDataTypeField> pair : Ord.zip(jdbcType.getFieldList())) {
-      final RelDataTypeField field = pair.e;
-      final RelDataType type = field.getType();
-      final RelDataType fieldType =
-          x.isStruct() ? x.getFieldList().get(pair.i).getType() : type;
-      columns.add(
-          metaData(typeFactory, columns.size(), field.getName(), type,
-              fieldType, originList.get(pair.i)));
-    }
-    return columns;
-  }
-
-  private ColumnMetaData metaData(JavaTypeFactory typeFactory, int ordinal,
-                                  String fieldName, RelDataType type, RelDataType fieldType,
-                                  List<String> origins) {
-    final ColumnMetaData.AvaticaType avaticaType =
-        avaticaType(typeFactory, type, fieldType);
-    return new ColumnMetaData(
-        ordinal,
-        false,
-        true,
-        false,
-        false,
-        type.isNullable()
-            ? DatabaseMetaData.columnNullable
-            : DatabaseMetaData.columnNoNulls,
-        true,
-        type.getPrecision(),
-        fieldName,
-        origin(origins, 0),
-        origin(origins, 2),
-        getPrecision(type),
-        getScale(type),
-        origin(origins, 1),
-        null,
-        avaticaType,
-        true,
-        false,
-        false,
-        avaticaType.columnClassName());
-  }
-
-  private ColumnMetaData.AvaticaType avaticaType(JavaTypeFactory typeFactory,
-                                                 RelDataType type, RelDataType fieldType) {
-    final String typeName = getTypeName(type);
-    if (type.getComponentType() != null) {
-      final ColumnMetaData.AvaticaType componentType =
-          avaticaType(typeFactory, type.getComponentType(), null);
-      final Type clazz = typeFactory.getJavaClass(type.getComponentType());
-      final ColumnMetaData.Rep rep = ColumnMetaData.Rep.of(clazz);
-      assert rep != null;
-      return ColumnMetaData.array(componentType, typeName, rep);
-    } else {
-      final int typeOrdinal = getTypeOrdinal(type);
-      switch (typeOrdinal) {
-      case Types.STRUCT:
-        final List<ColumnMetaData> columns = new ArrayList<>();
-        for (RelDataTypeField field : type.getFieldList()) {
-          columns.add(
-              metaData(typeFactory, field.getIndex(), field.getName(),
-                  field.getType(), null, null));
-        }
-        return ColumnMetaData.struct(columns);
-      default:
-        final Type clazz =
-            typeFactory.getJavaClass(Util.first(fieldType, type));
-        final ColumnMetaData.Rep rep = ColumnMetaData.Rep.of(clazz);
-        assert rep != null;
-        return ColumnMetaData.scalar(typeOrdinal, typeName, rep);
-      }
-    }
-  }
-
-  private static String origin(List<String> origins, int offsetFromEnd) {
-    return origins == null || offsetFromEnd >= origins.size()
-        ? null
-        : origins.get(origins.size() - 1 - offsetFromEnd);
-  }
-
-  private int getTypeOrdinal(RelDataType type) {
-    return type.getSqlTypeName().getJdbcOrdinal();
-  }
-
-  private static String getClassName(RelDataType type) {
-    return null;
-  }
-
-  private static int getScale(RelDataType type) {
-    return type.getScale() == RelDataType.SCALE_NOT_SPECIFIED
-        ? 0
-        : type.getScale();
-  }
-
-  private static int getPrecision(RelDataType type) {
-    return type.getPrecision() == RelDataType.PRECISION_NOT_SPECIFIED
-        ? 0
-        : type.getPrecision();
-  }
-
-  /** Returns the type name in string form. Does not include precision, scale
-   * or whether nulls are allowed. Example: "DECIMAL" not "DECIMAL(7, 2)";
-   * "INTEGER" not "JavaType(int)". */
-  private static String getTypeName(RelDataType type) {
-    final SqlTypeName sqlTypeName = type.getSqlTypeName();
-    switch (sqlTypeName) {
-    case ARRAY:
-    case MULTISET:
-    case MAP:
-    case ROW:
-      return type.toString(); // e.g. "INTEGER ARRAY"
-    case INTERVAL_YEAR_MONTH:
-      return "INTERVAL_YEAR_TO_MONTH";
-    case INTERVAL_DAY_HOUR:
-      return "INTERVAL_DAY_TO_HOUR";
-    case INTERVAL_DAY_MINUTE:
-      return "INTERVAL_DAY_TO_MINUTE";
-    case INTERVAL_DAY_SECOND:
-      return "INTERVAL_DAY_TO_SECOND";
-    case INTERVAL_HOUR_MINUTE:
-      return "INTERVAL_HOUR_TO_MINUTE";
-    case INTERVAL_HOUR_SECOND:
-      return "INTERVAL_HOUR_TO_SECOND";
-    case INTERVAL_MINUTE_SECOND:
-      return "INTERVAL_MINUTE_TO_SECOND";
-    default:
-      return sqlTypeName.getName(); // e.g. "DECIMAL", "INTERVAL_YEAR"
-    }
-  }
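
A quick illustration of the contract documented on getTypeName (an in-class sketch, assuming Calcite's JavaTypeFactoryImpl):

    RelDataTypeFactory factory = new org.apache.calcite.jdbc.JavaTypeFactoryImpl();
    RelDataType decimal = factory.createSqlType(SqlTypeName.DECIMAL, 7, 2);
    // getTypeName(decimal) returns "DECIMAL" -- precision and scale dropped --
    // while the interval cases above map to spellings like "INTERVAL_DAY_TO_HOUR".
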
-
-  protected void populateMaterializations(Context context,
-                                          RelOptPlanner planner, Prepare.Materialization materialization) {
-    // REVIEW: initialize queryRel and tableRel inside MaterializationService,
-    // not here?
-    try {
-      final CalciteSchema schema = materialization.materializedTable.schema;
-      CalciteCatalogReader catalogReader =
-          new CalciteCatalogReader(
-              schema.root(),
-              context.config().caseSensitive(),
-              materialization.viewSchemaPath,
-              context.getTypeFactory());
-      final CalciteMaterializer materializer =
-          new CalciteMaterializer(this, context, catalogReader, schema, planner,
-              createConvertletTable());
-      materializer.populate(materialization);
-    } catch (Exception e) {
-      throw new RuntimeException("While populating materialization "
-          + materialization.materializedTable.path(), e);
-    }
-  }
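
populateMaterializations above only matters when materializations are enabled on the connection; Calcite exposes this as the materializationsEnabled connection property. A sketch of switching it on (the URL is illustrative):

    java.util.Properties props = new java.util.Properties();
    props.setProperty("materializationsEnabled", "true");
    java.sql.Connection conn =
        java.sql.DriverManager.getConnection("jdbc:calcite:", props);
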
-
-  private static RelDataType makeStruct(
-      RelDataTypeFactory typeFactory,
-      RelDataType type) {
-    if (type.isStruct()) {
-      return type;
-    }
-    return typeFactory.builder().add("$0", type).build();
-  }
-
-  /** Executes a prepare action. */
-  public <R> R perform(CalciteServerStatement statement,
-      Frameworks.PrepareAction<R> action) {
-    final CalcitePrepare.Context prepareContext =
-        statement.createPrepareContext();
-    final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
-    final CalciteSchema schema =
-        action.getConfig().getDefaultSchema() != null
-            ? CalciteSchema.from(action.getConfig().getDefaultSchema())
-            : prepareContext.getRootSchema();
-    CalciteCatalogReader catalogReader =
-        new CalciteCatalogReader(schema.root(),
-            prepareContext.config().caseSensitive(),
-            schema.path(null),
-            typeFactory);
-    final RexBuilder rexBuilder = new RexBuilder(typeFactory);
-    final RelOptPlanner planner =
-        createPlanner(prepareContext,
-            action.getConfig().getContext(),
-            action.getConfig().getCostFactory());
-    final RelOptCluster cluster = createCluster(planner, rexBuilder);
-    return action.apply(cluster, catalogReader,
-        prepareContext.getRootSchema().plus(), statement);
-  }
-
-  /** Holds state for the process of preparing a SQL statement. */
-  static class CalcitePreparingStmt extends Prepare
-      implements RelOptTable.ViewExpander {
-    protected final RelOptPlanner planner;
-    protected final RexBuilder rexBuilder;
-    protected final CalcitePrepareImpl prepare;
-    protected final CalciteSchema schema;
-    protected final RelDataTypeFactory typeFactory;
-    protected final SqlRexConvertletTable convertletTable;
-    private final EnumerableRel.Prefer prefer;
-    private final Map<String, Object> internalParameters =
-        Maps.newLinkedHashMap();
-    private int expansionDepth;
-    private SqlValidator sqlValidator;
-
-    public CalcitePreparingStmt(CalcitePrepareImpl prepare,
-        Context context,
-        CatalogReader catalogReader,
-        RelDataTypeFactory typeFactory,
-        CalciteSchema schema,
-        EnumerableRel.Prefer prefer,
-        RelOptPlanner planner,
-        Convention resultConvention,
-        SqlRexConvertletTable convertletTable) {
-      super(context, catalogReader, resultConvention);
-      this.prepare = prepare;
-      this.schema = schema;
-      this.prefer = prefer;
-      this.planner = planner;
-      this.typeFactory = typeFactory;
-      this.convertletTable = convertletTable;
-      this.rexBuilder = new RexBuilder(typeFactory);
-    }
-
-    @Override protected void init(Class runtimeContextClass) {
-    }
-
-    public PreparedResult prepareQueryable(
-        final Queryable queryable,
-        RelDataType resultType) {
-      return prepare_(
-          new Supplier<RelNode>() {
-            public RelNode get() {
-              final RelOptCluster cluster =
-                  prepare.createCluster(planner, rexBuilder);
-              return new LixToRelTranslator(cluster, CalcitePreparingStmt.this)
-                  .translate(queryable);
-            }
-          }, resultType);
-    }
-
-    public PreparedResult prepareRel(final RelNode rel) {
-      return prepare_(
-          new Supplier<RelNode>() {
-            public RelNode get() {
-              return rel;
-            }
-          }, rel.getRowType());
-    }
-
-    private PreparedResult prepare_(Supplier<RelNode> fn,
-                                    RelDataType resultType) {
-      queryString = null;
-      Class runtimeContextClass = Object.class;
-      init(runtimeContextClass);
-
-      final RelNode rel = fn.get();
-      final RelDataType rowType = rel.getRowType();
-      final List<Pair<Integer, String>> fields =
-          Pair.zip(ImmutableIntList.identity(rowType.getFieldCount()),
-              rowType.getFieldNames());
-      final RelCollation collation =
-          rel instanceof Sort
-              ? ((Sort) rel).collation
-              : RelCollations.EMPTY;
-      RelRoot root = new RelRoot(rel, resultType, SqlKind.SELECT, fields,
-          collation);
-
-      if (timingTracer != null) {
-        timingTracer.traceTime("end sql2rel");
-      }
-
-      final RelDataType jdbcType =
-          makeStruct(rexBuilder.getTypeFactory(), resultType);
-      fieldOrigins = Collections.nCopies(jdbcType.getFieldCount(), null);
-      parameterRowType = rexBuilder.getTypeFactory().builder().build();
-
-      // Structured type flattening, view expansion, and plugging in
-      // physical storage.
-      root = root.withRel(flattenTypes(root.rel, true));
-
-      // Trim unused fields.
-      root = trimUnusedFields(root);
-
-      final List<Materialization> materializations = ImmutableList.of();
-      final List<LatticeEntry> lattices = ImmutableList.of();
-      long start = System.currentTimeMillis();
-      LOGGER.info("Begin optimize");
-      root = optimize(root, materializations, lattices);
-      LOGGER.info("End optimize, took " + (System.currentTimeMillis() - start) + " ms");
-
-      if (timingTracer != null) {
-        timingTracer.traceTime("end optimization");
-      }
-
-      return implement(root);
-    }
-
-    @Override protected SqlToRelConverter getSqlToRelConverter(
-        SqlValidator validator,
-        CatalogReader catalogReader,
-        SqlToRelConverter.Config config) {
-      final RelOptCluster cluster = prepare.createCluster(planner, rexBuilder);
-      SqlToRelConverter sqlToRelConverter =
-          new SqlToRelConverter(this, validator, catalogReader, cluster,
-              convertletTable, config);
-      return sqlToRelConverter;
-    }
-
-    @Override public RelNode flattenTypes(
-        RelNode rootRel,
-        boolean restructure) {
-      final SparkHandler spark = context.spark();
-      if (spark.enabled()) {
-        return spark.flattenTypes(planner, rootRel, restructure);
-      }
-      return rootRel;
-    }
-
-    @Override protected RelNode decorrelate(SqlToRelConverter sqlToRelConverter,
-                                            SqlNode query, RelNode rootRel) {
-      return sqlToRelConverter.decorrelate(query, rootRel);
-    }
-
-    @Override public RelRoot expandView(RelDataType rowType, String queryString,
-                                        List<String> schemaPath, List<String> viewPath) {
-      expansionDepth++;
-
-      SqlParser parser = prepare.createParser(queryString);
-      SqlNode sqlNode;
-      try {
-        sqlNode = parser.parseQuery();
-      } catch (SqlParseException e) {
-        throw new RuntimeException("parse failed", e);
-      }
-      // View may have different schema path than current connection.
-      final CatalogReader catalogReader =
-          this.catalogReader.withSchemaPath(schemaPath);
-      SqlValidator validator = createSqlValidator(catalogReader);
-      SqlNode sqlNode1 = validator.validate(sqlNode);
-      final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
-              .withTrimUnusedFields(true).build();
-      SqlToRelConverter sqlToRelConverter =
-          getSqlToRelConverter(validator, catalogReader, config);
-      RelRoot root =
-          sqlToRelConverter.convertQuery(sqlNode1, true, false);
-
-      --expansionDepth;
-      return root;
-    }
-
-    protected SqlValidator createSqlValidator(CatalogReader catalogReader) {
-      return prepare.createSqlValidator(context,
-          (CalciteCatalogReader) catalogReader);
-    }
-
-    @Override protected SqlValidator getSqlValidator() {
-      if (sqlValidator == null) {
-        sqlValidator = createSqlValidator(catalogReader);
-      }
-      return sqlValidator;
-    }
-
-    @Override protected PreparedResult createPreparedExplanation(
-        RelDataType resultType,
-        RelDataType parameterRowType,
-        RelRoot root,
-        SqlExplainFormat format,
-        SqlExplainLevel detailLevel) {
-      return new CalcitePreparedExplain(resultType, parameterRowType, root,
-          format, detailLevel);
-    }
-
-    @Override protected PreparedResult implement(RelRoot root) {
-      if (RelUtils.findOLAPRel(root.rel) && !root.rel.getClass().getName().contains("OLAPToEnumerableConverter")) {
-        String dumpPlan = RelOptUtil.dumpPlan("", root.rel, false, SqlExplainLevel.DIGEST_ATTRIBUTES);
-        throw new IllegalArgumentException("Error planner: " + dumpPlan);
-      }
-      RelDataType resultType = root.rel.getRowType();
-      boolean isDml = root.kind.belongsTo(SqlKind.DML);
-      final Bindable bindable;
-      if (resultConvention == BindableConvention.INSTANCE) {
-        bindable = Interpreters.bindable(root.rel);
-      } else {
-        EnumerableRel enumerable = (EnumerableRel) root.rel;
-        if (!root.isRefTrivial()) {
-          final List<RexNode> projects = new ArrayList<>();
-          final RexBuilder rexBuilder = enumerable.getCluster().getRexBuilder();
-          for (int field : Pair.left(root.fields)) {
-            projects.add(rexBuilder.makeInputRef(enumerable, field));
-          }
-          RexProgram program = RexProgram.create(enumerable.getRowType(),
-              projects, null, root.validatedRowType, rexBuilder);
-          enumerable = EnumerableCalc.create(enumerable, program);
-        }
-
-        try {
-          CatalogReader.THREAD_LOCAL.set(catalogReader);
-          bindable = EnumerableInterpretable.toBindable(internalParameters,
-              context.spark(), enumerable, prefer);
-        } finally {
-          CatalogReader.THREAD_LOCAL.remove();
-        }
-      }
-
-      if (timingTracer != null) {
-        timingTracer.traceTime("end codegen");
-      }
-
-      if (timingTracer != null) {
-        timingTracer.traceTime("end compilation");
-      }
-
-      return new PreparedResultImpl(
-          resultType,
-          parameterRowType,
-          fieldOrigins,
-          root.collation.getFieldCollations().isEmpty()
-              ? ImmutableList.<RelCollation>of()
-              : ImmutableList.of(root.collation),
-          root.rel,
-          mapTableModOp(isDml, root.kind),
-          isDml) {
-        public String getCode() {
-          throw new UnsupportedOperationException();
-        }
-
-        public Bindable getBindable(Meta.CursorFactory cursorFactory) {
-          return bindable;
-        }
-
-        public Type getElementType() {
-          return ((Typed) bindable).getElementType();
-        }
-      };
-    }
-
-    @Override protected List<Materialization> getMaterializations() {
-      final List<Materialization> materializations =
-          context.config().materializationsEnabled()
-              ? MaterializationService.instance().query(schema)
-              : ImmutableList.<Prepare.Materialization>of();
-      for (Prepare.Materialization materialization : materializations) {
-        prepare.populateMaterializations(context, planner, materialization);
-      }
-      return materializations;
-    }
-
-    @Override protected List<LatticeEntry> getLattices() {
-      return Schemas.getLatticeEntries(schema);
-    }
-  }
-
-  /** An {@code EXPLAIN} statement, prepared and ready to execute. */
-  private static class CalcitePreparedExplain extends Prepare.PreparedExplain {
-    public CalcitePreparedExplain(
-        RelDataType resultType,
-        RelDataType parameterRowType,
-        RelRoot root,
-        SqlExplainFormat format,
-        SqlExplainLevel detailLevel) {
-      super(resultType, parameterRowType, root, format, detailLevel);
-    }
-
-    public Bindable getBindable(final Meta.CursorFactory cursorFactory) {
-      final String explanation = getCode();
-      return new Bindable() {
-        public Enumerable bind(DataContext dataContext) {
-          switch (cursorFactory.style) {
-          case ARRAY:
-            return Linq4j.singletonEnumerable(new String[] {explanation});
-          case OBJECT:
-          default:
-            return Linq4j.singletonEnumerable(explanation);
-          }
-        }
-      };
-    }
-  }
-
-  /** Translator from Java AST to {@link RexNode}. */
-  interface ScalarTranslator {
-    RexNode toRex(BlockStatement statement);
-    List<RexNode> toRexList(BlockStatement statement);
-    RexNode toRex(Expression expression);
-    ScalarTranslator bind(List<ParameterExpression> parameterList,
-                          List<RexNode> values);
-  }
-
-  /** Basic translator. */
-  static class EmptyScalarTranslator implements ScalarTranslator {
-    private final RexBuilder rexBuilder;
-
-    public EmptyScalarTranslator(RexBuilder rexBuilder) {
-      this.rexBuilder = rexBuilder;
-    }
-
-    public static ScalarTranslator empty(RexBuilder builder) {
-      return new EmptyScalarTranslator(builder);
-    }
-
-    public List<RexNode> toRexList(BlockStatement statement) {
-      final List<Expression> simpleList = simpleList(statement);
-      final List<RexNode> list = new ArrayList<>();
-      for (Expression expression1 : simpleList) {
-        list.add(toRex(expression1));
-      }
-      return list;
-    }
-
-    public RexNode toRex(BlockStatement statement) {
-      return toRex(Blocks.simple(statement));
-    }
-
-    private static List<Expression> simpleList(BlockStatement statement) {
-      Expression simple = Blocks.simple(statement);
-      if (simple instanceof NewExpression) {
-        NewExpression newExpression = (NewExpression) simple;
-        return newExpression.arguments;
-      } else {
-        return Collections.singletonList(simple);
-      }
-    }
-
-    public RexNode toRex(Expression expression) {
-      switch (expression.getNodeType()) {
-      case MemberAccess:
-        // Case-sensitive name match because name was previously resolved.
-        return rexBuilder.makeFieldAccess(
-            toRex(
-                ((MemberExpression) expression).expression),
-            ((MemberExpression) expression).field.getName(),
-            true);
-      case GreaterThan:
-        return binary(expression, SqlStdOperatorTable.GREATER_THAN);
-      case LessThan:
-        return binary(expression, SqlStdOperatorTable.LESS_THAN);
-      case Parameter:
-        return parameter((ParameterExpression) expression);
-      case Call:
-        MethodCallExpression call = (MethodCallExpression) expression;
-        SqlOperator operator =
-            RexToLixTranslator.JAVA_TO_SQL_METHOD_MAP.get(call.method);
-        if (operator != null) {
-          return rexBuilder.makeCall(
-              type(call),
-              operator,
-              toRex(
-                  Expressions.<Expression>list()
-                      .appendIfNotNull(call.targetExpression)
-                      .appendAll(call.expressions)));
-        }
-        throw new RuntimeException(
-            "Could not translate call to method " + call.method);
-      case Constant:
-        final ConstantExpression constant =
-            (ConstantExpression) expression;
-        Object value = constant.value;
-        if (value instanceof Number) {
-          Number number = (Number) value;
-          if (value instanceof Double || value instanceof Float) {
-            return rexBuilder.makeApproxLiteral(
-                BigDecimal.valueOf(number.doubleValue()));
-          } else if (value instanceof BigDecimal) {
-            return rexBuilder.makeExactLiteral((BigDecimal) value);
-          } else {
-            return rexBuilder.makeExactLiteral(
-                BigDecimal.valueOf(number.longValue()));
-          }
-        } else if (value instanceof Boolean) {
-          return rexBuilder.makeLiteral((Boolean) value);
-        } else {
-          return rexBuilder.makeLiteral(constant.toString());
-        }
-      default:
-        throw new UnsupportedOperationException(
-            "unknown expression type " + expression.getNodeType() + " "
-            + expression);
-      }
-    }
-
-    private RexNode binary(Expression expression, SqlBinaryOperator op) {
-      BinaryExpression call = (BinaryExpression) expression;
-      return rexBuilder.makeCall(type(call), op,
-          toRex(ImmutableList.of(call.expression0, call.expression1)));
-    }
-
-    private List<RexNode> toRex(List<Expression> expressions) {
-      final List<RexNode> list = new ArrayList<>();
-      for (Expression expression : expressions) {
-        list.add(toRex(expression));
-      }
-      return list;
-    }
-
-    protected RelDataType type(Expression expression) {
-      final Type type = expression.getType();
-      return ((JavaTypeFactory) rexBuilder.getTypeFactory()).createType(type);
-    }
-
-    public ScalarTranslator bind(
-            List<ParameterExpression> parameterList, List<RexNode> values) {
-      return new LambdaScalarTranslator(
-          rexBuilder, parameterList, values);
-    }
-
-    public RexNode parameter(ParameterExpression param) {
-      throw new RuntimeException("unknown parameter " + param);
-    }
-  }
-
-  /** Translator that looks for parameters. */
-  private static class LambdaScalarTranslator extends EmptyScalarTranslator {
-    private final List<ParameterExpression> parameterList;
-    private final List<RexNode> values;
-
-    public LambdaScalarTranslator(
-        RexBuilder rexBuilder,
-        List<ParameterExpression> parameterList,
-        List<RexNode> values) {
-      super(rexBuilder);
-      this.parameterList = parameterList;
-      this.values = values;
-    }
-
-    public RexNode parameter(ParameterExpression param) {
-      int i = parameterList.indexOf(param);
-      if (i >= 0) {
-        return values.get(i);
-      }
-      throw new RuntimeException("unknown parameter " + param);
-    }
-  }
-}
-
-// End CalcitePrepareImpl.java
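
One more note on the file above: its ScalarTranslator machinery turns linq4j expression trees into RexNodes, with bind() substituting RexNode values for lambda parameters. A minimal sketch of that flow (the class is package-private, so this must live in org.apache.calcite.prepare; all names are illustrative):

    package org.apache.calcite.prepare;

    import com.google.common.collect.ImmutableList;
    import org.apache.calcite.adapter.java.JavaTypeFactory;
    import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
    import org.apache.calcite.linq4j.tree.Expressions;
    import org.apache.calcite.linq4j.tree.ParameterExpression;
    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rex.RexBuilder;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.sql.type.SqlTypeName;

    class ScalarTranslatorDemo {
      static RexNode demo() {
        JavaTypeFactory typeFactory = new JavaTypeFactoryImpl();
        RexBuilder rexBuilder = new RexBuilder(typeFactory);
        RelDataType rowType =
            typeFactory.builder().add("X", SqlTypeName.INTEGER).build();

        ParameterExpression x = Expressions.parameter(int.class, "x");
        CalcitePrepareImpl.ScalarTranslator bound =
            CalcitePrepareImpl.EmptyScalarTranslator.empty(rexBuilder)
                .bind(ImmutableList.of(x),
                    ImmutableList.<RexNode>of(rexBuilder.makeInputRef(rowType, 0)));

        // "x > 10" becomes GREATER_THAN($0, 10) over the bound input reference.
        return bound.toRex(Expressions.greaterThan(x, Expressions.constant(10)));
      }
    }
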
diff --git a/atopcalcite/src/main/java/org/apache/calcite/rel/rules/FilterJoinRule.java b/atopcalcite/src/main/java/org/apache/calcite/rel/rules/FilterJoinRule.java
deleted file mode 100644
index f758b40735..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/rel/rules/FilterJoinRule.java
+++ /dev/null
@@ -1,311 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.calcite.rel.rules;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptRuleOperand;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.EquiJoin;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.tools.RelBuilderFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Disabled in Kylin: although OLAPTableScan tries to turn this rule off, it is
- * sometimes still triggered, so the onMatch bodies below are commented out.
- */
-
-/**
- * Planner rule that pushes filters above and
- * within a join node into the join node and/or its children nodes.
- */
-public abstract class FilterJoinRule extends RelOptRule {
-    /** Predicate that always returns true. With this predicate, every filter
-     * will be pushed into the ON clause. */
-    public static final Predicate TRUE_PREDICATE = new Predicate() {
-        public boolean apply(Join join, JoinRelType joinType, RexNode exp) {
-            return true;
-        }
-    };
-
-    /** Rule that pushes predicates from a Filter into the Join below them. */
-    public static final FilterJoinRule FILTER_ON_JOIN = new FilterIntoJoinRule(true, RelFactories.LOGICAL_BUILDER,
-            TRUE_PREDICATE);
-
-    /** Dumber version of {@link #FILTER_ON_JOIN}. Not intended for production
-     * use, but keeps some tests working for which {@code FILTER_ON_JOIN} is too
-     * smart. */
-    public static final FilterJoinRule DUMB_FILTER_ON_JOIN = new FilterIntoJoinRule(false, RelFactories.LOGICAL_BUILDER,
-            TRUE_PREDICATE);
-
-    /** Rule that pushes predicates in a Join into the inputs to the join. */
-    public static final FilterJoinRule JOIN = new JoinConditionPushRule(RelFactories.LOGICAL_BUILDER, TRUE_PREDICATE);
-
-    /** Whether to try to strengthen join-type. */
-    private final boolean smart;
-
-    /** Predicate that returns whether a filter is valid in the ON clause of a
-     * join for this particular kind of join. If not, Calcite will push it back to
-     * above the join. */
-    private final Predicate predicate;
-
-    //~ Constructors -----------------------------------------------------------
-
-    /**
-     * Creates a FilterJoinRule with an explicit root operand and
-     * factories.
-     */
-    protected FilterJoinRule(RelOptRuleOperand operand, String id, boolean smart, RelBuilderFactory relBuilderFactory,
-            Predicate predicate) {
-        super(operand, relBuilderFactory, "FilterJoinRule:" + id);
-        this.smart = smart;
-        this.predicate = Preconditions.checkNotNull(predicate);
-    }
-
-    /**
-     * Creates a FilterJoinRule with an explicit root operand and
-     * factories.
-     */
-    @Deprecated // to be removed before 2.0
-    protected FilterJoinRule(RelOptRuleOperand operand, String id, boolean smart,
-            RelFactories.FilterFactory filterFactory, RelFactories.ProjectFactory projectFactory) {
-        this(operand, id, smart, RelBuilder.proto(filterFactory, projectFactory), TRUE_PREDICATE);
-    }
-
-    /**
-     * Creates a FilterJoinRule with an explicit root operand and
-     * factories.
-     */
-    @Deprecated // to be removed before 2.0
-    protected FilterJoinRule(RelOptRuleOperand operand, String id, boolean smart,
-            RelFactories.FilterFactory filterFactory, RelFactories.ProjectFactory projectFactory, Predicate predicate) {
-        this(operand, id, smart, RelBuilder.proto(filterFactory, projectFactory), predicate);
-    }
-
-    //~ Methods ----------------------------------------------------------------
-
-    protected void perform(RelOptRuleCall call, Filter filter, Join join) {
-        final List<RexNode> joinFilters = RelOptUtil.conjunctions(join.getCondition());
-        final List<RexNode> origJoinFilters = ImmutableList.copyOf(joinFilters);
-
-        // If there is only the joinRel,
-        // make sure it does not match a cartesian product joinRel
-        // (with "true" condition), otherwise this rule will be applied
-        // again on the new cartesian product joinRel.
-        if (filter == null && joinFilters.isEmpty()) {
-            return;
-        }
-
-        final List<RexNode> aboveFilters = filter != null ? RelOptUtil.conjunctions(filter.getCondition())
-                : Lists.<RexNode> newArrayList();
-        final ImmutableList<RexNode> origAboveFilters = ImmutableList.copyOf(aboveFilters);
-
-        // Simplify Outer Joins
-        JoinRelType joinType = join.getJoinType();
-        if (smart && !origAboveFilters.isEmpty() && join.getJoinType() != JoinRelType.INNER) {
-            joinType = RelOptUtil.simplifyJoin(join, origAboveFilters, joinType);
-        }
-
-        final List<RexNode> leftFilters = new ArrayList<>();
-        final List<RexNode> rightFilters = new ArrayList<>();
-
-        // TODO - add logic to derive additional filters.  E.g., from
-        // (t1.a = 1 AND t2.a = 2) OR (t1.b = 3 AND t2.b = 4), you can
-        // derive table filters:
-        // (t1.a = 1 OR t1.b = 3)
-        // (t2.a = 2 OR t2.b = 4)
-
-        // Try to push down above filters. These are typically where clause
-        // filters. They can be pushed down if they are not on the NULL
-        // generating side.
-        boolean filterPushed = false;
-        if (RelOptUtil.classifyFilters(join, aboveFilters, joinType, !(join instanceof EquiJoin),
-                !joinType.generatesNullsOnLeft(), !joinType.generatesNullsOnRight(), joinFilters, leftFilters,
-                rightFilters)) {
-            filterPushed = true;
-        }
-
-        // Move join filters up if needed
-        validateJoinFilters(aboveFilters, joinFilters, join, joinType);
-
-        // If no filter got pushed after validate, reset filterPushed flag
-        if (leftFilters.isEmpty() && rightFilters.isEmpty() && joinFilters.size() == origJoinFilters.size()) {
-            if (Sets.newHashSet(joinFilters).equals(Sets.newHashSet(origJoinFilters))) {
-                filterPushed = false;
-            }
-        }
-
-        // Try to push down filters in the ON clause. An ON clause filter can only be
-        // pushed down if it does not affect the non-matching set, i.e. it is
-        // not on the side which is preserved.
-        if (RelOptUtil.classifyFilters(join, joinFilters, joinType, false, !joinType.generatesNullsOnRight(),
-                !joinType.generatesNullsOnLeft(), joinFilters, leftFilters, rightFilters)) {
-            filterPushed = true;
-        }
-
-        // if nothing actually got pushed and there is nothing leftover,
-        // then this rule is a no-op
-        if ((!filterPushed && joinType == join.getJoinType())
-                || (joinFilters.isEmpty() && leftFilters.isEmpty() && rightFilters.isEmpty())) {
-            return;
-        }
-
-        // create Filters on top of the children if any filters were
-        // pushed to them
-        final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
-        final RelBuilder relBuilder = call.builder();
-        final RelNode leftRel = relBuilder.push(join.getLeft()).filter(leftFilters).build();
-        final RelNode rightRel = relBuilder.push(join.getRight()).filter(rightFilters).build();
-
-        // create the new join node referencing the new children and
-        // containing its new join filters (if there are any)
-        final ImmutableList<RelDataType> fieldTypes = ImmutableList.<RelDataType> builder()
-                .addAll(RelOptUtil.getFieldTypeList(leftRel.getRowType()))
-                .addAll(RelOptUtil.getFieldTypeList(rightRel.getRowType())).build();
-        final RexNode joinFilter = RexUtil.composeConjunction(rexBuilder,
-                RexUtil.fixUp(rexBuilder, joinFilters, fieldTypes), false);
-
-        // If nothing actually got pushed and there is nothing leftover,
-        // then this rule is a no-op
-        if (joinFilter.isAlwaysTrue() && leftFilters.isEmpty() && rightFilters.isEmpty()
-                && joinType == join.getJoinType()) {
-            return;
-        }
-
-        RelNode newJoinRel = join.copy(join.getTraitSet(), joinFilter, leftRel, rightRel, joinType,
-                join.isSemiJoinDone());
-        call.getPlanner().onCopy(join, newJoinRel);
-        if (!leftFilters.isEmpty()) {
-            call.getPlanner().onCopy(filter, leftRel);
-        }
-        if (!rightFilters.isEmpty()) {
-            call.getPlanner().onCopy(filter, rightRel);
-        }
-
-        relBuilder.push(newJoinRel);
-
-        // Create a project on top of the join if some of the columns have become
-        // NOT NULL due to the join-type getting stricter.
-        relBuilder.convert(join.getRowType(), false);
-
-        // create a FilterRel on top of the join if needed
-        relBuilder.filter(
-                RexUtil.fixUp(rexBuilder, aboveFilters, RelOptUtil.getFieldTypeList(relBuilder.peek().getRowType())));
-
-        call.transformTo(relBuilder.build());
-    }
-
-    /**
-     * Validates that target execution framework can satisfy join filters.
-     *
-     * <p>If the join filter cannot be satisfied (for example, if it is
-     * {@code l.c1 > r.c2} and the join only supports equi-join), removes the
-     * filter from {@code joinFilters} and adds it to {@code aboveFilters}.
-     *
-     * <p>The default implementation does nothing; i.e. the join can handle all
-     * conditions.
-     *
-     * @param aboveFilters Filter above Join
-     * @param joinFilters Filters in join condition
-     * @param join Join
-     * @param joinType JoinRelType could be different from type in Join due to
-     * outer join simplification.
-     */
-    protected void validateJoinFilters(List<RexNode> aboveFilters, List<RexNode> joinFilters, Join join,
-            JoinRelType joinType) {
-        final Iterator<RexNode> filterIter = joinFilters.iterator();
-        while (filterIter.hasNext()) {
-            RexNode exp = filterIter.next();
-            if (!predicate.apply(join, joinType, exp)) {
-                aboveFilters.add(exp);
-                filterIter.remove();
-            }
-        }
-    }
-
-    /** Rule that pushes parts of the join condition to its inputs. */
-    public static class JoinConditionPushRule extends FilterJoinRule {
-        public JoinConditionPushRule(RelBuilderFactory relBuilderFactory, Predicate predicate) {
-            super(RelOptRule.operand(Join.class, RelOptRule.any()), "FilterJoinRule:no-filter", true, relBuilderFactory,
-                    predicate);
-        }
-
-        @Deprecated // to be removed before 2.0
-        public JoinConditionPushRule(RelFactories.FilterFactory filterFactory,
-                RelFactories.ProjectFactory projectFactory, Predicate predicate) {
-            this(RelBuilder.proto(filterFactory, projectFactory), predicate);
-        }
-
-        @Override
-        public void onMatch(RelOptRuleCall call) {
-            Join join = call.rel(0);
-            // HACK POINT
-//            perform(call, null, join);
-        }
-    }
-
-    /** Rule that tries to push filter expressions into a join
-     * condition and into the inputs of the join. */
-    public static class FilterIntoJoinRule extends FilterJoinRule {
-        public FilterIntoJoinRule(boolean smart, RelBuilderFactory relBuilderFactory, Predicate predicate) {
-            super(operand(Filter.class, operand(Join.class, RelOptRule.any())), "FilterJoinRule:filter", smart,
-                    relBuilderFactory, predicate);
-        }
-
-        @Deprecated // to be removed before 2.0
-        public FilterIntoJoinRule(boolean smart, RelFactories.FilterFactory filterFactory,
-                RelFactories.ProjectFactory projectFactory, Predicate predicate) {
-            this(smart, RelBuilder.proto(filterFactory, projectFactory), predicate);
-        }
-
-        @Override
-        public void onMatch(RelOptRuleCall call) {
-            Filter filter = call.rel(0);
-            Join join = call.rel(1);
-            // HACK POINT
-//            perform(call, filter, join);
-        }
-    }
-
-    /** Predicate that returns whether a filter is valid in the ON clause of a
-     * join for this particular kind of join. If not, Calcite will push it back to
-     * above the join. */
-    public interface Predicate {
-        boolean apply(Join join, JoinRelType joinType, RexNode exp);
-    }
-}
-
-// End FilterJoinRule.java
\ No newline at end of file
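
The Predicate hook in the file above is the interesting extension point: for each
filter expression it decides whether the condition may stay in the join's ON clause,
and validateJoinFilters() moves rejected conditions back above the join. Note that
both rule subclasses comment out their perform() call at the "HACK POINT" markers,
so this fork effectively disables filter/join transposition. As a minimal sketch of
how such a predicate is typically written (hypothetical class name; assumes Calcite
on the classpath; it admits only plain column-equality conditions):

    import org.apache.calcite.rel.core.Join;
    import org.apache.calcite.rel.core.JoinRelType;
    import org.apache.calcite.rel.rules.FilterJoinRule;
    import org.apache.calcite.rex.RexCall;
    import org.apache.calcite.rex.RexInputRef;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.sql.SqlKind;

    /** Hypothetical predicate: only "col = col" conditions may remain in the
     * ON clause; everything else is pushed back above the join. */
    public class EquiConditionPredicate implements FilterJoinRule.Predicate {
        @Override
        public boolean apply(Join join, JoinRelType joinType, RexNode exp) {
            if (!exp.isA(SqlKind.EQUALS)) {
                return false;  // e.g. l.c1 > r.c2 is rejected
            }
            RexCall call = (RexCall) exp;
            // Both operands must be direct input references, not expressions.
            return call.getOperands().get(0) instanceof RexInputRef
                    && call.getOperands().get(1) instanceof RexInputRef;
        }
    }
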
diff --git a/atopcalcite/src/main/java/org/apache/calcite/rel/rules/OLAPJoinPushThroughJoinRule.java b/atopcalcite/src/main/java/org/apache/calcite/rel/rules/OLAPJoinPushThroughJoinRule.java
deleted file mode 100644
index 35f2ae6752..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/rel/rules/OLAPJoinPushThroughJoinRule.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *     http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-package org.apache.calcite.rel.rules;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.annotation.Nullable;
-
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.logical.LogicalJoin;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexPermuteInputsShuttle;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.mapping.Mappings;
-
-import com.google.common.base.Predicate;
-
-/**
- * Modified from org.apache.calcite.rel.rules.JoinPushThroughJoinRule.
- * The goal is to move joins with sub-queries after joins with tables,
- * so that a pre-defined join with tables can be matched.
- */
-public class OLAPJoinPushThroughJoinRule extends RelOptRule {
-    /**
-     * Instance of the rule that works on logical joins only, and pushes to the
-     * right.
-     */
-    public static final RelOptRule INSTANCE = new OLAPJoinPushThroughJoinRule("OLAPJoinPushThroughJoinRule", LogicalJoin.class, RelFactories.LOGICAL_BUILDER);
-
-    public OLAPJoinPushThroughJoinRule(String description, Class<? extends Join> clazz, RelBuilderFactory relBuilderFactory) {
-        super(operand(clazz,
-
-                operand(clazz, operand(RelNode.class, any()), operand(RelNode.class, null, new Predicate<RelNode>() {
-                    @Override
-                    public boolean apply(@Nullable RelNode input) {
-                        return !(input instanceof TableScan);
-                    }
-                }, any())),
-
-                operand(TableScan.class, any())), relBuilderFactory, description);
-    }
-
-    @Override
-    public void onMatch(RelOptRuleCall call) {
-        onMatchRight(call);
-    }
-
-    private void onMatchRight(RelOptRuleCall call) {
-        final Join topJoin = call.rel(0);
-        final Join bottomJoin = call.rel(1);
-        final RelNode relC = call.rel(4);
-        final RelNode relA = bottomJoin.getLeft();
-        final RelNode relB = bottomJoin.getRight();
-        final RelOptCluster cluster = topJoin.getCluster();
-        //        Preconditions.checkState(relA == call.rel(2));
-        //        Preconditions.checkState(relB == call.rel(3));
-
-        //        topJoin
-        //        /     \
-        //   bottomJoin  C
-        //    /    \
-        //   A      B
-
-        final int aCount = relA.getRowType().getFieldCount();
-        final int bCount = relB.getRowType().getFieldCount();
-        final int cCount = relC.getRowType().getFieldCount();
-        final ImmutableBitSet bBitSet = ImmutableBitSet.range(aCount, aCount + bCount);
-
-        // becomes
-        //
-        //        newTopJoin
-        //        /        \
-        //   newBottomJoin  B
-        //    /    \
-        //   A      C
-
-        // If either join is not inner, we cannot proceed.
-        // (Is this too strict?)
-        //        if (topJoin.getJoinType() != JoinRelType.INNER || bottomJoin.getJoinType() != JoinRelType.INNER) {
-        //            return;
-        //        }
-
-        // Split the condition of topJoin into a conjunction. Each of the
-        // parts that does not use columns from B can be pushed down.
-        final List<RexNode> intersecting = new ArrayList<>();
-        final List<RexNode> nonIntersecting = new ArrayList<>();
-        split(topJoin.getCondition(), bBitSet, intersecting, nonIntersecting);
-
-        // If there's nothing to push down, it's not worth proceeding.
-        if (nonIntersecting.isEmpty()) {
-            return;
-        }
-
-        // Split the condition of bottomJoin into a conjunction. Each of the
-        // parts that use columns from B will need to be pulled up.
-        final List<RexNode> bottomIntersecting = new ArrayList<>();
-        final List<RexNode> bottomNonIntersecting = new ArrayList<>();
-        split(bottomJoin.getCondition(), bBitSet, bottomIntersecting, bottomNonIntersecting);
-
-        // target: | A       | C      |
-        // source: | A       | B | C      |
-        //        final Mappings.TargetMapping bottomMapping = Mappings
-        //            .createShiftMapping(aCount + bCount + cCount, 0, 0, aCount, aCount, aCount + bCount,
-        //                cCount);
-
-        final Mappings.TargetMapping bottomMapping = Mappings.createShiftMapping(aCount + bCount + cCount, 0, 0, aCount, aCount + cCount, aCount, bCount, aCount, aCount + bCount, cCount);
-        final List<RexNode> newBottomList = new ArrayList<>();
-        new RexPermuteInputsShuttle(bottomMapping, relA, relC).visitList(nonIntersecting, newBottomList);
-        new RexPermuteInputsShuttle(bottomMapping, relA, relC).visitList(bottomNonIntersecting, newBottomList);
-        final RexBuilder rexBuilder = cluster.getRexBuilder();
-        RexNode newBottomCondition = RexUtil.composeConjunction(rexBuilder, newBottomList, false);
-        final Join newBottomJoin = bottomJoin.copy(bottomJoin.getTraitSet(), newBottomCondition, relA, relC, bottomJoin.getJoinType(), bottomJoin.isSemiJoinDone());
-
-        // target: | A       | C      | B |
-        // source: | A       | B | C      |
-        final Mappings.TargetMapping topMapping = Mappings.createShiftMapping(aCount + bCount + cCount, 0, 0, aCount, aCount + cCount, aCount, bCount, aCount, aCount + bCount, cCount);
-        final List<RexNode> newTopList = new ArrayList<>();
-        new RexPermuteInputsShuttle(topMapping, newBottomJoin, relB).visitList(intersecting, newTopList);
-        new RexPermuteInputsShuttle(topMapping, newBottomJoin, relB).visitList(bottomIntersecting, newTopList);
-        RexNode newTopCondition = RexUtil.composeConjunction(rexBuilder, newTopList, false);
-        @SuppressWarnings("SuspiciousNameCombination")
-        final Join newTopJoin = topJoin.copy(topJoin.getTraitSet(), newTopCondition, newBottomJoin, relB, topJoin.getJoinType(), topJoin.isSemiJoinDone());
-
-        assert !Mappings.isIdentity(topMapping);
-        final RelBuilder relBuilder = call.builder();
-        relBuilder.push(newTopJoin);
-        relBuilder.project(relBuilder.fields(topMapping));
-        call.transformTo(relBuilder.build());
-    }
-
-    /**
-     * Splits a condition into conjunctions that do or do not intersect with
-     * a given bit set.
-     */
-    static void split(RexNode condition, ImmutableBitSet bitSet, List<RexNode> intersecting, List<RexNode> nonIntersecting) {
-        for (RexNode node : RelOptUtil.conjunctions(condition)) {
-            ImmutableBitSet inputBitSet = RelOptUtil.InputFinder.bits(node);
-            if (bitSet.intersects(inputBitSet)) {
-                intersecting.add(node);
-            } else {
-                nonIntersecting.add(node);
-            }
-        }
-    }
-}
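
Both split() results above are rewritten through RexPermuteInputsShuttle with a
target mapping, and the heart of that mapping is plain index arithmetic: once
(A JOIN B) JOIN C has been reordered into (A JOIN C) JOIN B, a column reference
valid in the old field layout | A | B | C | must be translated to the new layout
| A | C | B |. A self-contained toy model of what the Mappings.createShiftMapping(...)
call encodes (not the Calcite API, just the arithmetic):

    /** Toy model of the A|B|C -> A|C|B field remapping: A's columns keep their
     * positions, B's columns shift right by cCount, C's shift left by bCount. */
    public class ShiftMappingSketch {
        static int remap(int source, int aCount, int bCount, int cCount) {
            if (source < aCount) {
                return source;               // A columns are unchanged
            } else if (source < aCount + bCount) {
                return source + cCount;      // B columns now sit after C
            } else {
                return source - bCount;      // C columns now sit before B
            }
        }

        public static void main(String[] args) {
            int aCount = 2, bCount = 3, cCount = 1; // old layout: A:0-1, B:2-4, C:5
            for (int i = 0; i < aCount + bCount + cCount; i++) {
                System.out.println(i + " -> " + remap(i, aCount, bCount, cCount));
            }
            // prints 0 -> 0, 1 -> 1, 2 -> 3, 3 -> 4, 4 -> 5, 5 -> 2
        }
    }
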
diff --git a/atopcalcite/src/main/java/org/apache/calcite/rel/rules/OLAPJoinPushThroughJoinRule2.java b/atopcalcite/src/main/java/org/apache/calcite/rel/rules/OLAPJoinPushThroughJoinRule2.java
deleted file mode 100644
index a769cbdac3..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/rel/rules/OLAPJoinPushThroughJoinRule2.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *     http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-package org.apache.calcite.rel.rules;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.annotation.Nullable;
-
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.logical.LogicalJoin;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexPermuteInputsShuttle;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Permutation;
-import org.apache.calcite.util.mapping.AbstractTargetMapping;
-import org.apache.calcite.util.mapping.Mapping;
-import org.apache.calcite.util.mapping.Mappings;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-
-/**
- * Modified from org.apache.calcite.rel.rules.JoinPushThroughJoinRule.
- * The goal is to move joins with sub-queries after joins with tables,
- * so that a pre-defined join with tables can be matched.
- *
- * Differs from OLAPJoinPushThroughJoinRule in the pattern it matches: the
- * result that OLAPJoinPushThroughJoinRule generates cannot recursively match
- * that rule's own pattern, so OLAPJoinPushThroughJoinRule2 allows recursive matching.
- */
-public class OLAPJoinPushThroughJoinRule2 extends RelOptRule {
-    /**
-     * Instance of the rule that works on logical joins only, and pushes to the
-     * right.
-     */
-    public static final RelOptRule INSTANCE = new OLAPJoinPushThroughJoinRule2("OLAPJoinPushThroughJoinRule2", LogicalJoin.class, RelFactories.LOGICAL_BUILDER);
-
-    public OLAPJoinPushThroughJoinRule2(String description, Class<? extends Join> clazz, RelBuilderFactory relBuilderFactory) {
-        super(operand(clazz,
-
-                operand(Project.class, //project is added on top by OLAPJoinPushThroughJoinRule
-                        null, new Predicate<Project>() {
-                            @Override
-                            public boolean apply(@Nullable Project input) {
-                                return input.getPermutation() != null;
-                            }
-                        }, operand(clazz, //
-                                operand(RelNode.class, any()), operand(RelNode.class, null, new Predicate<RelNode>() {
-                                    @Override
-                                    public boolean apply(@Nullable RelNode input) {
-                                        return !(input instanceof TableScan);
-                                    }
-                                }, any()))),
-
-                operand(TableScan.class, any())), relBuilderFactory, description);
-    }
-
-    @Override
-    public void onMatch(RelOptRuleCall call) {
-        onMatchRight(call);
-    }
-
-    private void onMatchRight(RelOptRuleCall call) {
-        final Join topJoin = call.rel(0);
-        final Project projectOnBottomJoin = call.rel(1);
-        final Join bottomJoin = call.rel(2);
-        final RelNode relC = call.rel(5);
-        final RelNode relA = bottomJoin.getLeft();
-        final RelNode relB = bottomJoin.getRight();
-        final RelOptCluster cluster = topJoin.getCluster();
-        final Permutation projectPermu = projectOnBottomJoin.getPermutation();
-        final Permutation inverseProjectPermu = projectPermu.inverse();
-        //        Preconditions.checkState(relA == call.rel(3));
-        //        Preconditions.checkState(relB == call.rel(4));
-        Preconditions.checkNotNull(projectPermu);
-
-        //            topJoin
-        //           /        \
-        //        project      C
-        //        /     
-        //   bottomJoin  
-        //    /    \
-        //   A      B
-
-        final int aCount = relA.getRowType().getFieldCount();
-        final int bCount = relB.getRowType().getFieldCount();
-        final int cCount = relC.getRowType().getFieldCount();
-        final ImmutableBitSet bBitSetBelowProject = ImmutableBitSet.range(aCount, aCount + bCount);
-        final ImmutableBitSet bBitSetAboveProject = Mappings.apply(inverseProjectPermu, bBitSetBelowProject);
-
-        final Mapping extendedProjectPerm = createAbstractTargetMapping(Mappings.append(projectPermu, Mappings.createIdentity(cCount)));
-
-        // becomes
-        //
-        //            project
-        //             /
-        //        newTopJoin
-        //        /        \
-        //   newBottomJoin  B
-        //    /    \
-        //   A      C
-
-        // If either join is not inner, we cannot proceed.
-        // (Is this too strict?)
-        //        if (topJoin.getJoinType() != JoinRelType.INNER || bottomJoin.getJoinType() != JoinRelType.INNER) {
-        //            return;
-        //        }
-
-        // Split the condition of topJoin into a conjunction. Each of the
-        // parts that does not use columns from B can be pushed down.
-        final List<RexNode> intersecting = new ArrayList<>();
-        final List<RexNode> nonIntersecting = new ArrayList<>();
-        split(topJoin.getCondition(), bBitSetAboveProject, intersecting, nonIntersecting);
-
-        // If there's nothing to push down, it's not worth proceeding.
-        if (nonIntersecting.isEmpty()) {
-            return;
-        }
-
-        // Split the condition of bottomJoin into a conjunction. Each of the
-        // parts that use columns from B will need to be pulled up.
-        final List<RexNode> bottomIntersecting = new ArrayList<>();
-        final List<RexNode> bottomNonIntersecting = new ArrayList<>();
-        split(bottomJoin.getCondition(), bBitSetBelowProject, bottomIntersecting, bottomNonIntersecting);
-        Preconditions.checkState(bottomNonIntersecting.isEmpty());
-
-        // target: | A       | C      |
-        // source: | A       | B | C      |
-        final Mappings.TargetMapping tempMapping = Mappings.createShiftMapping(aCount + bCount + cCount, 0, 0, aCount, aCount + cCount, aCount, bCount, aCount, aCount + bCount, cCount);
-        final Mappings.TargetMapping thruProjectMapping = Mappings.multiply(extendedProjectPerm, createAbstractTargetMapping(tempMapping));
-        final List<RexNode> newBottomList = new ArrayList<>();
-        new RexPermuteInputsShuttle(thruProjectMapping, relA, relC).visitList(nonIntersecting, newBottomList);
-        final RexBuilder rexBuilder = cluster.getRexBuilder();
-        RexNode newBottomCondition = RexUtil.composeConjunction(rexBuilder, newBottomList, false);
-        final Join newBottomJoin = bottomJoin.copy(bottomJoin.getTraitSet(), newBottomCondition, relA, relC, bottomJoin.getJoinType(), bottomJoin.isSemiJoinDone());
-
-        // target: | A       | C      | B |
-        // source: | A       | B | C      |
-        final Mappings.TargetMapping nonThruProjectMapping = Mappings.createShiftMapping(aCount + bCount + cCount, 0, 0, aCount, aCount + cCount, aCount, bCount, aCount, aCount + bCount, cCount);
-        final List<RexNode> newTopList = new ArrayList<>();
-        new RexPermuteInputsShuttle(thruProjectMapping, newBottomJoin, relB).visitList(intersecting, newTopList);
-        new RexPermuteInputsShuttle(nonThruProjectMapping, newBottomJoin, relB).visitList(bottomIntersecting, newTopList);
-        RexNode newTopCondition = RexUtil.composeConjunction(rexBuilder, newTopList, false);
-        @SuppressWarnings("SuspiciousNameCombination")
-        final Join newTopJoin = topJoin.copy(topJoin.getTraitSet(), newTopCondition, newBottomJoin, relB, topJoin.getJoinType(), topJoin.isSemiJoinDone());
-
-        assert !Mappings.isIdentity(thruProjectMapping);
-        final RelBuilder relBuilder = call.builder();
-        relBuilder.push(newTopJoin);
-        relBuilder.project(relBuilder.fields(thruProjectMapping));
-        call.transformTo(relBuilder.build());
-    }
-
-    private AbstractTargetMapping createAbstractTargetMapping(final Mappings.TargetMapping targetMapping) {
-        return new AbstractTargetMapping(targetMapping.getSourceCount(), targetMapping.getTargetCount()) {
-            @Override
-            public int getTargetOpt(int source) {
-                return targetMapping.getTargetOpt(source);
-            }
-        };
-    }
-
-    /**
-     * Splits a condition into conjunctions that do or do not intersect with
-     * a given bit set.
-     */
-    static void split(RexNode condition, ImmutableBitSet bitSet, List<RexNode> intersecting, List<RexNode> nonIntersecting) {
-        for (RexNode node : RelOptUtil.conjunctions(condition)) {
-            ImmutableBitSet inputBitSet = RelOptUtil.InputFinder.bits(node);
-            if (bitSet.intersects(inputBitSet)) {
-                intersecting.add(node);
-            } else {
-                nonIntersecting.add(node);
-            }
-        }
-    }
-}
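
The extra machinery in this second rule is mapping composition: the rule only
matches a Project whose getPermutation() is non-null, i.e. a pure column
permutation, because only then can a join condition written against the Project's
output be translated losslessly to the join below it. The translation composes the
project permutation with the same A|B|C -> A|C|B shift used in
OLAPJoinPushThroughJoinRule, in apply-first-then-second order, which is the order
Mappings.multiply uses for the real mappings. A toy model with hypothetical
three-column mappings:

    import java.util.Arrays;

    /** Toy model of composing two index mappings, as the rule does when it
     * multiplies the project permutation with the field-shift mapping. */
    public class PermutationComposeSketch {
        /** (g after f)(i) = g[f[i]]: apply f first, then g. */
        static int[] compose(int[] f, int[] g) {
            int[] out = new int[f.length];
            for (int i = 0; i < f.length; i++) {
                out[i] = g[f[i]];
            }
            return out;
        }

        public static void main(String[] args) {
            int[] projectPerm = {2, 1, 0}; // hypothetical: project reverses 3 columns
            int[] shift = {0, 2, 1};       // hypothetical 3-column layout shift
            // Where each of the project's output columns lands in the new tree:
            System.out.println(Arrays.toString(compose(projectPerm, shift))); // [1, 2, 0]
        }
    }
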
diff --git a/atopcalcite/src/main/java/org/apache/calcite/runtime/SqlFunctions.java b/atopcalcite/src/main/java/org/apache/calcite/runtime/SqlFunctions.java
deleted file mode 100644
index 38bd223e05..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/runtime/SqlFunctions.java
+++ /dev/null
@@ -1,2238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.calcite.runtime;
-
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.math.MathContext;
-import java.math.RoundingMode;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.text.DecimalFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TimeZone;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Pattern;
-
-import org.apache.calcite.DataContext;
-import org.apache.calcite.avatica.util.ByteString;
-import org.apache.calcite.avatica.util.DateTimeUtils;
-import org.apache.calcite.avatica.util.Spaces;
-import org.apache.calcite.avatica.util.TimeUnitRange;
-import org.apache.calcite.linq4j.AbstractEnumerable;
-import org.apache.calcite.linq4j.CartesianProductEnumerator;
-import org.apache.calcite.linq4j.Enumerable;
-import org.apache.calcite.linq4j.Enumerator;
-import org.apache.calcite.linq4j.Linq4j;
-import org.apache.calcite.linq4j.function.Deterministic;
-import org.apache.calcite.linq4j.function.Function1;
-import org.apache.calcite.linq4j.function.NonDeterministic;
-import org.apache.calcite.linq4j.tree.Primitive;
-import org.apache.calcite.runtime.FlatLists.ComparableList;
-import org.apache.calcite.util.Bug;
-import org.apache.calcite.util.NumberUtil;
-
-/*
- * OVERRIDE POINT:
- * - more power() overloads
- * - refined org.apache.calcite.runtime.SqlFunctions#addMonths(int, int)
- * - corner case substring()
- * - corner case trim_()
- * - upper()
- * - lower()
- * - charLength()
- * - addMonths()
- */
-
-/**
- * Helper methods to implement SQL functions in generated code.
- *
- * <p>Not present: and, or, not (builtin operators are better, because they
- * use lazy evaluation). Implementations do not check for null values; the
- * calling code must do that.</p>
- *
- * <p>Many of the functions do not check for null values. This is intentional.
- * If null arguments are possible, the code-generation framework checks for
- * nulls before calling the functions.</p>
- */
-
-@SuppressWarnings("UnnecessaryUnboxing")
-@Deterministic
-public class SqlFunctions {
-    private static final DecimalFormat DOUBLE_FORMAT = NumberUtil.decimalFormat("0.0E0");
-
-    private static final TimeZone LOCAL_TZ = TimeZone.getDefault();
-
-    private static final Function1<List<Object>, Enumerable<Object>> LIST_AS_ENUMERABLE = new Function1<List<Object>, Enumerable<Object>>() {
-        public Enumerable<Object> apply(List<Object> list) {
-            return Linq4j.asEnumerable(list);
-        }
-    };
-
-    private static final Function1<Object[], Enumerable<Object[]>> ARRAY_CARTESIAN_PRODUCT = new Function1<Object[], Enumerable<Object[]>>() {
-        public Enumerable<Object[]> apply(Object[] lists) {
-            final List<Enumerator<Object>> enumerators = new ArrayList<>();
-            for (Object list : lists) {
-                enumerators.add(Linq4j.enumerator((List) list));
-            }
-            final Enumerator<List<Object>> product = Linq4j.product(enumerators);
-            return new AbstractEnumerable<Object[]>() {
-                public Enumerator<Object[]> enumerator() {
-                    return Linq4j.transform(product, new Function1<List<Object>, Object[]>() {
-                        public Object[] apply(List<Object> list) {
-                            return list.toArray();
-                        }
-                    });
-                }
-            };
-        }
-    };
-
-    /** Holds, for each thread, a map from sequence name to sequence current
-     * value.
-     *
-     * <p>This is a straw man of an implementation whose main goal is to prove
-     * that sequences can be parsed, validated and planned. A real application
-     * will want persistent values for sequences, shared among threads. */
-    private static final ThreadLocal<Map<String, AtomicLong>> THREAD_SEQUENCES = new ThreadLocal<Map<String, AtomicLong>>() {
-        @Override
-        protected Map<String, AtomicLong> initialValue() {
-            return new HashMap<String, AtomicLong>();
-        }
-    };
-
-    private SqlFunctions() {
-    }
-
-    /** SQL SUBSTRING(string FROM ... FOR ...) function. */
-    // override
-    public static String substring(String s, int from, int for_) {
-        if (s == null) {
-            return null;
-        }
-        return s.substring(from - 1, Math.min(from - 1 + for_, s.length()));
-    }
-
-    public static String substring(String s, long from, long for_) {
-        if (from < Integer.MIN_VALUE || from > Integer.MAX_VALUE || for_ < Integer.MIN_VALUE
-                || for_ > Integer.MAX_VALUE) {
-            throw new IllegalArgumentException("Cannot be cast to int due to risk of overflow.");
-        }
-        return substring(s, (int) from, (int) for_);
-    }
-
-    /** SQL SUBSTRING(string FROM ...) function. */
-    public static String substring(String s, int from) {
-        return s.substring(from - 1);
-    }
-
-    public static String substring(String s, long from) {
-        if (from < Integer.MIN_VALUE || from > Integer.MAX_VALUE) {
-            throw new IllegalArgumentException("Cannot be cast to int due to risk of overflow.");
-        }
-        return substring(s, (int) from);
-    }
-
-    /** SQL SUBSTRING(binary FROM ... FOR ...) function. */
-    public static ByteString substring(ByteString b, int from, int for_) {
-        return b.substring(from - 1, Math.min(from - 1 + for_, b.length()));
-    }
-
-    public static ByteString substring(ByteString b, long from, long for_) {
-        if (from < Integer.MIN_VALUE || from > Integer.MAX_VALUE || for_ < Integer.MIN_VALUE
-                || for_ > Integer.MAX_VALUE) {
-            throw new IllegalArgumentException("Cannot be cast to int due to risk of overflow.");
-        }
-        return substring(b, (int) from, (int) for_);
-    }
-
-    /** SQL SUBSTRING(binary FROM ...) function. */
-    public static ByteString substring(ByteString b, int from) {
-        return b.substring(from - 1);
-    }
-
-    public static ByteString substring(ByteString b, long from) {
-        if (from < Integer.MIN_VALUE || from > Integer.MAX_VALUE) {
-            throw new IllegalArgumentException("Cannot be cast to int due to risk of overflow.");
-        }
-        return substring(b, (int) from);
-    }
-
-    /** SQL UPPER(string) function. */
-    // override
-    public static String upper(String s) {
-        if (s == null) {
-            return "";
-        }
-        return s.toUpperCase(Locale.ROOT);
-    }
-
-    /** SQL LOWER(string) function. */
-    //override
-    public static String lower(String s) {
-        if (s == null) {
-            return "";
-        }
-        return s.toLowerCase(Locale.ROOT);
-    }
-
-    /** SQL INITCAP(string) function. */
-    public static String initcap(String s) {
-        // Assumes alphanumeric characters are [A-Za-z0-9];
-        // everything else is treated as whitespace.
-        final int len = s.length();
-        boolean start = true;
-        final StringBuilder newS = new StringBuilder();
-
-        for (int i = 0; i < len; i++) {
-            char curCh = s.charAt(i);
-            final int c = (int) curCh;
-            if (start) { // curCh is whitespace or first character of word.
-                if (c > 47 && c < 58) { // 0-9
-                    start = false;
-                } else if (c > 64 && c < 91) { // A-Z
-                    start = false;
-                } else if (c > 96 && c < 123) { // a-z
-                    start = false;
-                    curCh = (char) (c - 32); // Uppercase this character
-                }
-                // else {} whitespace
-            } else { // Inside of a word or white space after end of word.
-                if (c > 47 && c < 58) { // 0-9
-                    // noop
-                } else if (c > 64 && c < 91) { // A-Z
-                    curCh = (char) (c + 32); // Lowercase this character
-                } else if (c > 96 && c < 123) { // a-z
-                    // noop
-                } else { // whitespace
-                    start = true;
-                }
-            }
-            newS.append(curCh);
-        } // for each character in s
-        return newS.toString();
-    }
-
-    /** SQL CHARACTER_LENGTH(string) function. */
-    public static int charLength(String s) {
-        if (s == null) {
-            return 0;
-        }
-        return s.length();
-    }
-
-    /** SQL {@code string || string} operator. */
-    public static String concat(String s0, String s1) {
-        return s0 + s1;
-    }
-
-    /** SQL {@code binary || binary} operator. */
-    public static ByteString concat(ByteString s0, ByteString s1) {
-        return s0.concat(s1);
-    }
-
-    /** SQL {@code RTRIM} function applied to string. */
-    public static String rtrim(String s) {
-        return trim_(s, false, true, ' ');
-    }
-
-    /** SQL {@code LTRIM} function. */
-    public static String ltrim(String s) {
-        return trim_(s, true, false, ' ');
-    }
-
-    /** SQL {@code TRIM(... seek FROM s)} function. */
-    public static String trim(boolean leading, boolean trailing, String seek, String s) {
-        return trim_(s, leading, trailing, seek.charAt(0));
-    }
-
-    /** SQL {@code TRIM} function. */
-    private static String trim_(String s, boolean left, boolean right, char c) {
-        if (s == null) {
-            return null;
-        }
-        int j = s.length();
-        if (right) {
-            for (;;) {
-                if (j == 0) {
-                    return "";
-                }
-                if (s.charAt(j - 1) != c) {
-                    break;
-                }
-                --j;
-            }
-        }
-        int i = 0;
-        if (left) {
-            for (;;) {
-                if (i == j) {
-                    return "";
-                }
-                if (s.charAt(i) != c) {
-                    break;
-                }
-                ++i;
-            }
-        }
-        return s.substring(i, j);
-    }
-
-    /** SQL {@code TRIM} function applied to binary string. */
-    public static ByteString trim(ByteString s) {
-        return trim_(s, true, true);
-    }
-
-    /** Helper for CAST. */
-    public static ByteString rtrim(ByteString s) {
-        return trim_(s, false, true);
-    }
-
-    /** SQL {@code TRIM} function applied to binary string. */
-    private static ByteString trim_(ByteString s, boolean left, boolean right) {
-        int j = s.length();
-        if (right) {
-            for (;;) {
-                if (j == 0) {
-                    return ByteString.EMPTY;
-                }
-                if (s.byteAt(j - 1) != 0) {
-                    break;
-                }
-                --j;
-            }
-        }
-        int i = 0;
-        if (left) {
-            for (;;) {
-                if (i == j) {
-                    return ByteString.EMPTY;
-                }
-                if (s.byteAt(i) != 0) {
-                    break;
-                }
-                ++i;
-            }
-        }
-        return s.substring(i, j);
-    }
-
-    /** SQL {@code OVERLAY} function. */
-    public static String overlay(String s, String r, int start) {
-        if (s == null || r == null) {
-            return null;
-        }
-        return s.substring(0, start - 1) + r + s.substring(start - 1 + r.length());
-    }
-
-    /** SQL {@code OVERLAY} function. */
-    public static String overlay(String s, String r, int start, int length) {
-        if (s == null || r == null) {
-            return null;
-        }
-        return s.substring(0, start - 1) + r + s.substring(start - 1 + length);
-    }
-
-    /** SQL {@code OVERLAY} function applied to binary strings. */
-    public static ByteString overlay(ByteString s, ByteString r, int start) {
-        if (s == null || r == null) {
-            return null;
-        }
-        return s.substring(0, start - 1).concat(r).concat(s.substring(start - 1 + r.length()));
-    }
-
-    /** SQL {@code OVERLAY} function applied to binary strings. */
-    public static ByteString overlay(ByteString s, ByteString r, int start, int length) {
-        if (s == null || r == null) {
-            return null;
-        }
-        return s.substring(0, start - 1).concat(r).concat(s.substring(start - 1 + length));
-    }
-
-    /** SQL {@code LIKE} function. */
-    public static boolean like(String s, String pattern) {
-        final String regex = Like.sqlToRegexLike(pattern, null);
-        return Pattern.matches(regex, s);
-    }
-
-    /** SQL {@code LIKE} function with escape. */
-    public static boolean like(String s, String pattern, String escape) {
-        final String regex = Like.sqlToRegexLike(pattern, escape);
-        return Pattern.matches(regex, s);
-    }
-
-    /** SQL {@code SIMILAR} function. */
-    public static boolean similar(String s, String pattern) {
-        final String regex = Like.sqlToRegexSimilar(pattern, null);
-        return Pattern.matches(regex, s);
-    }
-
-    /** SQL {@code SIMILAR} function with escape. */
-    public static boolean similar(String s, String pattern, String escape) {
-        final String regex = Like.sqlToRegexSimilar(pattern, escape);
-        return Pattern.matches(regex, s);
-    }
-
-    // =
-
-    /** SQL <code>=</code> operator applied to BigDecimal values (neither may be
-     * null). */
-    public static boolean eq(BigDecimal b0, BigDecimal b1) {
-        return b0.stripTrailingZeros().equals(b1.stripTrailingZeros());
-    }
-
-    /** SQL <code>=</code> operator applied to Object values (including String;
-     * neither side may be null). */
-    public static boolean eq(Object b0, Object b1) {
-        return b0.equals(b1);
-    }
-
-    /** SQL <code>=</code> operator applied to Object values (at least one operand
-     * has ANY type; neither may be null). */
-    public static boolean eqAny(Object b0, Object b1) {
-        if (b0.getClass().equals(b1.getClass())) {
-            // The result of SqlFunctions.eq(BigDecimal, BigDecimal) makes more sense
-            // than BigDecimal.equals(BigDecimal). So if both of types are BigDecimal,
-            // we just use SqlFunctions.eq(BigDecimal, BigDecimal).
-            if (BigDecimal.class.isInstance(b0)) {
-                return eq((BigDecimal) b0, (BigDecimal) b1);
-            } else {
-                return b0.equals(b1);
-            }
-        } else if (allAssignable(Number.class, b0, b1)) {
-            return eq(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-        // We shouldn't rely on the runtime implementation: an overridden
-        // equals may handle other types inconsistently and produce worse
-        // results, for example a.equals(b) != b.equals(a)
-        return false;
-    }
-
-    /** Returns whether two objects can both be assigned to a given class. */
-    private static boolean allAssignable(Class clazz, Object o0, Object o1) {
-        return clazz.isInstance(o0) && clazz.isInstance(o1);
-    }
-
-    // <>
-
-    /** SQL <code>&lt;&gt;</code> operator applied to BigDecimal values. */
-    public static boolean ne(BigDecimal b0, BigDecimal b1) {
-        return b0.compareTo(b1) != 0;
-    }
-
-    /** SQL <code>&lt;&gt;</code> operator applied to Object values (including
-     * String; neither side may be null). */
-    public static boolean ne(Object b0, Object b1) {
-        return !eq(b0, b1);
-    }
-
-    /** SQL <code>&lt;&gt;</code> operator applied to Object values (at least one
-     *  operand has ANY type, including String; neither may be null). */
-    public static boolean neAny(Object b0, Object b1) {
-        return !eqAny(b0, b1);
-    }
-
-    // <
-
-    /** SQL <code>&lt;</code> operator applied to boolean values. */
-    public static boolean lt(boolean b0, boolean b1) {
-        return compare(b0, b1) < 0;
-    }
-
-    /** SQL <code>&lt;</code> operator applied to String values. */
-    public static boolean lt(String b0, String b1) {
-        return b0.compareTo(b1) < 0;
-    }
-
-    /** SQL <code>&lt;</code> operator applied to ByteString values. */
-    public static boolean lt(ByteString b0, ByteString b1) {
-        return b0.compareTo(b1) < 0;
-    }
-
-    /** SQL <code>&lt;</code> operator applied to BigDecimal values. */
-    public static boolean lt(BigDecimal b0, BigDecimal b1) {
-        return b0.compareTo(b1) < 0;
-    }
-
-    /** SQL <code>&lt;</code> operator applied to Object values. */
-    public static boolean ltAny(Object b0, Object b1) {
-        if (b0.getClass().equals(b1.getClass()) && b0 instanceof Comparable) {
-            //noinspection unchecked
-            return ((Comparable) b0).compareTo(b1) < 0;
-        } else if (allAssignable(Number.class, b0, b1)) {
-            return lt(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notComparable("<", b0, b1);
-    }
-
-    // <=
-
-    /** SQL <code>&le;</code> operator applied to boolean values. */
-    public static boolean le(boolean b0, boolean b1) {
-        return compare(b0, b1) <= 0;
-    }
-
-    /** SQL <code>&le;</code> operator applied to String values. */
-    public static boolean le(String b0, String b1) {
-        return b0.compareTo(b1) <= 0;
-    }
-
-    /** SQL <code>&le;</code> operator applied to ByteString values. */
-    public static boolean le(ByteString b0, ByteString b1) {
-        return b0.compareTo(b1) <= 0;
-    }
-
-    /** SQL <code>&le;</code> operator applied to BigDecimal values. */
-    public static boolean le(BigDecimal b0, BigDecimal b1) {
-        return b0.compareTo(b1) <= 0;
-    }
-
-    /** SQL <code>&le;</code> operator applied to Object values (at least one
-     * operand has ANY type; neither may be null). */
-    public static boolean leAny(Object b0, Object b1) {
-        if (b0.getClass().equals(b1.getClass()) && b0 instanceof Comparable) {
-            //noinspection unchecked
-            return ((Comparable) b0).compareTo(b1) <= 0;
-        } else if (allAssignable(Number.class, b0, b1)) {
-            return le(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notComparable("<=", b0, b1);
-    }
-
-    // >
-
-    /** SQL <code>&gt;</code> operator applied to boolean values. */
-    public static boolean gt(boolean b0, boolean b1) {
-        return compare(b0, b1) > 0;
-    }
-
-    /** SQL <code>&gt;</code> operator applied to String values. */
-    public static boolean gt(String b0, String b1) {
-        return b0.compareTo(b1) > 0;
-    }
-
-    /** SQL <code>&gt;</code> operator applied to ByteString values. */
-    public static boolean gt(ByteString b0, ByteString b1) {
-        return b0.compareTo(b1) > 0;
-    }
-
-    /** SQL <code>&gt;</code> operator applied to BigDecimal values. */
-    public static boolean gt(BigDecimal b0, BigDecimal b1) {
-        return b0.compareTo(b1) > 0;
-    }
-
-    /** SQL <code>&gt;</code> operator applied to Object values (at least one
-     * operand has ANY type; neither may be null). */
-    public static boolean gtAny(Object b0, Object b1) {
-        if (b0.getClass().equals(b1.getClass()) && b0 instanceof Comparable) {
-            //noinspection unchecked
-            return ((Comparable) b0).compareTo(b1) > 0;
-        } else if (allAssignable(Number.class, b0, b1)) {
-            return gt(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notComparable(">", b0, b1);
-    }
-
-    // >=
-
-    /** SQL <code>&ge;</code> operator applied to boolean values. */
-    public static boolean ge(boolean b0, boolean b1) {
-        return compare(b0, b1) >= 0;
-    }
-
-    /** SQL <code>&ge;</code> operator applied to String values. */
-    public static boolean ge(String b0, String b1) {
-        return b0.compareTo(b1) >= 0;
-    }
-
-    /** SQL <code>&ge;</code> operator applied to ByteString values. */
-    public static boolean ge(ByteString b0, ByteString b1) {
-        return b0.compareTo(b1) >= 0;
-    }
-
-    /** SQL <code>&ge;</code> operator applied to BigDecimal values. */
-    public static boolean ge(BigDecimal b0, BigDecimal b1) {
-        return b0.compareTo(b1) >= 0;
-    }
-
-    /** SQL <code>&ge;</code> operator applied to Object values (at least one
-     * operand has ANY type; neither may be null). */
-    public static boolean geAny(Object b0, Object b1) {
-        if (b0.getClass().equals(b1.getClass()) && b0 instanceof Comparable) {
-            //noinspection unchecked
-            return ((Comparable) b0).compareTo(b1) >= 0;
-        } else if (allAssignable(Number.class, b0, b1)) {
-            return ge(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notComparable(">=", b0, b1);
-    }
-
-    // +
-
-    /** SQL <code>+</code> operator applied to int values. */
-    public static int plus(int b0, int b1) {
-        return b0 + b1;
-    }
-
-    /** SQL <code>+</code> operator applied to int values; left side may be
-     * null. */
-    public static Integer plus(Integer b0, int b1) {
-        return b0 == null ? null : (b0 + b1);
-    }
-
-    /** SQL <code>+</code> operator applied to int values; right side may be
-     * null. */
-    public static Integer plus(int b0, Integer b1) {
-        return b1 == null ? null : (b0 + b1);
-    }
-
-    /** SQL <code>+</code> operator applied to nullable int values. */
-    public static Integer plus(Integer b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0 + b1);
-    }
-
-    /** SQL <code>+</code> operator applied to nullable long and int values. */
-    public static Long plus(Long b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() + b1.longValue());
-    }
-
-    /** SQL <code>+</code> operator applied to nullable int and long values. */
-    public static Long plus(Integer b0, Long b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() + b1.longValue());
-    }
-
-    /** SQL <code>+</code> operator applied to BigDecimal values. */
-    public static BigDecimal plus(BigDecimal b0, BigDecimal b1) {
-        return (b0 == null || b1 == null) ? null : b0.add(b1);
-    }
-
-    /** SQL <code>+</code> operator applied to String values. Same as string concat operator. */
-    public static String plus(String s0, String s1) {
-        return s0 + s1;
-    }
-
-    /** SQL <code>+</code> operator applied to Object values (at least one operand
-     * has ANY type; either may be null). */
-    public static Object plusAny(Object b0, Object b1) {
-        if (b0 == null || b1 == null) {
-            return null;
-        }
-
-        if (allAssignable(Number.class, b0, b1)) {
-            return plus(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notArithmetic("+", b0, b1);
-    }
-
-    // -
-
-    /** SQL <code>-</code> operator applied to int values. */
-    public static int minus(int b0, int b1) {
-        return b0 - b1;
-    }
-
-    /** SQL <code>-</code> operator applied to int values; left side may be
-     * null. */
-    public static Integer minus(Integer b0, int b1) {
-        return b0 == null ? null : (b0 - b1);
-    }
-
-    /** SQL <code>-</code> operator applied to int values; right side may be
-     * null. */
-    public static Integer minus(int b0, Integer b1) {
-        return b1 == null ? null : (b0 - b1);
-    }
-
-    /** SQL <code>-</code> operator applied to nullable int values. */
-    public static Integer minus(Integer b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0 - b1);
-    }
-
-    /** SQL <code>-</code> operator applied to nullable long and int values. */
-    public static Long minus(Long b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() - b1.longValue());
-    }
-
-    /** SQL <code>-</code> operator applied to nullable int and long values. */
-    public static Long minus(Integer b0, Long b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() - b1.longValue());
-    }
-
-    /** SQL <code>-</code> operator applied to BigDecimal values. */
-    public static BigDecimal minus(BigDecimal b0, BigDecimal b1) {
-        return (b0 == null || b1 == null) ? null : b0.subtract(b1);
-    }
-
-    /** SQL <code>-</code> operator applied to Object values (at least one operand
-     * has ANY type; either may be null). */
-    public static Object minusAny(Object b0, Object b1) {
-        if (b0 == null || b1 == null) {
-            return null;
-        }
-
-        if (allAssignable(Number.class, b0, b1)) {
-            return minus(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notArithmetic("-", b0, b1);
-    }
-
-    // /
-
-    /** SQL <code>/</code> operator applied to int values. */
-    public static int divide(int b0, int b1) {
-        return b0 / b1;
-    }
-
-    /** SQL <code>/</code> operator applied to int values; left side may be
-     * null. */
-    public static Integer divide(Integer b0, int b1) {
-        return b0 == null ? null : (b0 / b1);
-    }
-
-    /** SQL <code>/</code> operator applied to int values; right side may be
-     * null. */
-    public static Integer divide(int b0, Integer b1) {
-        return b1 == null ? null : (b0 / b1);
-    }
-
-    /** SQL <code>/</code> operator applied to nullable int values. */
-    public static Integer divide(Integer b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0 / b1);
-    }
-
-    /** SQL <code>/</code> operator applied to nullable long and int values. */
-    public static Long divide(Long b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() / b1.longValue());
-    }
-
-    /** SQL <code>/</code> operator applied to nullable int and long values. */
-    public static Long divide(Integer b0, Long b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() / b1.longValue());
-    }
-
-    /** SQL <code>/</code> operator applied to BigDecimal values. */
-    public static BigDecimal divide(BigDecimal b0, BigDecimal b1) {
-        return (b0 == null || b1 == null) ? null : b0.divide(b1, MathContext.DECIMAL64);
-    }
-
-    /** SQL <code>/</code> operator applied to Object values (at least one operand
-     * has ANY type; either may be null). */
-    public static Object divideAny(Object b0, Object b1) {
-        if (b0 == null || b1 == null) {
-            return null;
-        }
-
-        if (allAssignable(Number.class, b0, b1)) {
-            return divide(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notArithmetic("/", b0, b1);
-    }
-
-    public static int divide(int b0, BigDecimal b1) {
-        return BigDecimal.valueOf(b0).divide(b1, RoundingMode.HALF_DOWN).intValue();
-    }
-
-    public static long divide(long b0, BigDecimal b1) {
-        return BigDecimal.valueOf(b0).divide(b1, RoundingMode.HALF_DOWN).longValue();
-    }
-
-    // *
-
-    /** SQL <code>*</code> operator applied to int values. */
-    public static int multiply(int b0, int b1) {
-        return b0 * b1;
-    }
-
-    /** SQL <code>*</code> operator applied to int values; left side may be
-     * null. */
-    public static Integer multiply(Integer b0, int b1) {
-        return b0 == null ? null : (b0 * b1);
-    }
-
-    /** SQL <code>*</code> operator applied to int values; right side may be
-     * null. */
-    public static Integer multiply(int b0, Integer b1) {
-        return b1 == null ? null : (b0 * b1);
-    }
-
-    /** SQL <code>*</code> operator applied to nullable int values. */
-    public static Integer multiply(Integer b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0 * b1);
-    }
-
-    /** SQL <code>*</code> operator applied to nullable long and int values. */
-    public static Long multiply(Long b0, Integer b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() * b1.longValue());
-    }
-
-    /** SQL <code>*</code> operator applied to nullable int and long values. */
-    public static Long multiply(Integer b0, Long b1) {
-        return (b0 == null || b1 == null) ? null : (b0.longValue() * b1.longValue());
-    }
-
-    /** SQL <code>*</code> operator applied to BigDecimal values. */
-    public static BigDecimal multiply(BigDecimal b0, BigDecimal b1) {
-        return (b0 == null || b1 == null) ? null : b0.multiply(b1);
-    }
-
-    /** SQL <code>*</code> operator applied to Object values (at least one operand
-     * has ANY type; either may be null). */
-    public static Object multiplyAny(Object b0, Object b1) {
-        if (b0 == null || b1 == null) {
-            return null;
-        }
-
-        if (allAssignable(Number.class, b0, b1)) {
-            return multiply(toBigDecimal((Number) b0), toBigDecimal((Number) b1));
-        }
-
-        throw notArithmetic("*", b0, b1);
-    }
-
-    private static IllegalArgumentException notArithmetic(String op, Object b0, Object b1) {
-        return new IllegalArgumentException(
-                "Invalid types for arithmetic: " + b0.getClass() + " " + op + " " + b1.getClass());
-    }
-
-    private static IllegalArgumentException notComparable(String op, Object b0, Object b1) {
-        return new IllegalArgumentException(
-                "Invalid types for comparison: " + b0.getClass() + " " + op + " " + b1.getClass());
-    }
-
-    // EXP
-
-    /** SQL <code>EXP</code> operator applied to double values. */
-    public static double exp(double b0) {
-        return Math.exp(b0);
-    }
-
-    public static double exp(BigDecimal b0) {
-        return Math.exp(b0.doubleValue());
-    }
-
-    public static double exp(long b0) {
-        return Math.exp(b0);
-    }
-
-    // POWER
-
-    /** SQL <code>POWER</code> operator applied to double values. */
-    public static double power(double b0, double b1) {
-        return Math.pow(b0, b1);
-    }
-
-    public static double power(long b0, long b1) {
-        return Math.pow(b0, b1);
-    }
-
-    public static double power(BigDecimal b0, BigDecimal b1) {
-        return Math.pow(b0.doubleValue(), b1.doubleValue());
-    }
-
-    public static double power(long b0, BigDecimal b1) {
-        return Math.pow(b0, b1.doubleValue());
-    }
-
-    // OVERRIDE POINT starts, more power overloads
-    public static double power(double n1, long n2) {
-        return Math.pow(n1, (double) n2);
-    }
-
-    public static double power(double n1, BigDecimal n2) {
-        return Math.pow(n1, n2.doubleValue());
-    }
-
-    public static double power(long n1, double n2) {
-        return Math.pow((double) n1, n2);
-    }
-
-    public static double power(BigDecimal n1, double n2) {
-        return Math.pow(n1.doubleValue(), n2);
-    }
-
-    public static double power(BigDecimal n1, long n2) {
-        return Math.pow(n1.doubleValue(), (double) n2);
-    }
-
-    public static double power(double n1, int n2) {
-        return Math.pow(n1, (double) n2);
-    }
-
-    public static double power(long n1, int n2) {
-        return Math.pow((double) n1, (double) n2);
-    }
-
-    public static double power(BigDecimal n1, int n2) {
-        return Math.pow(n1.doubleValue(), (double) n2);
-    }
-
-    public static double power(int n1, double n2) {
-        return Math.pow((double) n1, n2);
-    }
-
-    public static double power(int n1, long n2) {
-        return Math.pow((double) n1, (double) n2);
-    }
-
-    public static double power(int n1, BigDecimal n2) {
-        return Math.pow((double) n1, n2.doubleValue());
-    }
-
-    public static double power(int n1, int n2) {
-        return Math.pow(n1, n2);
-    }
-
-    // OVERRIDE POINT ends, more power overloads
-
-    // LN
-
-    /** SQL {@code LN(number)} function applied to double values. */
-    public static double ln(double d) {
-        return Math.log(d);
-    }
-
-    /** SQL {@code LN(number)} function applied to long values. */
-    public static double ln(long b0) {
-        return Math.log(b0);
-    }
-
-    /** SQL {@code LN(number)} function applied to BigDecimal values. */
-    public static double ln(BigDecimal d) {
-        return Math.log(d.doubleValue());
-    }
-
-    // LOG10
-
-    /** SQL <code>LOG10(numeric)</code> operator applied to double values. */
-    public static double log10(double b0) {
-        return Math.log10(b0);
-    }
-
-    /** SQL {@code LOG10(number)} function applied to long values. */
-    public static double log10(long b0) {
-        return Math.log10(b0);
-    }
-
-    /** SQL {@code LOG10(number)} function applied to BigDecimal values. */
-    public static double log10(BigDecimal d) {
-        return Math.log10(d.doubleValue());
-    }
-
-    // MOD
-
-    /** SQL <code>MOD</code> operator applied to byte values. */
-    public static byte mod(byte b0, byte b1) {
-        return (byte) (b0 % b1);
-    }
-
-    /** SQL <code>MOD</code> operator applied to short values. */
-    public static short mod(short b0, short b1) {
-        return (short) (b0 % b1);
-    }
-
-    /** SQL <code>MOD</code> operator applied to int values. */
-    public static int mod(int b0, int b1) {
-        return b0 % b1;
-    }
-
-    /** SQL <code>MOD</code> operator applied to long values. */
-    public static long mod(long b0, long b1) {
-        return b0 % b1;
-    }
-
-    // temporary
-    public static BigDecimal mod(BigDecimal b0, int b1) {
-        return mod(b0, BigDecimal.valueOf(b1));
-    }
-
-    // temporary
-    public static int mod(int b0, BigDecimal b1) {
-        return mod(b0, b1.intValue());
-    }
-
-    public static BigDecimal mod(BigDecimal b0, BigDecimal b1) {
-        final BigDecimal[] bigDecimals = b0.divideAndRemainder(b1);
-        return bigDecimals[1];
-    }
-
-    // FLOOR
-
-    public static double floor(double b0) {
-        return Math.floor(b0);
-    }
-
-    public static float floor(float b0) {
-        return (float) Math.floor(b0);
-    }
-
-    public static BigDecimal floor(BigDecimal b0) {
-        return b0.setScale(0, RoundingMode.FLOOR);
-    }
-
-    /** SQL <code>FLOOR</code> operator applied to byte values. */
-    public static byte floor(byte b0, byte b1) {
-        return (byte) floor((int) b0, (int) b1);
-    }
-
-    /** SQL <code>FLOOR</code> operator applied to short values. */
-    public static short floor(short b0, short b1) {
-        return (short) floor((int) b0, (int) b1);
-    }
-
-    /** SQL <code>FLOOR</code> operator applied to int values. */
-    public static int floor(int b0, int b1) {
-        int r = b0 % b1;
-        if (r < 0) {
-            r += b1;
-        }
-        return b0 - r;
-    }
-
-    /** SQL <code>FLOOR</code> operator applied to long values. */
-    public static long floor(long b0, long b1) {
-        long r = b0 % b1;
-        if (r < 0) {
-            r += b1;
-        }
-        return b0 - r;
-    }
-
-    // temporary
-    public static BigDecimal floor(BigDecimal b0, int b1) {
-        return floor(b0, BigDecimal.valueOf(b1));
-    }
-
-    // temporary
-    public static int floor(int b0, BigDecimal b1) {
-        return floor(b0, b1.intValue());
-    }
-
-    public static BigDecimal floor(BigDecimal b0, BigDecimal b1) {
-        final BigDecimal[] bigDecimals = b0.divideAndRemainder(b1);
-        BigDecimal r = bigDecimals[1];
-        if (r.signum() < 0) {
-            r = r.add(b1);
-        }
-        return b0.subtract(r);
-    }
-
-    // CEIL
-
-    public static double ceil(double b0) {
-        return Math.ceil(b0);
-    }
-
-    public static float ceil(float b0) {
-        return (float) Math.ceil(b0);
-    }
-
-    public static BigDecimal ceil(BigDecimal b0) {
-        return b0.setScale(0, RoundingMode.CEILING);
-    }
-
-    /** SQL <code>CEIL</code> operator applied to byte values. */
-    public static byte ceil(byte b0, byte b1) {
-        return floor((byte) (b0 + b1 - 1), b1);
-    }
-
-    /** SQL <code>CEIL</code> operator applied to short values. */
-    public static short ceil(short b0, short b1) {
-        return floor((short) (b0 + b1 - 1), b1);
-    }
-
-    /** SQL <code>CEIL</code> operator applied to int values. */
-    public static int ceil(int b0, int b1) {
-        int r = b0 % b1;
-        if (r > 0) {
-            r -= b1;
-        }
-        return b0 - r;
-    }
-
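    // Illustrative sketch (editor's addition, not part of the deleted file):
    // the mirror image of floor(int, int), landing on the next multiple at or
    // above b0.
    private static void ceilSketch() {
        assert ceil(7, 3) == 9;   // r = 1 becomes -2, so 7 - (-2) = 9
        assert ceil(-7, 3) == -6; // r = -1 stays, so -7 - (-1) = -6
    }
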
-    /** SQL <code>CEIL</code> operator applied to long values. */
-    public static long ceil(long b0, long b1) {
-        return floor(b0 + b1 - 1, b1);
-    }
-
-    // temporary
-    public static BigDecimal ceil(BigDecimal b0, int b1) {
-        return ceil(b0, BigDecimal.valueOf(b1));
-    }
-
-    // temporary
-    public static int ceil(int b0, BigDecimal b1) {
-        return ceil(b0, b1.intValue());
-    }
-
-    public static BigDecimal ceil(BigDecimal b0, BigDecimal b1) {
-        final BigDecimal[] bigDecimals = b0.divideAndRemainder(b1);
-        BigDecimal r = bigDecimals[1];
-        if (r.signum() > 0) {
-            r = r.subtract(b1);
-        }
-        return b0.subtract(r);
-    }
-
-    // ABS
-
-    /** SQL <code>ABS</code> operator applied to byte values. */
-    public static byte abs(byte b0) {
-        return (byte) Math.abs(b0);
-    }
-
-    /** SQL <code>ABS</code> operator applied to short values. */
-    public static short abs(short b0) {
-        return (short) Math.abs(b0);
-    }
-
-    /** SQL <code>ABS</code> operator applied to int values. */
-    public static int abs(int b0) {
-        return Math.abs(b0);
-    }
-
-    /** SQL <code>ABS</code> operator applied to long values. */
-    public static long abs(long b0) {
-        return Math.abs(b0);
-    }
-
-    /** SQL <code>ABS</code> operator applied to float values. */
-    public static float abs(float b0) {
-        return Math.abs(b0);
-    }
-
-    /** SQL <code>ABS</code> operator applied to double values. */
-    public static double abs(double b0) {
-        return Math.abs(b0);
-    }
-
-    /** SQL <code>ABS</code> operator applied to BigDecimal values. */
-    public static BigDecimal abs(BigDecimal b0) {
-        return b0.abs();
-    }
-
-    // ACOS
-    /** SQL <code>ACOS</code> operator applied to long values. */
-    public static double acos(long b0) {
-        return Math.acos(b0);
-    }
-
-    /** SQL <code>ACOS</code> operator applied to BigDecimal values. */
-    public static double acos(BigDecimal b0) {
-        return Math.acos(b0.doubleValue());
-    }
-
-    /** SQL <code>ACOS</code> operator applied to double values. */
-    public static double acos(double b0) {
-        return Math.acos(b0);
-    }
-
-    // ASIN
-    /** SQL <code>ASIN</code> operator applied to long values. */
-    public static double asin(long b0) {
-        return Math.asin(b0);
-    }
-
-    /** SQL <code>ASIN</code> operator applied to BigDecimal values. */
-    public static double asin(BigDecimal b0) {
-        return Math.asin(b0.doubleValue());
-    }
-
-    /** SQL <code>ASIN</code> operator applied to double values. */
-    public static double asin(double b0) {
-        return Math.asin(b0);
-    }
-
-    // ATAN
-    /** SQL <code>ATAN</code> operator applied to long values. */
-    public static double atan(long b0) {
-        return Math.atan(b0);
-    }
-
-    /** SQL <code>ATAN</code> operator applied to BigDecimal values. */
-    public static double atan(BigDecimal b0) {
-        return Math.atan(b0.doubleValue());
-    }
-
-    /** SQL <code>ATAN</code> operator applied to double values. */
-    public static double atan(double b0) {
-        return Math.atan(b0);
-    }
-
-    // ATAN2
-    /** SQL <code>ATAN2</code> operator applied to long values. */
-    public static double atan2(long b0, long b1) {
-        return Math.atan2(b0, b1);
-    }
-
-    /** SQL <code>ATAN2</code> operator applied to long/BigDecimal values. */
-    public static double atan2(long b0, BigDecimal b1) {
-        return Math.atan2(b0, b1.doubleValue());
-    }
-
-    /** SQL <code>ATAN2</code> operator applied to BigDecimal values. */
-    public static double atan2(BigDecimal b0, BigDecimal b1) {
-        return Math.atan2(b0.doubleValue(), b1.doubleValue());
-    }
-
-    /** SQL <code>ATAN2</code> operator applied to double values. */
-    public static double atan2(double b0, double b1) {
-        return Math.atan2(b0, b1);
-    }
-
-    // COS
-    /** SQL <code>COS</code> operator applied to long values. */
-    public static double cos(long b0) {
-        return Math.cos(b0);
-    }
-
-    /** SQL <code>COS</code> operator applied to BigDecimal values. */
-    public static double cos(BigDecimal b0) {
-        return Math.cos(b0.doubleValue());
-    }
-
-    /** SQL <code>COS</code> operator applied to double values. */
-    public static double cos(double b0) {
-        return Math.cos(b0);
-    }
-
-    // COT
-    /** SQL <code>COT</code> operator applied to long values. */
-    public static double cot(long b0) {
-        return 1.0d / Math.tan(b0);
-    }
-
-    /** SQL <code>COT</code> operator applied to BigDecimal values. */
-    public static double cot(BigDecimal b0) {
-        return 1.0d / Math.tan(b0.doubleValue());
-    }
-
-    /** SQL <code>COT</code> operator applied to double values. */
-    public static double cot(double b0) {
-        return 1.0d / Math.tan(b0);
-    }
-
-    // DEGREES
-    /** SQL <code>DEGREES</code> operator applied to long values. */
-    public static double degrees(long b0) {
-        return Math.toDegrees(b0);
-    }
-
-    /** SQL <code>DEGREES</code> operator applied to BigDecimal values. */
-    public static double degrees(BigDecimal b0) {
-        return Math.toDegrees(b0.doubleValue());
-    }
-
-    /** SQL <code>DEGREES</code> operator applied to double values. */
-    public static double degrees(double b0) {
-        return Math.toDegrees(b0);
-    }
-
-    // RADIANS
-    /** SQL <code>RADIANS</code> operator applied to long values. */
-    public static double radians(long b0) {
-        return Math.toRadians(b0);
-    }
-
-    /** SQL <code>RADIANS</code> operator applied to BigDecimal values. */
-    public static double radians(BigDecimal b0) {
-        return Math.toRadians(b0.doubleValue());
-    }
-
-    /** SQL <code>RADIANS</code> operator applied to double values. */
-    public static double radians(double b0) {
-        return Math.toRadians(b0);
-    }
-
-    // SQL ROUND
-    /** SQL <code>ROUND</code> operator applied to long values. */
-    public static int sround(int b0, int b1) {
-        return sround(BigDecimal.valueOf(b0), b1).intValue();
-    }
-
-    /** SQL <code>ROUND</code> operator applied to long values. */
-    public static long sround(long b0, int b1) {
-        return sround(BigDecimal.valueOf(b0), b1).longValue();
-    }
-
-    /** SQL <code>ROUND</code> operator applied to BigDecimal values. */
-    public static BigDecimal sround(BigDecimal b0, int b1) {
-        return b0.movePointRight(b1).setScale(0, RoundingMode.HALF_UP).movePointLeft(b1);
-    }
-
-    /** SQL <code>ROUND</code> operator applied to double values. */
-    public static double sround(double b0, int b1) {
-        return sround(BigDecimal.valueOf(b0), b1).doubleValue();
-    }
-
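    // Illustrative sketch (editor's addition, not part of the deleted file):
    // sround() shifts the decimal point so the target digit sits in the ones
    // place, rounds HALF_UP, then shifts back.
    private static void sroundSketch() {
        assert sround(new BigDecimal("12.345"), 2).compareTo(new BigDecimal("12.35")) == 0;
        assert sround(12345L, -2) == 12300L; // a negative scale rounds left of the point
    }
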
-    // SQL TRUNCATE
-    /** SQL <code>TRUNCATE</code> operator applied to int values. */
-    public static int struncate(int b0, int b1) {
-        return struncate(BigDecimal.valueOf(b0), b1).intValue();
-    }
-
-    /** SQL <code>TRUNCATE</code> operator applied to long values. */
-    public static long struncate(long b0, int b1) {
-        return struncate(BigDecimal.valueOf(b0), b1).longValue();
-    }
-
-    /** SQL <code>TRUNCATE</code> operator applied to BigDecimal values. */
-    public static BigDecimal struncate(BigDecimal b0, int b1) {
-        return b0.movePointRight(b1).setScale(0, RoundingMode.DOWN).movePointLeft(b1);
-    }
-
-    /** SQL <code>TRUNCATE</code> operator applied to double values. */
-    public static double struncate(double b0, int b1) {
-        return struncate(BigDecimal.valueOf(b0), b1).doubleValue();
-    }
-
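    // Illustrative sketch (editor's addition, not part of the deleted file):
    // the same point-shifting trick as sround(), but RoundingMode.DOWN drops
    // the extra digits, truncating toward zero.
    private static void struncateSketch() {
        assert struncate(new BigDecimal("12.999"), 2).compareTo(new BigDecimal("12.99")) == 0;
        assert struncate(-12999L, -3) == -12000L;
    }
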
-    // SIGN
-    /** SQL <code>SIGN</code> operator applied to int values. */
-    public static int sign(int b0) {
-        return Integer.signum(b0);
-    }
-
-    /** SQL <code>SIGN</code> operator applied to long values. */
-    public static long sign(long b0) {
-        return Long.signum(b0);
-    }
-
-    /** SQL <code>SIGN</code> operator applied to BigDecimal values. */
-    public static BigDecimal sign(BigDecimal b0) {
-        return BigDecimal.valueOf(b0.signum());
-    }
-
-    /** SQL <code>SIGN</code> operator applied to double values. */
-    public static double sign(double b0) {
-        return Math.signum(b0);
-    }
-
-    // SIN
-    /** SQL <code>SIN</code> operator applied to long values. */
-    public static double sin(long b0) {
-        return Math.sin(b0);
-    }
-
-    /** SQL <code>SIN</code> operator applied to BigDecimal values. */
-    public static double sin(BigDecimal b0) {
-        return Math.sin(b0.doubleValue());
-    }
-
-    /** SQL <code>SIN</code> operator applied to double values. */
-    public static double sin(double b0) {
-        return Math.sin(b0);
-    }
-
-    // TAN
-    /** SQL <code>TAN</code> operator applied to long values. */
-    public static double tan(long b0) {
-        return Math.tan(b0);
-    }
-
-    /** SQL <code>TAN</code> operator applied to BigDecimal values. */
-    public static double tan(BigDecimal b0) {
-        return Math.tan(b0.doubleValue());
-    }
-
-    /** SQL <code>TAN</code> operator applied to double values. */
-    public static double tan(double b0) {
-        return Math.tan(b0);
-    }
-
-    // Helpers
-
-    /** Helper for implementing MIN. Somewhat similar to LEAST operator. */
-    public static <T extends Comparable<T>> T lesser(T b0, T b1) {
-        return b0 == null || b0.compareTo(b1) > 0 ? b1 : b0;
-    }
-
-    /** LEAST operator. */
-    public static <T extends Comparable<T>> T least(T b0, T b1) {
-        return b0 == null || b1 != null && b0.compareTo(b1) > 0 ? b1 : b0;
-    }
-
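    // Illustrative sketch (editor's addition, not part of the deleted file):
    // least() skips NULL on either side, whereas lesser() only guards its
    // first argument and would hit a NullPointerException on lesser(x, null).
    private static void leastSketch() {
        assert least(null, 5).equals(5);
        assert least(5, null).equals(5);
        assert least(3, 5).equals(3);
    }
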
-    public static boolean greater(boolean b0, boolean b1) {
-        return b0 || b1;
-    }
-
-    public static boolean lesser(boolean b0, boolean b1) {
-        return b0 && b1;
-    }
-
-    public static byte greater(byte b0, byte b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static byte lesser(byte b0, byte b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    public static char greater(char b0, char b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static char lesser(char b0, char b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    public static short greater(short b0, short b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static short lesser(short b0, short b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    public static int greater(int b0, int b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static int lesser(int b0, int b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    public static long greater(long b0, long b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static long lesser(long b0, long b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    public static float greater(float b0, float b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static float lesser(float b0, float b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    public static double greater(double b0, double b1) {
-        return b0 > b1 ? b0 : b1;
-    }
-
-    public static double lesser(double b0, double b1) {
-        return b0 > b1 ? b1 : b0;
-    }
-
-    /** Helper for implementing MAX. Somewhat similar to GREATEST operator. */
-    public static <T extends Comparable<T>> T greater(T b0, T b1) {
-        return b0 == null || b0.compareTo(b1) < 0 ? b1 : b0;
-    }
-
-    /** GREATEST operator. */
-    public static <T extends Comparable<T>> T greatest(T b0, T b1) {
-        return b0 == null || b1 != null && b0.compareTo(b1) < 0 ? b1 : b0;
-    }
-
-    /** Boolean comparison. */
-    public static int compare(boolean x, boolean y) {
-        return x == y ? 0 : x ? 1 : -1;
-    }
-
-    /** CAST(FLOAT AS VARCHAR). */
-    public static String toString(float x) {
-        if (x == 0) {
-            return "0E0";
-        }
-        BigDecimal bigDecimal = new BigDecimal(x, MathContext.DECIMAL32).stripTrailingZeros();
-        final String s = bigDecimal.toString();
-        return s.replaceAll("0*E", "E").replace("E+", "E");
-    }
-
-    /** CAST(DOUBLE AS VARCHAR). */
-    public static String toString(double x) {
-        if (x == 0) {
-            return "0E0";
-        }
-        BigDecimal bigDecimal = new BigDecimal(x, MathContext.DECIMAL64).stripTrailingZeros();
-        final String s = bigDecimal.toString();
-        return s.replaceAll("0*E", "E").replace("E+", "E");
-    }
-
-    /** CAST(DECIMAL AS VARCHAR). */
-    public static String toString(BigDecimal x) {
-        final String s = x.toString();
-        if (s.startsWith("0")) {
-            // we want ".1" not "0.1"
-            return s.substring(1);
-        } else if (s.startsWith("-0")) {
-            // we want "-.1" not "-0.1"
-            return "-" + s.substring(2);
-        } else {
-            return s;
-        }
-    }
-
-    /** CAST(BOOLEAN AS VARCHAR). */
-    public static String toString(boolean x) {
-        // Boolean.toString returns lower case -- no good.
-        return x ? "TRUE" : "FALSE";
-    }
-
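    // Illustrative sketch (editor's addition, not part of the deleted file):
    // these CAST helpers emit SQL's canonical text forms rather than Java's
    // defaults -- scientific notation without '+', no leading zero, upper-case
    // booleans.
    private static void toStringSketch() {
        assert toString(1000d).equals("1E3");
        assert toString(new BigDecimal("-0.1")).equals("-.1");
        assert toString(true).equals("TRUE");
    }
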
-    @NonDeterministic
-    private static Object cannotConvert(Object o, Class toType) {
-        throw new RuntimeException("Cannot convert " + o + " to " + toType);
-    }
-
-    /** CAST(VARCHAR AS BOOLEAN). */
-    public static boolean toBoolean(String s) {
-        s = trim_(s, true, true, ' ');
-        if (s.equalsIgnoreCase("TRUE")) {
-            return true;
-        } else if (s.equalsIgnoreCase("FALSE")) {
-            return false;
-        } else {
-            throw new RuntimeException("Invalid character for cast");
-        }
-    }
-
-    public static boolean toBoolean(Number number) {
-        // Compare numerically: equals(0) matches only an Integer zero, and
-        // would misread Long or Double zeros as true.
-        return number.doubleValue() != 0.0d;
-    }
-
-    public static boolean toBoolean(Object o) {
-        return o instanceof Boolean ? (Boolean) o
-                : o instanceof Number ? toBoolean((Number) o)
-                        : o instanceof String ? toBoolean((String) o) : (Boolean) cannotConvert(o, boolean.class);
-    }
-
-    // Don't need parseByte etc. - Byte.parseByte is sufficient.
-
-    public static byte toByte(Object o) {
-        return o instanceof Byte ? (Byte) o : o instanceof Number ? toByte((Number) o) : Byte.parseByte(o.toString());
-    }
-
-    public static byte toByte(Number number) {
-        return number.byteValue();
-    }
-
-    public static char toChar(String s) {
-        return s.charAt(0);
-    }
-
-    public static Character toCharBoxed(String s) {
-        return s.charAt(0);
-    }
-
-    public static short toShort(String s) {
-        return Short.parseShort(s.trim());
-    }
-
-    public static short toShort(Number number) {
-        return number.shortValue();
-    }
-
-    public static short toShort(Object o) {
-        return o instanceof Short ? (Short) o
-                : o instanceof Number ? toShort((Number) o)
-                        : o instanceof String ? toShort((String) o) : (Short) cannotConvert(o, short.class);
-    }
-
-    /** Converts the Java type used for UDF parameters of SQL DATE type
-     * ({@link java.sql.Date}) to internal representation (int).
-     *
-     * <p>Converse of {@link #internalToDate(int)}. */
-    public static int toInt(java.util.Date v) {
-        return toInt(v, LOCAL_TZ);
-    }
-
-    public static int toInt(java.util.Date v, TimeZone timeZone) {
-        return (int) (toLong(v, timeZone) / DateTimeUtils.MILLIS_PER_DAY);
-    }
-
-    public static Integer toIntOptional(java.util.Date v) {
-        return v == null ? null : toInt(v);
-    }
-
-    public static Integer toIntOptional(java.util.Date v, TimeZone timeZone) {
-        return v == null ? null : toInt(v, timeZone);
-    }
-
-    public static long toLong(Date v) {
-        return toLong(v, LOCAL_TZ);
-    }
-
-    /** Converts the Java type used for UDF parameters of SQL TIME type
-     * ({@link java.sql.Time}) to internal representation (int).
-     *
-     * <p>Converse of {@link #internalToTime(int)}. */
-    public static int toInt(java.sql.Time v) {
-        return (int) (toLong(v) % DateTimeUtils.MILLIS_PER_DAY);
-    }
-
-    public static Integer toIntOptional(java.sql.Time v) {
-        return v == null ? null : toInt(v);
-    }
-
-    public static int toInt(String s) {
-        return Integer.parseInt(s.trim());
-    }
-
-    public static int toInt(Number number) {
-        return number.intValue();
-    }
-
-    public static int toInt(Object o) {
-        return o instanceof Integer ? (Integer) o
-                : o instanceof Number ? toInt((Number) o)
-                        : o instanceof String ? toInt((String) o)
-                                : o instanceof java.util.Date ? toInt((java.util.Date) o)
-                                        : (Integer) cannotConvert(o, int.class);
-    }
-
-    /** Converts the Java type used for UDF parameters of SQL TIMESTAMP type
-     * ({@link java.sql.Timestamp}) to internal representation (long).
-     *
-     * <p>Converse of {@link #internalToTimestamp(long)}. */
-    public static long toLong(Timestamp v) {
-        return toLong(v, LOCAL_TZ);
-    }
-
-    // mainly intended for java.sql.Timestamp but works for other dates also
-    public static long toLong(java.util.Date v, TimeZone timeZone) {
-        final long time = v.getTime();
-        return time + timeZone.getOffset(time);
-    }
-
-    // mainly intended for java.sql.Timestamp but works for other dates also
-    public static Long toLongOptional(java.util.Date v) {
-        return v == null ? null : toLong(v, LOCAL_TZ);
-    }
-
-    public static Long toLongOptional(Timestamp v, TimeZone timeZone) {
-        if (v == null) {
-            return null;
-        }
-        return toLong(v, timeZone);
-    }
-
-    public static long toLong(String s) {
-        if (s.startsWith("199") && s.contains(":")) {
-            return Timestamp.valueOf(s).getTime();
-        }
-        return Long.parseLong(s.trim());
-    }
-
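    // Editor's note on toLong(String) above: the startsWith("199") branch is a
    // narrow heuristic -- only strings that look like 1990s timestamps (e.g.
    // "1996-01-01 00:00:00") take the Timestamp.valueOf path; everything else
    // must be a plain long literal.
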
-    public static long toLong(Number number) {
-        return number.longValue();
-    }
-
-    public static long toLong(Object o) {
-        return o instanceof Long ? (Long) o
-                : o instanceof Number ? toLong((Number) o)
-                        : o instanceof String ? toLong((String) o) : (Long) cannotConvert(o, long.class);
-    }
-
-    public static float toFloat(String s) {
-        return Float.parseFloat(s.trim());
-    }
-
-    public static float toFloat(Number number) {
-        return number.floatValue();
-    }
-
-    public static float toFloat(Object o) {
-        return o instanceof Float ? (Float) o
-                : o instanceof Number ? toFloat((Number) o)
-                        : o instanceof String ? toFloat((String) o) : (Float) cannotConvert(o, float.class);
-    }
-
-    public static double toDouble(String s) {
-        return Double.parseDouble(s.trim());
-    }
-
-    public static double toDouble(Number number) {
-        return number.doubleValue();
-    }
-
-    public static double toDouble(Object o) {
-        return o instanceof Double ? (Double) o
-                : o instanceof Number ? toDouble((Number) o)
-                        : o instanceof String ? toDouble((String) o) : (Double) cannotConvert(o, double.class);
-    }
-
-    public static BigDecimal toBigDecimal(String s) {
-        return new BigDecimal(s.trim());
-    }
-
-    public static BigDecimal toBigDecimal(Number number) {
-        // There are some values of "long" that cannot be represented as "double".
-        // Not so "int". If it isn't a long, go straight to double.
-        return number instanceof BigDecimal ? (BigDecimal) number
-                : number instanceof BigInteger ? new BigDecimal((BigInteger) number)
-                        : number instanceof Long ? new BigDecimal(number.longValue())
-                                : new BigDecimal(number.doubleValue());
-    }
-
-    public static BigDecimal toBigDecimal(Object o) {
-        return o instanceof Number ? toBigDecimal((Number) o) : toBigDecimal(o.toString());
-    }
-
-    /** Converts the internal representation of a SQL DATE (int) to the Java
-     * type used for UDF parameters ({@link java.sql.Date}). */
-    public static java.sql.Date internalToDate(int v) {
-        final long t = v * DateTimeUtils.MILLIS_PER_DAY;
-        return new java.sql.Date(t - LOCAL_TZ.getOffset(t));
-    }
-
-    /** As {@link #internalToDate(int)} but allows nulls. */
-    public static java.sql.Date internalToDate(Integer v) {
-        return v == null ? null : internalToDate(v.intValue());
-    }
-
-    /** Converts the internal representation of a SQL TIME (int) to the Java
-     * type used for UDF parameters ({@link java.sql.Time}). */
-    public static java.sql.Time internalToTime(int v) {
-        return new java.sql.Time(v - LOCAL_TZ.getOffset(v));
-    }
-
-    public static java.sql.Time internalToTime(Integer v) {
-        return v == null ? null : internalToTime(v.intValue());
-    }
-
-    /** Converts the internal representation of a SQL TIMESTAMP (long) to the Java
-     * type used for UDF parameters ({@link java.sql.Timestamp}). */
-    public static java.sql.Timestamp internalToTimestamp(long v) {
-        return new java.sql.Timestamp(v - LOCAL_TZ.getOffset(v));
-    }
-
-    public static java.sql.Timestamp internalToTimestamp(Long v) {
-        return v == null ? null : internalToTimestamp(v.longValue());
-    }
-
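    // Illustrative sketch (editor's addition, not part of the deleted file):
    // the to*/internalTo* pairs are inverses -- both shift by LOCAL_TZ's offset
    // so the internal value reads as UTC (ignoring DST edge cases).
    private static void internalConversionSketch() {
        long internal = 86400000L; // 1970-01-02 00:00:00 in internal millis
        assert toLong(internalToTimestamp(internal)) == internal;
        assert toInt(internalToDate(1)) == 1; // one day after the epoch
    }
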
-    // Don't need shortValueOf etc. - Short.valueOf is sufficient.
-
-    /** Helper for CAST(... AS VARCHAR(maxLength)). */
-    public static String truncate(String s, int maxLength) {
-        if (s == null) {
-            return null;
-        } else if (s.length() > maxLength) {
-            return s.substring(0, maxLength);
-        } else {
-            return s;
-        }
-    }
-
-    /** Helper for CAST(... AS CHAR(maxLength)). */
-    public static String truncateOrPad(String s, int maxLength) {
-        if (s == null) {
-            return null;
-        } else {
-            final int length = s.length();
-            if (length > maxLength) {
-                return s.substring(0, maxLength);
-            } else {
-                return length < maxLength ? Spaces.padRight(s, maxLength) : s;
-            }
-        }
-    }
-
-    /** Helper for CAST(... AS VARBINARY(maxLength)). */
-    public static ByteString truncate(ByteString s, int maxLength) {
-        if (s == null) {
-            return null;
-        } else if (s.length() > maxLength) {
-            return s.substring(0, maxLength);
-        } else {
-            return s;
-        }
-    }
-
-    /** Helper for CAST(... AS BINARY(maxLength)). */
-    public static ByteString truncateOrPad(ByteString s, int maxLength) {
-        if (s == null) {
-            return null;
-        } else {
-            final int length = s.length();
-            if (length > maxLength) {
-                return s.substring(0, maxLength);
-            } else if (length < maxLength) {
-                return s.concat(new ByteString(new byte[maxLength - length]));
-            } else {
-                return s;
-            }
-        }
-    }
-
-    /** SQL {@code POSITION(seek IN string)} function. */
-    public static int position(String seek, String s) {
-        return s.indexOf(seek) + 1;
-    }
-
-    /** SQL {@code POSITION(seek IN string)} function for byte strings. */
-    public static int position(ByteString seek, ByteString s) {
-        return s.indexOf(seek) + 1;
-    }
-
-    /** SQL {@code POSITION(seek IN string FROM integer)} function. */
-    public static int position(String seek, String s, int from) {
-        final int from0 = from - 1; // 0-based
-        if (from0 > s.length() || from0 < 0) {
-            return 0;
-        }
-
-        return s.indexOf(seek, from0) + 1;
-    }
-
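    // Illustrative sketch (editor's addition, not part of the deleted file):
    // POSITION is 1-based at both ends -- the 'from' argument and the result --
    // and a miss (or an out-of-range 'from') returns 0, never -1.
    private static void positionSketch() {
        assert position("b", "abcabc") == 2;
        assert position("b", "abcabc", 3) == 5;
        assert position("z", "abcabc") == 0;
    }
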
-    /** SQL {@code POSITION(seek IN string FROM integer)} function for byte
-     * strings. */
-    public static int position(ByteString seek, ByteString s, int from) {
-        final int from0 = from - 1;
-        if (from0 > s.length() || from0 < 0) {
-            return 0;
-        }
-
-        // ByteString doesn't have indexOf(ByteString, int) until avatica-1.9
-        // (see [CALCITE-1423]), so apply substring and find from there.
-        Bug.upgrade("in avatica-1.9, use ByteString.substring(ByteString, int)");
-        final int p = s.substring(from0).indexOf(seek);
-        if (p < 0) {
-            return 0;
-        }
-        return p + from;
-    }
-
-    /** Helper for rounding. Round(12345, 1000) returns 12000. */
-    public static long round(long v, long x) {
-        return truncate(v + x / 2, x);
-    }
-
-    /** Helper for truncation. Truncate(12345, 1000) returns 12000. */
-    public static long truncate(long v, long x) {
-        long remainder = v % x;
-        if (remainder < 0) {
-            remainder += x;
-        }
-        return v - remainder;
-    }
-
-    /** Helper for rounding. Round(12345, 1000) returns 12000. */
-    public static int round(int v, int x) {
-        return truncate(v + x / 2, x);
-    }
-
-    /** Helper for truncation. Truncate(12345, 1000) returns 12000. */
-    public static int truncate(int v, int x) {
-        int remainder = v % x;
-        if (remainder < 0) {
-            remainder += x;
-        }
-        return v - remainder;
-    }
-
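    // Illustrative sketch (editor's addition, not part of the deleted file):
    // truncate() floors to a multiple of x even for negative inputs, and
    // round() is just truncate() shifted by half an interval.
    private static void roundTruncateSketch() {
        assert truncate(12345, 1000) == 12000;
        assert truncate(-1, 1000) == -1000; // floors, not toward zero
        assert round(12500, 1000) == 13000; // half-up
    }
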
-    /** SQL {@code CURRENT_TIMESTAMP} function. */
-    @NonDeterministic
-    public static long currentTimestamp(DataContext root) {
-        // Cast required for JDK 1.6.
-        return (Long) DataContext.Variable.CURRENT_TIMESTAMP.get(root);
-    }
-
-    /** SQL {@code CURRENT_TIME} function. */
-    @NonDeterministic
-    public static int currentTime(DataContext root) {
-        int time = (int) (currentTimestamp(root) % DateTimeUtils.MILLIS_PER_DAY);
-        if (time < 0) {
-            time += DateTimeUtils.MILLIS_PER_DAY;
-        }
-        return time;
-    }
-
-    /** SQL {@code CURRENT_DATE} function. */
-    @NonDeterministic
-    public static int currentDate(DataContext root) {
-        final long timestamp = currentTimestamp(root);
-        int date = (int) (timestamp / DateTimeUtils.MILLIS_PER_DAY);
-        final int time = (int) (timestamp % DateTimeUtils.MILLIS_PER_DAY);
-        if (time < 0) {
-            --date;
-        }
-        return date;
-    }
-
-    /** SQL {@code LOCAL_TIMESTAMP} function. */
-    @NonDeterministic
-    public static long localTimestamp(DataContext root) {
-        // Cast required for JDK 1.6.
-        return (Long) DataContext.Variable.LOCAL_TIMESTAMP.get(root);
-    }
-
-    /** SQL {@code LOCAL_TIME} function. */
-    @NonDeterministic
-    public static int localTime(DataContext root) {
-        return (int) (localTimestamp(root) % DateTimeUtils.MILLIS_PER_DAY);
-    }
-
-    /** SQL {@code TRANSLATE(string, search_chars, replacement_chars)}
-     * function. */
-    public static String translate3(String s, String search, String replacement) {
-        return org.apache.commons.lang3.StringUtils.replaceChars(s, search, replacement);
-    }
-
-    /** SQL {@code REPLACE(string, search, replacement)} function. */
-    public static String replace(String s, String search, String replacement) {
-        return s.replace(search, replacement);
-    }
-
-    /** Helper for "array element reference". Caller has already ensured that
-     * array and index are not null. Index is 1-based, per SQL. */
-    public static Object arrayItem(List list, int item) {
-        if (item < 1 || item > list.size()) {
-            return null;
-        }
-        return list.get(item - 1);
-    }
-
-    /** Helper for "map element reference". Caller has already ensured that
-     * array and index are not null. Index is 1-based, per SQL. */
-    public static Object mapItem(Map map, Object item) {
-        return map.get(item);
-    }
-
-    /** Implements the {@code [ ... ]} operator on an object whose type is not
-     * known until runtime.
-     */
-    public static Object item(Object object, Object index) {
-        if (object instanceof Map) {
-            return mapItem((Map) object, index);
-        }
-        if (object instanceof List && index instanceof Number) {
-            return arrayItem((List) object, ((Number) index).intValue());
-        }
-        return null;
-    }
-
-    /** Like {@link #arrayItem}, but allows the list to be null. */
-    public static Object arrayItemOptional(List list, int item) {
-        if (list == null) {
-            return null;
-        }
-        return arrayItem(list, item);
-    }
-
-    /** Like {@link #mapItem}, but allows the map to be null. */
-    public static Object mapItemOptional(Map map, Object item) {
-        if (map == null) {
-            return null;
-        }
-        return mapItem(map, item);
-    }
-
-    /** Like {@link #item}, but allows the object to be null. */
-    public static Object itemOptional(Object object, Object index) {
-        if (object == null) {
-            return null;
-        }
-        return item(object, index);
-    }
-
-    /** NULL &rarr; FALSE, FALSE &rarr; FALSE, TRUE &rarr; TRUE. */
-    public static boolean isTrue(Boolean b) {
-        return b != null && b;
-    }
-
-    /** NULL &rarr; FALSE, FALSE &rarr; TRUE, TRUE &rarr; FALSE. */
-    public static boolean isFalse(Boolean b) {
-        return b != null && !b;
-    }
-
-    /** NULL &rarr; TRUE, FALSE &rarr; TRUE, TRUE &rarr; FALSE. */
-    public static boolean isNotTrue(Boolean b) {
-        return b == null || !b;
-    }
-
-    /** NULL &rarr; TRUE, FALSE &rarr; FALSE, TRUE &rarr; TRUE. */
-    public static boolean isNotFalse(Boolean b) {
-        return b == null || b;
-    }
-
-    /** NULL &rarr; NULL, FALSE &rarr; TRUE, TRUE &rarr; FALSE. */
-    public static Boolean not(Boolean b) {
-        return (b == null) ? null : !b;
-    }
-
-    /** Converts a JDBC array to a list. */
-    public static List arrayToList(final java.sql.Array a) {
-        if (a == null) {
-            return null;
-        }
-        try {
-            return Primitive.asList(a.getArray());
-        } catch (SQLException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /** Support the {@code CURRENT VALUE OF sequence} operator. */
-    @NonDeterministic
-    public static long sequenceCurrentValue(String key) {
-        return getAtomicLong(key).get();
-    }
-
-    /** Support the {@code NEXT VALUE OF sequence} operator. */
-    @NonDeterministic
-    public static long sequenceNextValue(String key) {
-        return getAtomicLong(key).incrementAndGet();
-    }
-
-    private static AtomicLong getAtomicLong(String key) {
-        final Map<String, AtomicLong> map = THREAD_SEQUENCES.get();
-        AtomicLong atomic = map.get(key);
-        if (atomic == null) {
-            atomic = new AtomicLong();
-            map.put(key, atomic);
-        }
-        return atomic;
-    }
-
-    /** Support the SLICE function. */
-    public static List slice(List list) {
-        return list;
-    }
-
-    /** Support the ELEMENT function. */
-    public static Object element(List list) {
-        switch (list.size()) {
-        case 0:
-            return null;
-        case 1:
-            return list.get(0);
-        default:
-            throw new RuntimeException("more than one value");
-        }
-    }
-
-    public static Function1<Object, Enumerable<ComparableList<Comparable>>> flatProduct(final int[] fieldCounts,
-            final boolean withOrdinality, final FlatProductInputType[] inputTypes) {
-        if (fieldCounts.length == 1) {
-            if (!withOrdinality && inputTypes[0] == FlatProductInputType.SCALAR) {
-                //noinspection unchecked
-                return (Function1) LIST_AS_ENUMERABLE;
-            } else {
-                return new Function1<Object, Enumerable<ComparableList<Comparable>>>() {
-                    public Enumerable<ComparableList<Comparable>> apply(Object row) {
-                        return p2(new Object[] { row }, fieldCounts, withOrdinality, inputTypes);
-                    }
-                };
-            }
-        }
-        return new Function1<Object, Enumerable<FlatLists.ComparableList<Comparable>>>() {
-            public Enumerable<FlatLists.ComparableList<Comparable>> apply(Object lists) {
-                return p2((Object[]) lists, fieldCounts, withOrdinality, inputTypes);
-            }
-        };
-    }
-
-    private static Enumerable<FlatLists.ComparableList<Comparable>> p2(Object[] lists, int[] fieldCounts,
-            boolean withOrdinality, FlatProductInputType[] inputTypes) {
-        final List<Enumerator<List<Comparable>>> enumerators = new ArrayList<>();
-        int totalFieldCount = 0;
-        for (int i = 0; i < lists.length; i++) {
-            int fieldCount = fieldCounts[i];
-            FlatProductInputType inputType = inputTypes[i];
-            Object inputObject = lists[i];
-            switch (inputType) {
-            case SCALAR:
-                @SuppressWarnings("unchecked")
-                List<Comparable> list = (List<Comparable>) inputObject;
-                enumerators
-                        .add(Linq4j.transform(Linq4j.enumerator(list), new Function1<Comparable, List<Comparable>>() {
-                            public List<Comparable> apply(Comparable a0) {
-                                return FlatLists.of(a0);
-                            }
-                        }));
-                break;
-            case LIST:
-                @SuppressWarnings("unchecked")
-                List<List<Comparable>> listList = (List<List<Comparable>>) inputObject;
-                enumerators.add(Linq4j.enumerator(listList));
-                break;
-            case MAP:
-                @SuppressWarnings("unchecked")
-                Map<Comparable, Comparable> map = (Map<Comparable, Comparable>) inputObject;
-                Enumerator<Entry<Comparable, Comparable>> enumerator = Linq4j.enumerator(map.entrySet());
-
-                Enumerator<List<Comparable>> transformed = Linq4j.transform(enumerator,
-                        new Function1<Entry<Comparable, Comparable>, List<Comparable>>() {
-                            public List<Comparable> apply(Entry<Comparable, Comparable> entry) {
-                                return FlatLists.<Comparable> of(entry.getKey(), entry.getValue());
-                            }
-                        });
-                enumerators.add(transformed);
-                break;
-            default:
-                break;
-            }
-            if (fieldCount < 0) {
-                ++totalFieldCount;
-            } else {
-                totalFieldCount += fieldCount;
-            }
-        }
-        if (withOrdinality) {
-            ++totalFieldCount;
-        }
-        return product(enumerators, totalFieldCount, withOrdinality);
-    }
-
-    public static Object[] array(Object... args) {
-        return args;
-    }
-
-    /** Similar to {@link Linq4j#product(Iterable)} but each resulting list
-     * implements {@link FlatLists.ComparableList}. */
-    public static <E extends Comparable> Enumerable<FlatLists.ComparableList<E>> product(
-            final List<Enumerator<List<E>>> enumerators, final int fieldCount, final boolean withOrdinality) {
-        return new AbstractEnumerable<FlatLists.ComparableList<E>>() {
-            public Enumerator<FlatLists.ComparableList<E>> enumerator() {
-                return new ProductComparableListEnumerator<>(enumerators, fieldCount, withOrdinality);
-            }
-        };
-    }
-
-    /** Adds a given number of months to a timestamp, represented as the number
-     * of milliseconds since the epoch. */
-    public static long addMonths(long timestamp, int m) {
-        final long millis = DateTimeUtils.floorMod(timestamp, DateTimeUtils.MILLIS_PER_DAY);
-        timestamp -= millis;
-        final long x = addMonths((int) (timestamp / DateTimeUtils.MILLIS_PER_DAY), m);
-        return x * DateTimeUtils.MILLIS_PER_DAY + millis;
-    }
-
-    /** Adds a given number of months to a date, represented as the number of
-     * days since the epoch. */
-    //override
-    public static int addMonths(int date, int m) {
-        int y0 = (int) DateTimeUtils.unixDateExtract(TimeUnitRange.YEAR, date);
-        int m0 = (int) DateTimeUtils.unixDateExtract(TimeUnitRange.MONTH, date);
-        int d0 = (int) DateTimeUtils.unixDateExtract(TimeUnitRange.DAY, date);
-        int y = (m + m0) / 12;
-        y0 += y;
-        m0 = m + m0 - y * 12;
-        if (m0 <= 0) {
-            m0 += 12;
-            assert m0 > 0;
-            y0--;
-        }
-        int last = lastDay(y0, m0);
-        if (d0 > last) {
-            d0 = last;
-        }
-        return DateTimeUtils.ymdToUnixDate(y0, m0, d0);
-    }
-
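    // Illustrative sketch (editor's addition, not part of the deleted file):
    // the day-of-month is clamped to the target month's last day, so one month
    // past Jan 31 is Feb 29 in a leap year.
    private static void addMonthsSketch() {
        int jan31 = DateTimeUtils.ymdToUnixDate(2000, 1, 31);
        assert addMonths(jan31, 1) == DateTimeUtils.ymdToUnixDate(2000, 2, 29);
    }
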
-    private static int lastDay(int y, int m) {
-        switch (m) {
-        case 2:
-            return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0) ? 29 : 28;
-        case 4:
-        case 6:
-        case 9:
-        case 11:
-            return 30;
-        default:
-            return 31;
-        }
-    }
-
-    /** Finds the number of months between two dates, each represented as the
-     * number of days since the epoch. */
-    public static int subtractMonths(int date0, int date1) {
-        if (date0 < date1) {
-            return -subtractMonths(date1, date0);
-        }
-        // Start with an estimate.
-        // Since no month has more than 31 days, the estimate is <= the true value.
-        int m = (date0 - date1) / 31;
-        for (;;) {
-            int date2 = addMonths(date1, m);
-            if (date2 >= date0) {
-                return m;
-            }
-            int date3 = addMonths(date1, m + 1);
-            if (date3 > date0) {
-                return m;
-            }
-            ++m;
-        }
-    }
-
-    public static int subtractMonths(long t0, long t1) {
-        final long millis0 = DateTimeUtils.floorMod(t0, DateTimeUtils.MILLIS_PER_DAY);
-        final int d0 = (int) DateTimeUtils.floorDiv(t0 - millis0, DateTimeUtils.MILLIS_PER_DAY);
-        final long millis1 = DateTimeUtils.floorMod(t1, DateTimeUtils.MILLIS_PER_DAY);
-        final int d1 = (int) DateTimeUtils.floorDiv(t1 - millis1, DateTimeUtils.MILLIS_PER_DAY);
-        int x = subtractMonths(d0, d1);
-        final long d2 = addMonths(d1, x);
-        if (d2 == d0 && millis0 < millis1) {
-            --x;
-        }
-        return x;
-    }
-
-    /** Enumerates over the cartesian product of the given lists, returning
-     * a comparable list for each row. */
-    private static class ProductComparableListEnumerator<E extends Comparable>
-            extends CartesianProductEnumerator<List<E>, FlatLists.ComparableList<E>> {
-        final E[] flatElements;
-        final List<E> list;
-        private final boolean withOrdinality;
-        private int ordinality;
-
-        ProductComparableListEnumerator(List<Enumerator<List<E>>> enumerators, int fieldCount, boolean withOrdinality) {
-            super(enumerators);
-            this.withOrdinality = withOrdinality;
-            flatElements = (E[]) new Comparable[fieldCount];
-            list = Arrays.asList(flatElements);
-        }
-
-        public FlatLists.ComparableList<E> current() {
-            int i = 0;
-            for (Object element : (Object[]) elements) {
-                final List list2 = (List) element;
-                Object[] a = list2.toArray();
-                System.arraycopy(a, 0, flatElements, i, a.length);
-                i += a.length;
-            }
-            if (withOrdinality) {
-                flatElements[i] = (E) Integer.valueOf(++ordinality); // 1-based
-            }
-            return FlatLists.ofComparable(list);
-        }
-    }
-
-    /** Type of argument passed into {@link #flatProduct}. */
-    public enum FlatProductInputType {
-        SCALAR, LIST, MAP
-    }
-
-}
-
-// End SqlFunctions.java
diff --git a/atopcalcite/src/main/java/org/apache/calcite/sql/type/SqlTypeUtil.java b/atopcalcite/src/main/java/org/apache/calcite/sql/type/SqlTypeUtil.java
deleted file mode 100644
index 320cd7c329..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/sql/type/SqlTypeUtil.java
+++ /dev/null
@@ -1,1336 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.calcite.sql.type;
-
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rel.type.RelDataTypeFamily;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
-import org.apache.calcite.sql.SqlCall;
-import org.apache.calcite.sql.SqlCallBinding;
-import org.apache.calcite.sql.SqlCollation;
-import org.apache.calcite.sql.SqlDataTypeSpec;
-import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.sql.validate.SqlValidator;
-import org.apache.calcite.sql.validate.SqlValidatorScope;
-import org.apache.calcite.sql.validate.SqlValidatorUtil;
-import org.apache.calcite.util.NumberUtil;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-import java.nio.charset.Charset;
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import static org.apache.calcite.util.Static.RESOURCE;
-
-/**
- * Contains utility methods used during SQL validation or type derivation.
- */
-public abstract class SqlTypeUtil {
-    //~ Methods ----------------------------------------------------------------
-
-    /**
-     * Checks whether two or more types are char-comparable.
-     *
-     * @return true if all operands are of char type and comparable, i.e. have
-     * the same charset and collations whose charsets match
-     */
-    public static boolean isCharTypeComparable(List<RelDataType> argTypes) {
-        assert argTypes != null;
-        assert argTypes.size() >= 2;
-
-        // Filter out ANY elements.
-        List<RelDataType> argTypes2 = Lists.newArrayList();
-        for (RelDataType t : argTypes) {
-            if (!isAny(t)) {
-                argTypes2.add(t);
-            }
-        }
-
-        for (Pair<RelDataType, RelDataType> pair : Pair.adjacents(argTypes2)) {
-            RelDataType t0 = pair.left;
-            RelDataType t1 = pair.right;
-
-            if (!inCharFamily(t0) || !inCharFamily(t1)) {
-                return false;
-            }
-
-            if (t0.getCharset() == null) {
-                throw new AssertionError("RelDataType object should have been assigned "
-                        + "a (default) charset when calling deriveType");
-            } else if (!t0.getCharset().equals(t1.getCharset())) {
-                return false;
-            }
-
-            if (t0.getCollation() == null) {
-                throw new AssertionError("RelDataType object should have been assigned "
-                        + "a (default) collation when calling deriveType");
-            } else if (!t0.getCollation().getCharset().equals(
-                    t1.getCollation().getCharset())) {
-                return false;
-            }
-        }
-
-        return true;
-    }
-
-    /**
-     * Returns whether the operands to a call are char type-comparable.
-     *
-     * @param binding        Binding of call to operands
-     * @param operands       Operands to check for compatibility; usually the
-     *                       operands of the bound call, but not always
-     * @param throwOnFailure Whether to throw an exception on failure
-     * @return whether operands are valid
-     */
-    public static boolean isCharTypeComparable(
-            SqlCallBinding binding,
-            List<SqlNode> operands,
-            boolean throwOnFailure) {
-        final SqlValidator validator = binding.getValidator();
-        final SqlValidatorScope scope = binding.getScope();
-        assert operands != null;
-        assert operands.size() >= 2;
-
-        if (!isCharTypeComparable(
-                deriveAndCollectTypes(validator, scope, operands))) {
-            if (throwOnFailure) {
-                String msg = "";
-                for (int i = 0; i < operands.size(); i++) {
-                    if (i > 0) {
-                        msg += ", ";
-                    }
-                    msg += operands.get(i).toString();
-                }
-                throw binding.newError(RESOURCE.operandNotComparable(msg));
-            }
-            return false;
-        }
-        return true;
-    }
-
-    /**
-     * Iterates over all operands, derives their types, and collects them into
-     * a list.
-     */
-    public static List<RelDataType> deriveAndCollectTypes(
-            SqlValidator validator,
-            SqlValidatorScope scope,
-            List<SqlNode> operands) {
-        // NOTE: Do not use an AbstractList. Don't want to be lazy. We want
-        // errors.
-        List<RelDataType> types = new ArrayList<RelDataType>();
-        for (SqlNode operand : operands) {
-            types.add(validator.deriveType(scope, operand));
-        }
-        return types;
-    }
-
-    /**
-     * Promotes a type to a row type (does nothing if it already is one).
-     *
-     * @param type      type to be promoted
-     * @param fieldName name to give field in row type; null for default of
-     *                  "ROW_VALUE"
-     * @return row type
-     */
-    public static RelDataType promoteToRowType(
-            RelDataTypeFactory typeFactory,
-            RelDataType type,
-            String fieldName) {
-        if (!type.isStruct()) {
-            if (fieldName == null) {
-                fieldName = "ROW_VALUE";
-            }
-            type = typeFactory.builder().add(fieldName, type).build();
-        }
-        return type;
-    }
-
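    // Illustrative sketch (editor's addition, not part of the deleted file;
    // assumes the stock SqlTypeFactoryImpl and RelDataTypeSystem.DEFAULT).
    private static void promoteToRowTypeSketch() {
        RelDataTypeFactory factory = new SqlTypeFactoryImpl(
                org.apache.calcite.rel.type.RelDataTypeSystem.DEFAULT);
        RelDataType intType = factory.createSqlType(SqlTypeName.INTEGER);
        RelDataType rowType = promoteToRowType(factory, intType, null);
        assert rowType.isStruct()
                && "ROW_VALUE".equals(rowType.getFieldList().get(0).getName());
    }
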
-    /**
-     * Recreates a given RelDataType with nullability iff any of the operands
-     * of a call are nullable.
-     */
-    public static RelDataType makeNullableIfOperandsAre(
-            final SqlValidator validator,
-            final SqlValidatorScope scope,
-            final SqlCall call,
-            RelDataType type) {
-        for (SqlNode operand : call.getOperandList()) {
-            RelDataType operandType = validator.deriveType(scope, operand);
-
-            if (containsNullable(operandType)) {
-                RelDataTypeFactory typeFactory = validator.getTypeFactory();
-                type = typeFactory.createTypeWithNullability(type, true);
-                break;
-            }
-        }
-        return type;
-    }
-
-    /**
-     * Recreates a given RelDataType with nullability iff any of the param
-     * argTypes are nullable.
-     */
-    public static RelDataType makeNullableIfOperandsAre(
-            final RelDataTypeFactory typeFactory,
-            final List<RelDataType> argTypes,
-            RelDataType type) {
-        Preconditions.checkNotNull(type);
-        if (containsNullable(argTypes)) {
-            type = typeFactory.createTypeWithNullability(type, true);
-        }
-        return type;
-    }
-
-    /**
-     * Returns whether every type in a list is nullable.
-     */
-    public static boolean allNullable(List<RelDataType> types) {
-        for (RelDataType type : types) {
-            if (!containsNullable(type)) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    /**
-     * Returns whether at least one type in a list is nullable.
-     */
-    public static boolean containsNullable(List<RelDataType> types) {
-        for (RelDataType type : types) {
-            if (containsNullable(type)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Determines whether a type or any of its fields (if a structured type) are
-     * nullable.
-     */
-    public static boolean containsNullable(RelDataType type) {
-        if (type.isNullable()) {
-            return true;
-        }
-        if (!type.isStruct()) {
-            return false;
-        }
-        for (RelDataTypeField field : type.getFieldList()) {
-            if (containsNullable(field.getType())) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Returns whether typeName equals type.getSqlTypeName(); always returns
-     * true if typeName is SqlTypeName.ANY.
-     */
-    public static boolean isOfSameTypeName(
-            SqlTypeName typeName,
-            RelDataType type) {
-        return SqlTypeName.ANY.equals(typeName)
-                || typeName.equals(type.getSqlTypeName());
-    }
-
-    /**
-     * Returns true if any element in <code>typeNames</code> matches
-     * type.getSqlTypeName().
-     *
-     * @see #isOfSameTypeName(SqlTypeName, RelDataType)
-     */
-    public static boolean isOfSameTypeName(
-            Collection<SqlTypeName> typeNames,
-            RelDataType type) {
-        for (SqlTypeName typeName : typeNames) {
-            if (isOfSameTypeName(typeName, type)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * @return true if type is DATE, TIME, or TIMESTAMP
-     */
-    public static boolean isDatetime(RelDataType type) {
-        return SqlTypeFamily.DATETIME.contains(type);
-    }
-
-    /**
-     * @return true if type is some kind of INTERVAL
-     */
-    public static boolean isInterval(RelDataType type) {
-        return SqlTypeFamily.DATETIME_INTERVAL.contains(type);
-    }
-
-    /**
-     * @return true if type is in SqlTypeFamily.Character
-     */
-    public static boolean inCharFamily(RelDataType type) {
-        return type.getFamily() == SqlTypeFamily.CHARACTER;
-    }
-
-    /**
-     * @return true if type is in SqlTypeFamily.Character
-     */
-    public static boolean inCharFamily(SqlTypeName typeName) {
-        return typeName.getFamily() == SqlTypeFamily.CHARACTER;
-    }
-
-    /**
-     * @return true if type is in SqlTypeFamily.Boolean
-     */
-    public static boolean inBooleanFamily(RelDataType type) {
-        return type.getFamily() == SqlTypeFamily.BOOLEAN;
-    }
-
-    /**
-     * @return true if two types are in same type family
-     */
-    public static boolean inSameFamily(RelDataType t1, RelDataType t2) {
-        return t1.getFamily() == t2.getFamily();
-    }
-
-    /**
-     * @return true if two types are in same type family, or one or the other is
-     * of type {@link SqlTypeName#NULL}.
-     */
-    public static boolean inSameFamilyOrNull(RelDataType t1, RelDataType t2) {
-        return (t1.getSqlTypeName() == SqlTypeName.NULL)
-                || (t2.getSqlTypeName() == SqlTypeName.NULL)
-                || (t1.getFamily() == t2.getFamily());
-    }
-
-    /**
-     * @return true if type family is either character or binary
-     */
-    public static boolean inCharOrBinaryFamilies(RelDataType type) {
-        return (type.getFamily() == SqlTypeFamily.CHARACTER)
-                || (type.getFamily() == SqlTypeFamily.BINARY);
-    }
-
-    /**
-     * @return true if type is a LOB of some kind
-     */
-    public static boolean isLob(RelDataType type) {
-        // TODO jvs 9-Dec-2004:  once we support LOB types
-        return false;
-    }
-
-    /**
-     * @return true if type is variable width with bounded precision
-     */
-    public static boolean isBoundedVariableWidth(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-        switch (typeName) {
-            case VARCHAR:
-            case VARBINARY:
-
-                // TODO angel 8-June-2005: Multiset should be LOB
-            case MULTISET:
-                return true;
-            default:
-                return false;
-        }
-    }
-
-    /**
-     * @return true if type is one of the integer types
-     */
-    public static boolean isIntType(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-        switch (typeName) {
-            case TINYINT:
-            case SMALLINT:
-            case INTEGER:
-            case BIGINT:
-                return true;
-            default:
-                return false;
-        }
-    }
-
-    /**
-     * @return true if type is decimal
-     */
-    public static boolean isDecimal(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-        return typeName == SqlTypeName.DECIMAL;
-    }
-
-    /**
-     * @return true if type is bigint
-     */
-    public static boolean isBigint(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-        return typeName == SqlTypeName.BIGINT;
-    }
-
-    /**
-     * @return true if type is numeric with exact precision
-     */
-    public static boolean isExactNumeric(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-        switch (typeName) {
-            case TINYINT:
-            case SMALLINT:
-            case INTEGER:
-            case BIGINT:
-            case DECIMAL:
-                return true;
-            default:
-                return false;
-        }
-    }
-
-    /** Returns whether a type's scale is set. */
-    public static boolean hasScale(RelDataType type) {
-        return type.getScale() != Integer.MIN_VALUE;
-    }
-
-    /**
-     * Returns the maximum value of an integral type, as a long value
-     */
-    public static long maxValue(RelDataType type) {
-        assert SqlTypeUtil.isIntType(type);
-        switch (type.getSqlTypeName()) {
-            case TINYINT:
-                return Byte.MAX_VALUE;
-            case SMALLINT:
-                return Short.MAX_VALUE;
-            case INTEGER:
-                return Integer.MAX_VALUE;
-            case BIGINT:
-                return Long.MAX_VALUE;
-            default:
-                throw Util.unexpected(type.getSqlTypeName());
-        }
-    }
-
-    /**
-     * @return true if type is numeric with approximate precision
-     */
-    public static boolean isApproximateNumeric(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-        switch (typeName) {
-            case FLOAT:
-            case REAL:
-            case DOUBLE:
-                return true;
-            default:
-                return false;
-        }
-    }
-
-    /**
-     * @return true if type is numeric
-     */
-    public static boolean isNumeric(RelDataType type) {
-        return isExactNumeric(type) || isApproximateNumeric(type);
-    }
-
-    /**
-     * Tests whether two types have the same name and structure, possibly with
-     * differing modifiers. For example, VARCHAR(1) and VARCHAR(10) are
-     * considered the same, while VARCHAR(1) and CHAR(1) are considered
-     * different. Likewise, VARCHAR(1) MULTISET and VARCHAR(10) MULTISET are
-     * considered the same.
-     *
-     * @return true if types have same name and structure
-     */
-    public static boolean sameNamedType(RelDataType t1, RelDataType t2) {
-        if (t1.isStruct() || t2.isStruct()) {
-            if (!t1.isStruct() || !t2.isStruct()) {
-                return false;
-            }
-            if (t1.getFieldCount() != t2.getFieldCount()) {
-                return false;
-            }
-            List<RelDataTypeField> fields1 = t1.getFieldList();
-            List<RelDataTypeField> fields2 = t2.getFieldList();
-            for (int i = 0; i < fields1.size(); ++i) {
-                if (!sameNamedType(
-                        fields1.get(i).getType(),
-                        fields2.get(i).getType())) {
-                    return false;
-                }
-            }
-            return true;
-        }
-        RelDataType comp1 = t1.getComponentType();
-        RelDataType comp2 = t2.getComponentType();
-        if ((comp1 != null) || (comp2 != null)) {
-            if ((comp1 == null) || (comp2 == null)) {
-                return false;
-            }
-            if (!sameNamedType(comp1, comp2)) {
-                return false;
-            }
-        }
-        return t1.getSqlTypeName() == t2.getSqlTypeName();
-    }
-
-    /**
-     * Computes the maximum number of bytes required to represent a value of a
-     * type having user-defined precision. This computation assumes no overhead
-     * such as length indicators and NUL-terminators. Complex types for which
-     * multiple representations are possible (e.g. DECIMAL or TIMESTAMP) return
-     * 0.
-     *
-     * @param type type for which to compute storage
-     * @return maximum bytes, or 0 for a fixed-width type or type with unknown
-     * maximum
-     */
-    public static int getMaxByteSize(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-
-        if (typeName == null) {
-            return 0;
-        }
-
-        switch (typeName) {
-            case CHAR:
-            case VARCHAR:
-                return (int) Math.ceil(
-                        ((double) type.getPrecision())
-                                * type.getCharset().newEncoder().maxBytesPerChar());
-
-            case BINARY:
-            case VARBINARY:
-                return type.getPrecision();
-
-            case MULTISET:
-
-                // TODO Wael Jan-24-2005: Need a better way to tell fennel this
-                // number. This is a very generic place and implementation details
-                // like this don't belong here. Waiting to change this once we have
-                // blob support.
-                return 4096;
-
-            default:
-                return 0;
-        }
-    }
-
-    /**
-     * Determines the minimum unscaled value of a numeric type
-     *
-     * @param type a numeric type
-     */
-    public static long getMinValue(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        switch (typeName) {
-            case TINYINT:
-                return Byte.MIN_VALUE;
-            case SMALLINT:
-                return Short.MIN_VALUE;
-            case INTEGER:
-                return Integer.MIN_VALUE;
-            case BIGINT:
-            case DECIMAL:
-                return NumberUtil.getMinUnscaled(type.getPrecision()).longValue();
-            default:
-                throw new AssertionError("getMinValue(" + typeName + ")");
-        }
-    }
-
-    /**
-     * Determines the maximum unscaled value of a numeric type
-     *
-     * @param type a numeric type
-     */
-    public static long getMaxValue(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        switch (typeName) {
-            case TINYINT:
-                return Byte.MAX_VALUE;
-            case SMALLINT:
-                return Short.MAX_VALUE;
-            case INTEGER:
-                return Integer.MAX_VALUE;
-            case BIGINT:
-            case DECIMAL:
-                return NumberUtil.getMaxUnscaled(type.getPrecision()).longValue();
-            default:
-                throw new AssertionError("getMaxValue(" + typeName + ")");
-        }
-    }
-
-    /**
-     * @return true if type has a representation as a Java primitive (ignoring
-     * nullability)
-     */
-    @Deprecated // to be removed before 2.0
-    public static boolean isJavaPrimitive(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return false;
-        }
-
-        switch (typeName) {
-            case BOOLEAN:
-            case TINYINT:
-            case SMALLINT:
-            case INTEGER:
-            case BIGINT:
-            case FLOAT:
-            case REAL:
-            case DOUBLE:
-            case SYMBOL:
-                return true;
-            default:
-                return false;
-        }
-    }
-
-    /**
-     * @return class name of the wrapper for the primitive data type.
-     */
-    @Deprecated // to be removed before 2.0
-    public static String getPrimitiveWrapperJavaClassName(RelDataType type) {
-        if (type == null) {
-            return null;
-        }
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return null;
-        }
-
-        switch (typeName) {
-            case BOOLEAN:
-                return "Boolean";
-            default:
-                //noinspection deprecation
-                return getNumericJavaClassName(type);
-        }
-    }
-
-    /**
-     * @return class name of the numeric data type.
-     */
-    @Deprecated // to be removed before 2.0
-    public static String getNumericJavaClassName(RelDataType type) {
-        if (type == null) {
-            return null;
-        }
-        SqlTypeName typeName = type.getSqlTypeName();
-        if (typeName == null) {
-            return null;
-        }
-
-        switch (typeName) {
-            case TINYINT:
-                return "Byte";
-            case SMALLINT:
-                return "Short";
-            case INTEGER:
-                return "Integer";
-            case BIGINT:
-                return "Long";
-            case REAL:
-                return "Float";
-            case DECIMAL:
-            case FLOAT:
-            case DOUBLE:
-                return "Double";
-            default:
-                return null;
-        }
-    }
-
-    private static boolean isAny(RelDataType t) {
-        return t.getFamily() == SqlTypeFamily.ANY;
-    }
-
-    /**
-     * Tests whether a value can be assigned to a site.
-     *
-     * @param toType   type of the target site
-     * @param fromType type of the source value
-     * @return true iff assignable
-     */
-    public static boolean canAssignFrom(
-            RelDataType toType,
-            RelDataType fromType) {
-        if (isAny(toType) || isAny(fromType)) {
-            return true;
-        }
-
-        // TODO jvs 2-Jan-2005:  handle all the other cases like
-        // rows, collections, UDT's
-        if (fromType.getSqlTypeName() == SqlTypeName.NULL) {
-            // REVIEW jvs 4-Dec-2008: We allow assignment from NULL to any
-            // type, including NOT NULL types, since in the case where no
-            // rows are actually processed, the assignment is legal
-            // (FRG-365).  However, it would be better if the validator's
-            // NULL type inference guaranteed that we had already
-            // assigned a real (nullable) type to every NULL literal.
-            return true;
-        }
-
-        if (fromType.getSqlTypeName() == SqlTypeName.ARRAY) {
-            if (toType.getSqlTypeName() != SqlTypeName.ARRAY) {
-                return false;
-            }
-            return canAssignFrom(toType.getComponentType(), fromType.getComponentType());
-        }
-
-        if (areCharacterSetsMismatched(toType, fromType)) {
-            return false;
-        }
-
-        return toType.getFamily() == fromType.getFamily();
-    }
-
-    /**
-     * Determines whether two types have mismatched character sets. If one
-     * or the other type has no character set (e.g. in a cast from INT to
-     * VARCHAR), that does not count as a mismatch.
-     *
-     * @param t1 first type
-     * @param t2 second type
-     * @return true iff mismatched
-     */
-    public static boolean areCharacterSetsMismatched(
-            RelDataType t1,
-            RelDataType t2) {
-        if (isAny(t1) || isAny(t2)) {
-            return false;
-        }
-
-        Charset cs1 = t1.getCharset();
-        Charset cs2 = t2.getCharset();
-        if ((cs1 != null) && (cs2 != null)) {
-            if (!cs1.equals(cs2)) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Compares two types and returns true if fromType can be cast to toType.
-     *
-     * <p>REVIEW jvs 17-Dec-2004: the coerce param below shouldn't really be
-     * necessary. We're using it as a hack because
-     * {@link SqlTypeFactoryImpl#leastRestrictiveSqlType} isn't complete enough
-     * yet.  Once it is, this param (and the non-coerce rules of
-     * {@link SqlTypeAssignmentRules}) should go away.
-     *
-     * @param toType   target of assignment
-     * @param fromType source of assignment
-     * @param coerce   if true, the SQL rules for CAST are used; if false, the
-     *                 rules are similar to Java; e.g. you can't assign short x =
-     *                 (int) y, and you can't assign int x = (String) z.
-     * @return true iff cast is legal
-     */
-    public static boolean canCastFrom(
-            RelDataType toType,
-            RelDataType fromType,
-            boolean coerce) {
-        if (toType == fromType) {
-            return true;
-        }
-        if (isAny(toType) || isAny(fromType)) {
-            return true;
-        }
-
-        final SqlTypeName fromTypeName = fromType.getSqlTypeName();
-        final SqlTypeName toTypeName = toType.getSqlTypeName();
-        if (toType.isStruct() || fromType.isStruct()) {
-            if (toTypeName == SqlTypeName.DISTINCT) {
-                if (fromTypeName == SqlTypeName.DISTINCT) {
-                    // can't cast between different distinct types
-                    return false;
-                }
-                return canCastFrom(
-                        toType.getFieldList().get(0).getType(), fromType, coerce);
-            } else if (fromTypeName == SqlTypeName.DISTINCT) {
-                return canCastFrom(
-                        toType, fromType.getFieldList().get(0).getType(), coerce);
-            } else if (toTypeName == SqlTypeName.ROW) {
-                if (fromTypeName != SqlTypeName.ROW) {
-                    return false;
-                }
-                int n = toType.getFieldCount();
-                if (fromType.getFieldCount() != n) {
-                    return false;
-                }
-                for (int i = 0; i < n; ++i) {
-                    RelDataTypeField toField = toType.getFieldList().get(i);
-                    RelDataTypeField fromField = fromType.getFieldList().get(i);
-                    if (!canCastFrom(
-                            toField.getType(),
-                            fromField.getType(),
-                            coerce)) {
-                        return false;
-                    }
-                }
-                return true;
-            } else if (toTypeName == SqlTypeName.MULTISET) {
-                if (!fromType.isStruct()) {
-                    return false;
-                }
-                if (fromTypeName != SqlTypeName.MULTISET) {
-                    return false;
-                }
-                return canCastFrom(
-                        toType.getComponentType(),
-                        fromType.getComponentType(),
-                        coerce);
-            } else if (fromTypeName == SqlTypeName.MULTISET) {
-                return false;
-            } else {
-                return toType.getFamily() == fromType.getFamily();
-            }
-        }
-        RelDataType c1 = toType.getComponentType();
-        if (c1 != null) {
-            RelDataType c2 = fromType.getComponentType();
-            if (c2 == null) {
-                return false;
-            }
-            return canCastFrom(c1, c2, coerce);
-        }
-        if ((isInterval(fromType) && isExactNumeric(toType))
-                || (isInterval(toType) && isExactNumeric(fromType))) {
-            IntervalSqlType intervalType =
-                    (IntervalSqlType) (isInterval(fromType) ? fromType : toType);
-            if (!intervalType.getIntervalQualifier().isSingleDatetimeField()) {
-                // Casts between intervals and exact numerics must involve
-                // intervals with a single datetime field.
-                return false;
-            }
-        }
-        if (toTypeName == null || fromTypeName == null) {
-            return false;
-        }
-
-        // REVIEW jvs 9-Feb-2009: we don't impose SQL rules for character sets
-        // here; instead, we do that in SqlCastFunction.  The reason is that
-        // this method is called from at least one place (MedJdbcNameDirectory)
-        // where internally a cast across character repertoires is OK.  Should
-        // probably clean that up.
-
-        SqlTypeAssignmentRules rules = SqlTypeAssignmentRules.instance();
-        return rules.canCastFrom(toTypeName, fromTypeName, coerce);
-    }
-
-    /**
-     * Flattens a record type by recursively expanding any fields which are
-     * themselves record types. For each record type, a representative null
-     * value field is also prepended (with state NULL for a null value and FALSE
-     * for non-null), and all component types are asserted to be nullable, since
-     * SQL doesn't allow NOT NULL to be specified on attributes.
-     *
-     * @param typeFactory   factory which should produced flattened type
-     * @param recordType    type with possible nesting
-     * @param flatteningMap if non-null, receives map from unflattened ordinal
-     *                      to flattened ordinal (must have length at least
-     *                      recordType.getFieldList().size())
-     * @return flattened equivalent
-     */
-    public static RelDataType flattenRecordType(
-            RelDataTypeFactory typeFactory,
-            RelDataType recordType,
-            int[] flatteningMap) {
-        if (!recordType.isStruct()) {
-            return recordType;
-        }
-        List<RelDataTypeField> fieldList = new ArrayList<RelDataTypeField>();
-        boolean nested =
-                flattenFields(
-                        typeFactory,
-                        recordType,
-                        fieldList,
-                        flatteningMap);
-        if (!nested) {
-            return recordType;
-        }
-        List<RelDataType> types = new ArrayList<RelDataType>();
-        List<String> fieldNames = new ArrayList<String>();
-        int i = -1;
-        for (RelDataTypeField field : fieldList) {
-            ++i;
-            types.add(field.getType());
-            fieldNames.add(field.getName() + "_" + i);
-        }
-        return typeFactory.createStructType(types, fieldNames);
-    }
-
-    public static boolean needsNullIndicator(RelDataType recordType) {
-        // NOTE jvs 9-Mar-2005: It would be more storage-efficient to say that
-        // no null indicator is required for structured type columns declared
-        // as NOT NULL.  However, the uniformity of always having a null
-        // indicator makes things cleaner in many places.
-        return recordType.getSqlTypeName() == SqlTypeName.STRUCTURED;
-    }
-
-    private static boolean flattenFields(
-            RelDataTypeFactory typeFactory,
-            RelDataType type,
-            List<RelDataTypeField> list,
-            int[] flatteningMap) {
-        boolean nested = false;
-        if (needsNullIndicator(type)) {
-            // NOTE jvs 9-Mar-2005:  other code
-            // (e.g. RelStructuredTypeFlattener) relies on the
-            // null indicator field coming first.
-            RelDataType indicatorType =
-                    typeFactory.createSqlType(SqlTypeName.BOOLEAN);
-            if (type.isNullable()) {
-                indicatorType =
-                        typeFactory.createTypeWithNullability(
-                                indicatorType,
-                                true);
-            }
-            RelDataTypeField nullIndicatorField =
-                    new RelDataTypeFieldImpl(
-                            "NULL_VALUE",
-                            0,
-                            indicatorType);
-            list.add(nullIndicatorField);
-            nested = true;
-        }
-        for (RelDataTypeField field : type.getFieldList()) {
-            if (flatteningMap != null) {
-                flatteningMap[field.getIndex()] = list.size();
-            }
-            if (field.getType().isStruct()) {
-                nested = true;
-                flattenFields(
-                        typeFactory,
-                        field.getType(),
-                        list,
-                        null);
-            } else if (field.getType().getComponentType() != null) {
-                nested = true;
-
-                // TODO jvs 14-Feb-2005:  generalize to any kind of
-                // collection type
-                RelDataType flattenedCollectionType =
-                        typeFactory.createMultisetType(
-                                flattenRecordType(
-                                        typeFactory,
-                                        field.getType().getComponentType(),
-                                        null),
-                                -1);
-                field =
-                        new RelDataTypeFieldImpl(
-                                field.getName(),
-                                field.getIndex(),
-                                flattenedCollectionType);
-                list.add(field);
-            } else {
-                list.add(field);
-            }
-        }
-        return nested;
-    }
-
-    /**
-     * Converts an instance of RelDataType to an instance of SqlDataTypeSpec.
-     *
-     * @param type type descriptor
-     * @return corresponding parse representation
-     */
-    public static SqlDataTypeSpec convertTypeToSpec(RelDataType type) {
-        SqlTypeName typeName = type.getSqlTypeName();
-
-        // TODO jvs 28-Dec-2004:  support row types, user-defined types,
-        // interval types, multiset types, etc
-        assert typeName != null;
-        SqlIdentifier typeIdentifier =
-                new SqlIdentifier(
-                        typeName.name(),
-                        SqlParserPos.ZERO);
-
-        String charSetName = null;
-
-        if (inCharFamily(type)) {
-            charSetName = type.getCharset().name();
-            // TODO jvs 28-Dec-2004:  collation
-        }
-
-        // REVIEW jvs 28-Dec-2004:  discriminate between precision/scale
-        // zero and unspecified?
-
-        // REVIEW angel 11-Jan-2006:
-        // Use neg numbers to indicate unspecified precision/scale
-
-        if (typeName.allowsScale()) {
-            return new SqlDataTypeSpec(
-                    typeIdentifier,
-                    type.getPrecision(),
-                    type.getScale(),
-                    charSetName,
-                    null,
-                    SqlParserPos.ZERO);
-        } else if (typeName.allowsPrec()) {
-            return new SqlDataTypeSpec(
-                    typeIdentifier,
-                    type.getPrecision(),
-                    -1,
-                    charSetName,
-                    null,
-                    SqlParserPos.ZERO);
-        } else {
-            return new SqlDataTypeSpec(
-                    typeIdentifier,
-                    -1,
-                    -1,
-                    charSetName,
-                    null,
-                    SqlParserPos.ZERO);
-        }
-    }
-
-    public static RelDataType createMultisetType(
-            RelDataTypeFactory typeFactory,
-            RelDataType type,
-            boolean nullable) {
-        RelDataType ret = typeFactory.createMultisetType(type, -1);
-        return typeFactory.createTypeWithNullability(ret, nullable);
-    }
-
-    public static RelDataType createArrayType(
-            RelDataTypeFactory typeFactory,
-            RelDataType type,
-            boolean nullable) {
-        RelDataType ret = typeFactory.createArrayType(type, -1);
-        return typeFactory.createTypeWithNullability(ret, nullable);
-    }
-
-    public static RelDataType createMapType(
-            RelDataTypeFactory typeFactory,
-            RelDataType keyType,
-            RelDataType valueType,
-            boolean nullable) {
-        RelDataType ret = typeFactory.createMapType(keyType, valueType);
-        return typeFactory.createTypeWithNullability(ret, nullable);
-    }
-
-    /**
-     * Adds collation and charset to a character type, returns other types
-     * unchanged.
-     *
-     * @param type        Type
-     * @param typeFactory Type factory
-     * @return Type with added charset and collation, or unchanged type if it is
-     * not a char type.
-     */
-    public static RelDataType addCharsetAndCollation(
-            RelDataType type,
-            RelDataTypeFactory typeFactory) {
-        if (!inCharFamily(type)) {
-            return type;
-        }
-        Charset charset = type.getCharset();
-        if (charset == null) {
-            charset = typeFactory.getDefaultCharset();
-        }
-        SqlCollation collation = type.getCollation();
-        if (collation == null) {
-            collation = SqlCollation.IMPLICIT;
-        }
-
-        // todo: should get the implicit collation from repository
-        //   instead of null
-        type =
-                typeFactory.createTypeWithCharsetAndCollation(
-                        type,
-                        charset,
-                        collation);
-        SqlValidatorUtil.checkCharsetAndCollateConsistentIfCharType(type);
-        return type;
-    }
-
-    /**
-     * Returns whether two types are equal, ignoring nullability.
-     *
-     * <p>They need not come from the same factory.
-     *
-     * @param factory Type factory
-     * @param type1   First type
-     * @param type2   Second type
-     * @return whether types are equal, ignoring nullability
-     */
-    public static boolean equalSansNullability(
-            RelDataTypeFactory factory,
-            RelDataType type1,
-            RelDataType type2) {
-        if (type1.equals(type2)) {
-            return true;
-        }
-
-        if (isAny(type1) || isAny(type2)) {
-            return true;
-        }
-
-        if (type1.isNullable() == type2.isNullable()) {
-            // If types have the same nullability and they weren't equal above,
-            // they must be different.
-            return false;
-        }
-        return type1.equals(
-                factory.createTypeWithNullability(type2, type1.isNullable()));
-    }
-
-    /**
-     * Returns the ordinal of a given field in a record type, or -1 if the field
-     * is not found.
-     *
-     * @param type      Record type
-     * @param fieldName Name of field
-     * @return Ordinal of field
-     */
-    public static int findField(RelDataType type, String fieldName) {
-        List<RelDataTypeField> fields = type.getFieldList();
-        for (int i = 0; i < fields.size(); i++) {
-            RelDataTypeField field = fields.get(i);
-            if (field.getName().equals(fieldName)) {
-                return i;
-            }
-        }
-        return -1;
-    }
-
-    /**
-     * Selects data types of the specified fields from an input row type.
-     * This is useful when identifying data types of a function that is going
-     * to operate on inputs that are specified as field ordinals (e.g.
-     * aggregate calls).
-     *
-     * @param rowType input row type
-     * @param requiredFields ordinals of the projected fields
-     * @return list of data types that are requested by requiredFields
-     */
-    public static List<RelDataType> projectTypes(final RelDataType rowType,
-                                                 final List<? extends Number> requiredFields) {
-        final List<RelDataTypeField> fields = rowType.getFieldList();
-
-        return new AbstractList<RelDataType>() {
-            @Override public RelDataType get(int index) {
-                return fields.get(requiredFields.get(index).intValue()).getType();
-            }
-
-            @Override public int size() {
-                return requiredFields.size();
-            }
-        };
-    }
-
-    /**
-     * Creates a struct type with no fields.
-     *
-     * @param typeFactory Type factory
-     * @return Struct type with no fields
-     */
-    public static RelDataType createEmptyStructType(
-            RelDataTypeFactory typeFactory) {
-        return typeFactory.createStructType(
-                ImmutableList.<RelDataType>of(),
-                ImmutableList.<String>of());
-    }
-
-    /** Returns whether a type is flat. It is not flat if it is a record type that
-     * has one or more fields that are themselves record types. */
-    public static boolean isFlat(RelDataType type) {
-        if (type.isStruct()) {
-            for (RelDataTypeField field : type.getFieldList()) {
-                if (field.getType().isStruct()) {
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-
-    /**
-     * Returns whether two types are comparable. They need to be scalar types of
-     * the same family, or struct types whose fields are pairwise comparable.
-     *
-     * @param type1 First type
-     * @param type2 Second type
-     * @return Whether types are comparable
-     */
-    public static boolean isComparable(RelDataType type1, RelDataType type2) {
-        if (type1.isStruct() != type2.isStruct()) {
-            return false;
-        }
-
-        if (type1.isStruct()) {
-            int n = type1.getFieldCount();
-            if (n != type2.getFieldCount()) {
-                return false;
-            }
-            for (Pair<RelDataTypeField, RelDataTypeField> pair
-                    : Pair.zip(type1.getFieldList(), type2.getFieldList())) {
-                if (!isComparable(pair.left.getType(), pair.right.getType())) {
-                    return false;
-                }
-            }
-            return true;
-        }
-        RelDataTypeFamily family1 = null;
-        RelDataTypeFamily family2 = null;
-
-        // REVIEW jvs 2-June-2005:  This is needed to keep
-        // the Saffron type system happy.
-        if (type1.getSqlTypeName() != null) {
-            family1 = type1.getSqlTypeName().getFamily();
-        }
-        if (type2.getSqlTypeName() != null) {
-            family2 = type2.getSqlTypeName().getFamily();
-        }
-        if (family1 == null) {
-            family1 = type1.getFamily();
-        }
-        if (family2 == null) {
-            family2 = type2.getFamily();
-        }
-        if (family1 == family2) {
-            return true;
-        }
-
-        // If one of the operators is of type 'ANY', return true.
-        if (family1 == SqlTypeFamily.ANY
-                || family2 == SqlTypeFamily.ANY) {
-            return true;
-        }
-
-        // We can implicitly convert from character to datetime, numeric,
-        // or boolean types in a comparison
-        if (family1 == SqlTypeFamily.CHARACTER
-                && canConvertStringInCompare(family2)
-                || family2 == SqlTypeFamily.CHARACTER
-                && canConvertStringInCompare(family1)) {
-            return true;
-        }
-
-        // HACK POINT: allow boolean = integer (integer = boolean)
-        if (type1 instanceof BasicSqlType && type2 instanceof BasicSqlType) {
-            SqlTypeName typeName1 = ((BasicSqlType) type1).typeName;
-            SqlTypeName typeName2 = ((BasicSqlType) type2).typeName;
-            if (typeName1 == SqlTypeName.INTEGER
-                    && typeName2 == SqlTypeName.BOOLEAN
-                    || typeName1 == SqlTypeName.BOOLEAN
-                    && typeName2 == SqlTypeName.INTEGER) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    /** Returns whether a character data type can be implicitly converted to a
-     * given family in a compare operation. */
-    private static boolean canConvertStringInCompare(RelDataTypeFamily family) {
-        if (family instanceof SqlTypeFamily) {
-            SqlTypeFamily sqlTypeFamily = (SqlTypeFamily) family;
-            switch (sqlTypeFamily) {
-                case DATE:
-                case TIME:
-                case TIMESTAMP:
-                case INTERVAL_DAY_TIME:
-                case INTERVAL_YEAR_MONTH:
-                case NUMERIC:
-                case APPROXIMATE_NUMERIC:
-                case EXACT_NUMERIC:
-                case INTEGER:
-                case BOOLEAN:
-                    return true;
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Checks whether a type represents Unicode character data.
-     *
-     * @param type type to test
-     * @return whether type represents Unicode character data
-     */
-    public static boolean isUnicode(RelDataType type) {
-        Charset charset = type.getCharset();
-        if (charset == null) {
-            return false;
-        }
-        return charset.name().startsWith("UTF");
-    }
-
-    /** Returns the larger of two precisions, treating
-     * {@link RelDataType#PRECISION_NOT_SPECIFIED} as infinity. */
-    public static int maxPrecision(int p0, int p1) {
-        return (p0 == RelDataType.PRECISION_NOT_SPECIFIED
-                || p0 >= p1
-                && p1 != RelDataType.PRECISION_NOT_SPECIFIED) ? p0 : p1;
-    }
-
-    /** Compares two precisions, treating
-     * {@link RelDataType#PRECISION_NOT_SPECIFIED} as infinity; returns a
-     * positive, zero, or negative value as p0 is greater than, equal to,
-     * or less than p1. */
-    public static int comparePrecision(int p0, int p1) {
-        if (p0 == p1) {
-            return 0;
-        }
-        if (p0 == RelDataType.PRECISION_NOT_SPECIFIED) {
-            return 1;
-        }
-        if (p1 == RelDataType.PRECISION_NOT_SPECIFIED) {
-            return -1;
-        }
-        return Integer.compare(p0, p1);
-    }
-
-    public static boolean isArray(RelDataType type) {
-        return type.getSqlTypeName() == SqlTypeName.ARRAY;
-    }
-}
-
-// End SqlTypeUtil.java
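
For context, the deleted SqlTypeUtil above differs from stock Calcite mainly at the
"HACK POINT" in isComparable(), which accepts BOOLEAN versus INTEGER comparisons that
upstream rejects because the two type families differ. A minimal sketch of how that
surfaced through the public API while the override was in place (the demo class name
is hypothetical; the type-factory setup is plain Calcite):

    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeSystem;
    import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
    import org.apache.calcite.sql.type.SqlTypeName;
    import org.apache.calcite.sql.type.SqlTypeUtil;

    public class SqlTypeUtilDemo {
        public static void main(String[] args) {
            SqlTypeFactoryImpl factory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
            RelDataType intType = factory.createSqlType(SqlTypeName.INTEGER);
            RelDataType boolType = factory.createSqlType(SqlTypeName.BOOLEAN);
            RelDataType varcharType = factory.createSqlType(SqlTypeName.VARCHAR, 10);

            // Kylin's HACK POINT: BOOLEAN vs INTEGER is accepted by the override,
            // although the two belong to different SQL type families.
            System.out.println(SqlTypeUtil.isComparable(boolType, intType));

            // Unchanged upstream behavior: character data converts implicitly
            // against numeric/datetime families in comparisons.
            System.out.println(SqlTypeUtil.isComparable(varcharType, intType));

            // canCastFrom with coerce=true follows SQL CAST rules,
            // so INTEGER -> VARCHAR(10) is legal.
            System.out.println(SqlTypeUtil.canCastFrom(varcharType, intType, true));
        }
    }
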
diff --git a/atopcalcite/src/main/java/org/apache/calcite/sql2rel/RelFieldTrimmer.java b/atopcalcite/src/main/java/org/apache/calcite/sql2rel/RelFieldTrimmer.java
deleted file mode 100644
index 70d1b2abdf..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/sql2rel/RelFieldTrimmer.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.calcite.sql2rel;
-
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.sql.validate.SqlValidator;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.util.ReflectiveVisitor;
-
-/*
- * OVERRIDE POINT:
- * - disable the whole RelFieldTrimmer
- */
-
-public class RelFieldTrimmer implements ReflectiveVisitor {
-
-    public RelFieldTrimmer(SqlValidator validator, RelBuilder relBuilder) {
-    }
-
-    public RelFieldTrimmer(SqlValidator validator, RelOptCluster cluster, RelFactories.ProjectFactory projectFactory, RelFactories.FilterFactory filterFactory, RelFactories.JoinFactory joinFactory, RelFactories.SemiJoinFactory semiJoinFactory, RelFactories.SortFactory sortFactory, RelFactories.AggregateFactory aggregateFactory, RelFactories.SetOpFactory setOpFactory) {
-    }
-
-    public RelNode trim(RelNode rootRel) {
-        return rootRel;
-    }
-
-}
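
The stub above disables Calcite's field trimming wholesale: both constructors ignore
their arguments and trim() hands the input tree back untouched, so callers such as
SqlToRelConverter.trimUnusedFields() become no-ops without changing their code. A
minimal sketch of the resulting contract (validator, relBuilder and rootRel are
assumed to come from the surrounding planner code):

    // Hypothetical usage; `validator`, `relBuilder` and `rootRel` are
    // assumed to be built elsewhere by the planner.
    RelFieldTrimmer trimmer = new RelFieldTrimmer(validator, relBuilder);
    RelNode trimmed = trimmer.trim(rootRel);
    assert trimmed == rootRel; // identity: no fields are ever trimmed
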
diff --git a/atopcalcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java b/atopcalcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java
deleted file mode 100644
index 519a73b647..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java
+++ /dev/null
@@ -1,5656 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-package org.apache.calcite.sql2rel;
-
-import static org.apache.calcite.sql.SqlUtil.stripAs;
-import static org.apache.calcite.util.Static.RESOURCE;
-
-import java.lang.reflect.Type;
-import java.math.BigDecimal;
-import java.util.AbstractList;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.calcite.avatica.util.Spaces;
-import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.plan.Convention;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptSamplingParameters;
-import org.apache.calcite.plan.RelOptTable;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.prepare.Prepare;
-import org.apache.calcite.prepare.RelOptTableImpl;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelCollationImpl;
-import org.apache.calcite.rel.RelCollationTraitDef;
-import org.apache.calcite.rel.RelCollations;
-import org.apache.calcite.rel.RelFieldCollation;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.RelRoot;
-import org.apache.calcite.rel.SingleRel;
-import org.apache.calcite.rel.core.Aggregate;
-import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rel.core.Collect;
-import org.apache.calcite.rel.core.CorrelationId;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinInfo;
-import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.core.Sample;
-import org.apache.calcite.rel.core.Sort;
-import org.apache.calcite.rel.core.Uncollect;
-import org.apache.calcite.rel.logical.LogicalAggregate;
-import org.apache.calcite.rel.logical.LogicalCorrelate;
-import org.apache.calcite.rel.logical.LogicalFilter;
-import org.apache.calcite.rel.logical.LogicalIntersect;
-import org.apache.calcite.rel.logical.LogicalJoin;
-import org.apache.calcite.rel.logical.LogicalMatch;
-import org.apache.calcite.rel.logical.LogicalMinus;
-import org.apache.calcite.rel.logical.LogicalProject;
-import org.apache.calcite.rel.logical.LogicalSort;
-import org.apache.calcite.rel.logical.LogicalTableFunctionScan;
-import org.apache.calcite.rel.logical.LogicalTableModify;
-import org.apache.calcite.rel.logical.LogicalTableScan;
-import org.apache.calcite.rel.logical.LogicalUnion;
-import org.apache.calcite.rel.logical.LogicalValues;
-import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
-import org.apache.calcite.rel.metadata.RelColumnMapping;
-import org.apache.calcite.rel.metadata.RelMetadataQuery;
-import org.apache.calcite.rel.stream.Delta;
-import org.apache.calcite.rel.stream.LogicalDelta;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeFactory;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexCallBinding;
-import org.apache.calcite.rex.RexCorrelVariable;
-import org.apache.calcite.rex.RexDynamicParam;
-import org.apache.calcite.rex.RexFieldAccess;
-import org.apache.calcite.rex.RexFieldCollation;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexPatternFieldRef;
-import org.apache.calcite.rex.RexRangeRef;
-import org.apache.calcite.rex.RexShuttle;
-import org.apache.calcite.rex.RexSubQuery;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.rex.RexWindowBound;
-import org.apache.calcite.schema.ModifiableTable;
-import org.apache.calcite.schema.ModifiableView;
-import org.apache.calcite.schema.Table;
-import org.apache.calcite.schema.TranslatableTable;
-import org.apache.calcite.schema.Wrapper;
-import org.apache.calcite.sql.JoinConditionType;
-import org.apache.calcite.sql.JoinType;
-import org.apache.calcite.sql.SemiJoinType;
-import org.apache.calcite.sql.SqlAggFunction;
-import org.apache.calcite.sql.SqlBasicCall;
-import org.apache.calcite.sql.SqlCall;
-import org.apache.calcite.sql.SqlCallBinding;
-import org.apache.calcite.sql.SqlDataTypeSpec;
-import org.apache.calcite.sql.SqlDelete;
-import org.apache.calcite.sql.SqlDynamicParam;
-import org.apache.calcite.sql.SqlExplainFormat;
-import org.apache.calcite.sql.SqlExplainLevel;
-import org.apache.calcite.sql.SqlFunction;
-import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlInsert;
-import org.apache.calcite.sql.SqlIntervalQualifier;
-import org.apache.calcite.sql.SqlJoin;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlLiteral;
-import org.apache.calcite.sql.SqlMatchRecognize;
-import org.apache.calcite.sql.SqlMerge;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlNodeList;
-import org.apache.calcite.sql.SqlNumericLiteral;
-import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.SqlOperatorTable;
-import org.apache.calcite.sql.SqlOrderBy;
-import org.apache.calcite.sql.SqlSampleSpec;
-import org.apache.calcite.sql.SqlSelect;
-import org.apache.calcite.sql.SqlSelectKeyword;
-import org.apache.calcite.sql.SqlSetOperator;
-import org.apache.calcite.sql.SqlUnnestOperator;
-import org.apache.calcite.sql.SqlUpdate;
-import org.apache.calcite.sql.SqlUtil;
-import org.apache.calcite.sql.SqlValuesOperator;
-import org.apache.calcite.sql.SqlWindow;
-import org.apache.calcite.sql.SqlWith;
-import org.apache.calcite.sql.SqlWithItem;
-import org.apache.calcite.sql.fun.SqlCountAggFunction;
-import org.apache.calcite.sql.fun.SqlInOperator;
-import org.apache.calcite.sql.fun.SqlRowOperator;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.sql.type.SqlReturnTypeInference;
-import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.sql.type.SqlTypeUtil;
-import org.apache.calcite.sql.type.TableFunctionReturnTypeInference;
-import org.apache.calcite.sql.util.SqlBasicVisitor;
-import org.apache.calcite.sql.util.SqlVisitor;
-import org.apache.calcite.sql.validate.AggregatingSelectScope;
-import org.apache.calcite.sql.validate.CollectNamespace;
-import org.apache.calcite.sql.validate.DelegatingScope;
-import org.apache.calcite.sql.validate.ListScope;
-import org.apache.calcite.sql.validate.MatchRecognizeScope;
-import org.apache.calcite.sql.validate.ParameterScope;
-import org.apache.calcite.sql.validate.SelectScope;
-import org.apache.calcite.sql.validate.SqlMonotonicity;
-import org.apache.calcite.sql.validate.SqlNameMatcher;
-import org.apache.calcite.sql.validate.SqlQualified;
-import org.apache.calcite.sql.validate.SqlUserDefinedTableFunction;
-import org.apache.calcite.sql.validate.SqlUserDefinedTableMacro;
-import org.apache.calcite.sql.validate.SqlValidator;
-import org.apache.calcite.sql.validate.SqlValidatorImpl;
-import org.apache.calcite.sql.validate.SqlValidatorNamespace;
-import org.apache.calcite.sql.validate.SqlValidatorScope;
-import org.apache.calcite.sql.validate.SqlValidatorTable;
-import org.apache.calcite.sql.validate.SqlValidatorUtil;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.ImmutableIntList;
-import org.apache.calcite.util.Litmus;
-import org.apache.calcite.util.NlsString;
-import org.apache.calcite.util.NumberUtil;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-import org.apache.calcite.util.trace.CalciteTrace;
-import org.slf4j.Logger;
-
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-/*
- * The code is synced with Calcite. Hopefully one day we can remove these hardcoded override points.
- * OVERRIDE POINT:
- * - DEFAULT_IN_SUB_QUERY_THRESHOLD, was `20`, now `Integer.MAX_VALUE`
- * - isTrimUnusedFields(), override to false
- * - AggConverter.translateAgg(...), skip column reading for COUNT(COL), for https://jirap.corp.ebay.com/browse/KYLIN-104
- * - convertQuery(), call hackSelectStar() at the end
- * - createJoin() check cast operation
- */
-
-/**
- * Converts a SQL parse tree (consisting of
- * {@link org.apache.calcite.sql.SqlNode} objects) into a relational algebra
- * expression (consisting of {@link org.apache.calcite.rel.RelNode} objects).
- *
- * <p>The public entry points are: {@link #convertQuery},
- * {@link #convertExpression(SqlNode)}.
- */
-public class SqlToRelConverter {
-  //~ Static fields/initializers ---------------------------------------------
-
-  protected static final Logger SQL2REL_LOGGER =
-      CalciteTrace.getSqlToRelTracer();
-
-  private static final BigDecimal TWO = BigDecimal.valueOf(2L);
-
-  /** Size of the smallest IN list that will be converted to a semijoin to a
-   * static table. */
-  /* OVERRIDE POINT */
-  public static final int DEFAULT_IN_SUB_QUERY_THRESHOLD = Integer.MAX_VALUE;
-
-  @Deprecated // to be removed before 2.0
-  public static final int DEFAULT_IN_SUBQUERY_THRESHOLD =
-      DEFAULT_IN_SUB_QUERY_THRESHOLD;
-
-  //~ Instance fields --------------------------------------------------------
-
-  protected final SqlValidator validator;
-  protected final RexBuilder rexBuilder;
-  protected final Prepare.CatalogReader catalogReader;
-  protected final RelOptCluster cluster;
-  private SubQueryConverter subQueryConverter;
-  protected final List<RelNode> leaves = new ArrayList<>();
-  private final List<SqlDynamicParam> dynamicParamSqlNodes = new ArrayList<>();
-  private final SqlOperatorTable opTab;
-  protected final RelDataTypeFactory typeFactory;
-  private final SqlNodeToRexConverter exprConverter;
-  private int explainParamCount;
-  public final SqlToRelConverter.Config config;
-
-  /**
-   * Fields used in name resolution for correlated sub-queries.
-   */
-  private final Map<CorrelationId, DeferredLookup> mapCorrelToDeferred =
-      new HashMap<>();
-
-  /**
-   * Stack of names of datasets requested by the <code>
-   * TABLE(SAMPLE(&lt;datasetName&gt;, &lt;query&gt;))</code> construct.
-   */
-  private final Deque<String> datasetStack = new ArrayDeque<>();
-
-  /**
-   * Mapping of non-correlated sub-queries that have been converted to their
-   * equivalent constants. Used to avoid re-evaluating the sub-query if it's
-   * already been evaluated.
-   */
-  private final Map<SqlNode, RexNode> mapConvertedNonCorrSubqs =
-      new HashMap<>();
-
-  public final RelOptTable.ViewExpander viewExpander;
-
-  //~ Constructors -----------------------------------------------------------
-  /**
-   * Creates a converter.
-   *
-   * @param viewExpander    Preparing statement
-   * @param validator       Validator
-   * @param catalogReader   Schema
-   * @param planner         Planner
-   * @param rexBuilder      Rex builder
-   * @param convertletTable Expression converter
-   */
-  @Deprecated // to be removed before 2.0
-  public SqlToRelConverter(
-      RelOptTable.ViewExpander viewExpander,
-      SqlValidator validator,
-      Prepare.CatalogReader catalogReader,
-      RelOptPlanner planner,
-      RexBuilder rexBuilder,
-      SqlRexConvertletTable convertletTable) {
-    this(viewExpander, validator, catalogReader,
-        RelOptCluster.create(planner, rexBuilder), convertletTable,
-        Config.DEFAULT);
-  }
-
-  @Deprecated // to be removed before 2.0
-  public SqlToRelConverter(
-      RelOptTable.ViewExpander viewExpander,
-      SqlValidator validator,
-      Prepare.CatalogReader catalogReader,
-      RelOptCluster cluster,
-      SqlRexConvertletTable convertletTable) {
-    this(viewExpander, validator, catalogReader, cluster, convertletTable,
-        Config.DEFAULT);
-  }
-
-  /* Creates a converter. */
-  public SqlToRelConverter(
-      RelOptTable.ViewExpander viewExpander,
-      SqlValidator validator,
-      Prepare.CatalogReader catalogReader,
-      RelOptCluster cluster,
-      SqlRexConvertletTable convertletTable,
-      Config config) {
-    this.viewExpander = viewExpander;
-    this.opTab =
-        (validator
-            == null) ? SqlStdOperatorTable.instance()
-            : validator.getOperatorTable();
-    this.validator = validator;
-    this.catalogReader = catalogReader;
-    this.subQueryConverter = new NoOpSubQueryConverter();
-    this.rexBuilder = cluster.getRexBuilder();
-    this.typeFactory = rexBuilder.getTypeFactory();
-    this.cluster = Preconditions.checkNotNull(cluster);
-    this.exprConverter = new SqlNodeToRexConverterImpl(convertletTable);
-    this.explainParamCount = 0;
-    this.config = new ConfigBuilder().withConfig(config).build();
-  }
-
-  //~ Methods ----------------------------------------------------------------
-
-  /**
-   * @return the RelOptCluster in use.
-   */
-  public RelOptCluster getCluster() {
-    return cluster;
-  }
-
-  /**
-   * Returns the row-expression builder.
-   */
-  public RexBuilder getRexBuilder() {
-    return rexBuilder;
-  }
-
-  /**
-   * Returns the number of dynamic parameters encountered during translation;
-   * this must only be called after {@link #convertQuery}.
-   *
-   * @return number of dynamic parameters
-   */
-  public int getDynamicParamCount() {
-    return dynamicParamSqlNodes.size();
-  }
-
-  /**
-   * Returns the type inferred for a dynamic parameter.
-   *
-   * @param index 0-based index of dynamic parameter
-   * @return inferred type, never null
-   */
-  public RelDataType getDynamicParamType(int index) {
-    SqlNode sqlNode = dynamicParamSqlNodes.get(index);
-    if (sqlNode == null) {
-      throw Util.needToImplement("dynamic param type inference");
-    }
-    return validator.getValidatedNodeType(sqlNode);
-  }
-
-  /**
-   * Returns the current count of the number of dynamic parameters in an
-   * EXPLAIN PLAN statement.
-   *
-   * @param increment if true, increment the count
-   * @return the current count before the optional increment
-   */
-  public int getDynamicParamCountInExplain(boolean increment) {
-    int retVal = explainParamCount;
-    if (increment) {
-      ++explainParamCount;
-    }
-    return retVal;
-  }
-
-  /**
-   * @return mapping of non-correlated sub-queries that have been converted to
-   * the constants that they evaluate to
-   */
-  public Map<SqlNode, RexNode> getMapConvertedNonCorrSubqs() {
-    return mapConvertedNonCorrSubqs;
-  }
-
-  /**
-   * Adds to the current map of non-correlated converted sub-queries the
-   * elements from another map that contains non-correlated sub-queries that
-   * have been converted by another SqlToRelConverter.
-   *
-   * @param alreadyConvertedNonCorrSubqs the other map
-   */
-  public void addConvertedNonCorrSubqs(
-      Map<SqlNode, RexNode> alreadyConvertedNonCorrSubqs) {
-    mapConvertedNonCorrSubqs.putAll(alreadyConvertedNonCorrSubqs);
-  }
-
-  /**
-   * Sets a new SubQueryConverter. To have any effect, this must be called
-   * before any convert method.
-   *
-   * @param converter new SubQueryConverter
-   */
-  public void setSubQueryConverter(SubQueryConverter converter) {
-    subQueryConverter = converter;
-  }
-
-  /**
-   * Sets the number of dynamic parameters in the current EXPLAIN PLAN
-   * statement.
-   *
-   * @param explainParamCount number of dynamic parameters in the statement
-   */
-  public void setDynamicParamCountInExplain(int explainParamCount) {
-    assert config.isExplain();
-    this.explainParamCount = explainParamCount;
-  }
-
-  private void checkConvertedType(SqlNode query, RelNode result) {
-    if (query.isA(SqlKind.DML)) {
-      return;
-    }
-    // Verify that conversion from SQL to relational algebra did
-    // not perturb any type information.  (We can't do this if the
-    // SQL statement is something like an INSERT which has no
-    // validator type information associated with its result,
-    // hence the DML check above.)
-    final List<RelDataTypeField> validatedFields =
-        validator.getValidatedNodeType(query).getFieldList();
-    final RelDataType validatedRowType =
-        validator.getTypeFactory().createStructType(
-            Pair.right(validatedFields),
-            SqlValidatorUtil.uniquify(Pair.left(validatedFields),
-                catalogReader.nameMatcher().isCaseSensitive()));
-
-    final List<RelDataTypeField> convertedFields =
-        result.getRowType().getFieldList().subList(0, validatedFields.size());
-    final RelDataType convertedRowType =
-        validator.getTypeFactory().createStructType(convertedFields);
-
-    if (!RelOptUtil.equal("validated row type", validatedRowType,
-        "converted row type", convertedRowType, Litmus.IGNORE)) {
-      throw new AssertionError("Conversion to relational algebra failed to "
-          + "preserve datatypes:\n"
-          + "validated type:\n"
-          + validatedRowType.getFullTypeString()
-          + "\nconverted type:\n"
-          + convertedRowType.getFullTypeString()
-          + "\nrel:\n"
-          + RelOptUtil.toString(result));
-    }
-  }
-
-  public RelNode flattenTypes(
-      RelNode rootRel,
-      boolean restructure) {
-    RelStructuredTypeFlattener typeFlattener =
-        new RelStructuredTypeFlattener(rexBuilder, createToRelContext(), restructure);
-    return typeFlattener.rewrite(rootRel);
-  }
-
-  /**
-   * If sub-query is correlated and decorrelation is enabled, performs
-   * decorrelation.
-   *
-   * @param query   Query
-   * @param rootRel Root relational expression
-   * @return New root relational expression after decorrelation
-   */
-  public RelNode decorrelate(SqlNode query, RelNode rootRel) {
-    if (!enableDecorrelation()) {
-      return rootRel;
-    }
-    final RelNode result = decorrelateQuery(rootRel);
-    if (result != rootRel) {
-      checkConvertedType(query, result);
-    }
-    return result;
-  }
-
-  /**
-   * Walks over a tree of relational expressions, replacing each
-   * {@link RelNode} with a 'slimmed down' relational expression that projects
-   * only the fields required by its consumer.
-   *
-   * <p>This may make things easier for the optimizer, by removing crud that
-   * would expand the search space, but is difficult for the optimizer itself
-   * to do, because optimizer rules must preserve the number and type of
-   * fields. Hence this transform operates on the entire tree, similar
-   * to the {@link RelStructuredTypeFlattener type-flattening transform}.
-   *
-   * <p>Currently this functionality is disabled in farrago/luciddb; the
-   * default implementation of this method does nothing.
-   *
-   * @param ordered Whether the relational expression must produce results in
-   * a particular order (typically because it has an ORDER BY at top level)
-   * @param rootRel Relational expression that is at the root of the tree
-   * @return Trimmed relational expression
-   */
-  public RelNode trimUnusedFields(boolean ordered, RelNode rootRel) {
-    // Trim fields that are not used by their consumer.
-    if (isTrimUnusedFields()) {
-      final RelFieldTrimmer trimmer = newFieldTrimmer();
-      final List<RelCollation> collations =
-          rootRel.getTraitSet().getTraits(RelCollationTraitDef.INSTANCE);
-      rootRel = trimmer.trim(rootRel);
-      if (!ordered
-          && collations != null
-          && !collations.isEmpty()
-          && !collations.equals(ImmutableList.of(RelCollations.EMPTY))) {
-        final RelTraitSet traitSet = rootRel.getTraitSet()
-            .replace(RelCollationTraitDef.INSTANCE, collations);
-        rootRel = rootRel.copy(traitSet, rootRel.getInputs());
-      }
-      if (SQL2REL_LOGGER.isDebugEnabled()) {
-        SQL2REL_LOGGER.debug(
-            RelOptUtil.dumpPlan("Plan after trimming unused fields", rootRel,
-                SqlExplainFormat.TEXT, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
-      }
-    }
-    return rootRel;
-  }
-
-  /**
-   * Creates a RelFieldTrimmer.
-   *
-   * @return Field trimmer
-   */
-  protected RelFieldTrimmer newFieldTrimmer() {
-    final RelBuilder relBuilder =
-        RelFactories.LOGICAL_BUILDER.create(cluster, null);
-    return new RelFieldTrimmer(validator, relBuilder);
-  }
-
-  /**
-   * Converts an unvalidated query's parse tree into a relational expression.
-   *
-   * @param query           Query to convert
-   * @param needsValidation Whether to validate the query before converting;
-   *                        <code>false</code> if the query has already been
-   *                        validated.
-   * @param top             Whether the query is top-level, say if its result
-   *                        will become a JDBC result set; <code>false</code> if
-   *                        the query will be part of a view.
-   */
-  public RelRoot convertQuery(
-      SqlNode query,
-      final boolean needsValidation,
-      final boolean top) {
-
-    SqlNode origQuery = query; /* OVERRIDE POINT */
-
-    if (needsValidation) {
-      query = validator.validate(query);
-    }
-
-    RelMetadataQuery.THREAD_PROVIDERS.set(
-        JaninoRelMetadataProvider.of(cluster.getMetadataProvider()));
-    RelNode result = convertQueryRecursive(query, top, null).rel;
-    if (top) {
-      if (isStream(query)) {
-        result = new LogicalDelta(cluster, result.getTraitSet(), result);
-      }
-    }
-    RelCollation collation = RelCollations.EMPTY;
-    if (!query.isA(SqlKind.DML)) {
-      if (isOrdered(query)) {
-        collation = requiredCollation(result);
-      }
-    }
-    checkConvertedType(query, result);
-
-    if (SQL2REL_LOGGER.isDebugEnabled()) {
-      SQL2REL_LOGGER.debug(
-          RelOptUtil.dumpPlan("Plan after converting SqlNode to RelNode",
-              result, SqlExplainFormat.TEXT,
-              SqlExplainLevel.EXPPLAN_ATTRIBUTES));
-    }
-
-    final RelDataType validatedRowType = validator.getValidatedNodeType(query);
-    RelRoot origResult = RelRoot.of(result, validatedRowType, query.getKind())
-            .withCollation(collation);
-    return hackSelectStar(origQuery, origResult);
-  }
-
-  /* OVERRIDE POINT */
-  private RelRoot hackSelectStar(SqlNode query, RelRoot root) {
-    /*
-     * Rel tree is like:
-     *
-     *   LogicalSort (optional)
-     *    |- LogicalProject
-     *        |- LogicalFilter (optional)
-     *            |- OLAPTableScan or LogicalJoin
-     */
-    LogicalProject rootPrj = null;
-    LogicalSort rootSort = null;
-    if (root.rel instanceof LogicalProject) {
-      rootPrj = (LogicalProject) root.rel;
-    } else if (root.rel instanceof LogicalSort && root.rel.getInput(0) instanceof LogicalProject) {
-      rootPrj = (LogicalProject) root.rel.getInput(0);
-      rootSort = (LogicalSort) root.rel;
-    } else {
-      return root;
-    }
-
-    //
-    RelNode input = rootPrj.getInput();
-    //        if (!(//
-    //                isAmong(input, "OLAPTableScan", "LogicalJoin")//
-    //                || (isAmong(input, "LogicalFilter") && isAmong(input.getInput(0), "OLAPTableScan", "LogicalJoin"))//
-    //             ))
-    //            return root;
-    //
-    //        if (rootPrj.getRowType().getFieldCount() < input.getRowType().getFieldCount())
-    //            return root;
-
-    RelDataType inType = rootPrj.getRowType();
-    List<String> inFields = inType.getFieldNames();
-    List<RexNode> projExp = new ArrayList<>();
-    List<Pair<Integer, String>> projFields = new ArrayList<>();
-    Map<Integer, Integer> projFieldMapping = new HashMap<>();
-    RelDataTypeFactory.FieldInfoBuilder projTypeBuilder = getCluster().getTypeFactory().builder();
-    RelDataTypeFactory.FieldInfoBuilder validTypeBuilder = getCluster().getTypeFactory().builder();
-
-    boolean hiddenColumnExists = false;
-    for (int i = 0; i < root.validatedRowType.getFieldList().size(); i++) {
-      if (root.validatedRowType.getFieldNames().get(i).startsWith("_KY_")) {
-        hiddenColumnExists = true;
-      }
-    }
-    if (!hiddenColumnExists) {
-      return root;
-    }
-
-    for (int i = 0; i < inFields.size(); i++) {
-      if (!inFields.get(i).startsWith("_KY_")) {
-        projExp.add(rootPrj.getProjects().get(i));
-        projFieldMapping.put(i, projFields.size());
-        projFields.add(Pair.of(projFields.size(), inFields.get(i)));
-        projTypeBuilder.add(inType.getFieldList().get(i));
-
-        // for cases like kylin-it/src/test/resources/query/sql_verifyCount/query10.sql
-        if (i < root.validatedRowType.getFieldList().size()) {
-          validTypeBuilder.add(root.validatedRowType.getFieldList().get(i));
-        }
-      }
-    }
-
-    RelDataType projRowType = getCluster().getTypeFactory().createStructType(projTypeBuilder);
-    rootPrj = LogicalProject.create(input, projExp, projRowType);
-    if (rootSort != null) {
-      // For cases like kylin-it/src/test/resources/query/sql_verifyCount/query10.sql,
-      // the original RelCollation is stale; its field indexes must be remapped.
-      RelCollation originalCollation = rootSort.collation;
-      RelCollation newCollation = null;
-      List<RelFieldCollation> fieldCollations = originalCollation.getFieldCollations();
-      ImmutableList.Builder<RelFieldCollation> newFieldCollations = ImmutableList.builder();
-      for (RelFieldCollation fieldCollation : fieldCollations) {
-        if (projFieldMapping.containsKey(fieldCollation.getFieldIndex())) {
-          newFieldCollations.add(fieldCollation.copy(projFieldMapping.get(fieldCollation.getFieldIndex())));
-        } else {
-          newFieldCollations.add(fieldCollation);
-        }
-      }
-      newCollation = RelCollationImpl.of(newFieldCollations.build());
-      rootSort = LogicalSort.create(rootPrj, newCollation, rootSort.offset, rootSort.fetch);
-    }
-
-    RelDataType validRowType = getCluster().getTypeFactory().createStructType(validTypeBuilder);
-    root = new RelRoot(rootSort == null ? rootPrj : rootSort, validRowType,
-        root.kind, projFields,
-        rootSort == null ? root.collation : rootSort.getCollation());
-
-    validator.setValidatedNodeType(query, validRowType);
-
-    return root;
-  }
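-  /*
-   * Worked example (illustrative) of the hidden-column trimming above:
-   *
-   *   project fields: [DEPTNO, _KY_1, NAME, _KY_2]
-   *   kept fields:    [DEPTNO, NAME]
-   *   index mapping:  {0 -> 0, 2 -> 1}
-   *
-   * A sort collation on field 2 (NAME) is remapped to field 1, and the
-   * validated row type is rebuilt without the _KY_ columns.
-   */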
-
-  private static boolean isStream(SqlNode query) {
-    return query instanceof SqlSelect
-        && ((SqlSelect) query).isKeywordPresent(SqlSelectKeyword.STREAM);
-  }
-
-  public static boolean isOrdered(SqlNode query) {
-    switch (query.getKind()) {
-    case SELECT:
-      return ((SqlSelect) query).getOrderList() != null
-          && ((SqlSelect) query).getOrderList().size() > 0;
-    case WITH:
-      return isOrdered(((SqlWith) query).body);
-    case ORDER_BY:
-      return ((SqlOrderBy) query).orderList.size() > 0;
-    default:
-      return false;
-    }
-  }
-
-  private RelCollation requiredCollation(RelNode r) {
-    if (r instanceof Sort) {
-      return ((Sort) r).collation;
-    }
-    if (r instanceof Project) {
-      return requiredCollation(((Project) r).getInput());
-    }
-    if (r instanceof Delta) {
-      return requiredCollation(((Delta) r).getInput());
-    }
-    throw new AssertionError();
-  }
-
-  /**
-   * Converts a SELECT statement's parse tree into a relational expression.
-   */
-  public RelNode convertSelect(SqlSelect select, boolean top) {
-    final SqlValidatorScope selectScope = validator.getWhereScope(select);
-    final Blackboard bb = createBlackboard(selectScope, null, top);
-    convertSelectImpl(bb, select);
-    return bb.root;
-  }
-
-  /**
-   * Factory method for creating translation workspace.
-   */
-  protected Blackboard createBlackboard(SqlValidatorScope scope,
-                                        Map<String, RexNode> nameToNodeMap, boolean top) {
-    return new Blackboard(scope, nameToNodeMap, top);
-  }
-
-  /**
-   * Implementation of {@link #convertSelect(SqlSelect, boolean)};
-   * derived class may override.
-   */
-  protected void convertSelectImpl(
-      final Blackboard bb,
-      SqlSelect select) {
-    convertFrom(
-        bb,
-        select.getFrom());
-    convertWhere(
-        bb,
-        select.getWhere());
-
-    final List<SqlNode> orderExprList = new ArrayList<>();
-    final List<RelFieldCollation> collationList = new ArrayList<>();
-    gatherOrderExprs(
-        bb,
-        select,
-        select.getOrderList(),
-        orderExprList,
-        collationList);
-    final RelCollation collation =
-        cluster.traitSet().canonize(RelCollations.of(collationList));
-
-    if (validator.isAggregate(select)) {
-      convertAgg(
-          bb,
-          select,
-          orderExprList);
-    } else {
-      convertSelectList(
-          bb,
-          select,
-          orderExprList);
-    }
-
-    if (select.isDistinct()) {
-      distinctify(bb, true);
-    }
-    convertOrder(
-        select, bb, collation, orderExprList, select.getOffset(),
-        select.getFetch());
-    bb.setRoot(bb.root, true);
-  }
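-  /*
-   * For reference, the conversion order used above is: FROM, then WHERE,
-   * then the aggregate or plain select list, then DISTINCT, and finally
-   * ORDER BY with OFFSET/FETCH.
-   */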
-
-  /**
-   * Having translated 'SELECT ... FROM ... [GROUP BY ...] [HAVING ...]', adds
-   * a relational expression to make the results unique.
-   *
-   * <p>If the SELECT clause contains duplicate expressions, adds
-   * {@link org.apache.calcite.rel.logical.LogicalProject}s so that we are
-   * grouping on the minimal set of keys. The performance gain isn't huge, but
-   * it is difficult to detect these duplicate expressions later.
-   *
-   * @param bb               Blackboard
-   * @param checkForDupExprs Check for duplicate expressions
-   */
-  private void distinctify(
-      Blackboard bb,
-      boolean checkForDupExprs) {
-    // Look for duplicate expressions in the project.
-    // Say we have 'select x, y, x, z'.
-    // Then dups will be {[2, 0]}
-    // and oldToNew will be {[0, 0], [1, 1], [2, 0], [3, 2]}
-    RelNode rel = bb.root;
-    if (checkForDupExprs && (rel instanceof LogicalProject)) {
-      LogicalProject project = (LogicalProject) rel;
-      final List<RexNode> projectExprs = project.getProjects();
-      final List<Integer> origins = new ArrayList<>();
-      int dupCount = 0;
-      for (int i = 0; i < projectExprs.size(); i++) {
-        int x = findExpr(projectExprs.get(i), projectExprs, i);
-        if (x >= 0) {
-          origins.add(x);
-          ++dupCount;
-        } else {
-          origins.add(i);
-        }
-      }
-      if (dupCount == 0) {
-        distinctify(bb, false);
-        return;
-      }
-
-      final Map<Integer, Integer> squished = Maps.newHashMap();
-      final List<RelDataTypeField> fields = rel.getRowType().getFieldList();
-      final List<Pair<RexNode, String>> newProjects = Lists.newArrayList();
-      for (int i = 0; i < fields.size(); i++) {
-        if (origins.get(i) == i) {
-          squished.put(i, newProjects.size());
-          newProjects.add(RexInputRef.of2(i, fields));
-        }
-      }
-      rel =
-          LogicalProject.create(rel, Pair.left(newProjects),
-              Pair.right(newProjects));
-      bb.root = rel;
-      distinctify(bb, false);
-      rel = bb.root;
-
-      // Create the expressions to reverse the mapping.
-      // Project($0, $1, $0, $2).
-      final List<Pair<RexNode, String>> undoProjects = Lists.newArrayList();
-      for (int i = 0; i < fields.size(); i++) {
-        final int origin = origins.get(i);
-        RelDataTypeField field = fields.get(i);
-        undoProjects.add(
-            Pair.of(
-                (RexNode) new RexInputRef(
-                    squished.get(origin), field.getType()),
-                field.getName()));
-      }
-
-      rel =
-          LogicalProject.create(rel, Pair.left(undoProjects),
-              Pair.right(undoProjects));
-      bb.setRoot(
-          rel,
-          false);
-
-      return;
-    }
-
-    // Usual case: all of the expressions in the SELECT clause are
-    // different.
-    final ImmutableBitSet groupSet =
-        ImmutableBitSet.range(rel.getRowType().getFieldCount());
-    rel =
-        createAggregate(bb, false, groupSet, ImmutableList.of(groupSet),
-            ImmutableList.<AggregateCall>of());
-
-    bb.setRoot(
-        rel,
-        false);
-  }
-
-  private int findExpr(RexNode seek, List<RexNode> exprs, int count) {
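-    // Linear scan over the earlier expressions; equality is decided by
-    // comparing string digests rather than object identity.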
-    for (int i = 0; i < count; i++) {
-      RexNode expr = exprs.get(i);
-      if (expr.toString().equals(seek.toString())) {
-        return i;
-      }
-    }
-    return -1;
-  }
-
-  /**
-   * Converts a query's ORDER BY clause, if any.
-   *
-   * @param select        Query
-   * @param bb            Blackboard
-   * @param collation     Collation list
-   * @param orderExprList Method populates this list with orderBy expressions
-   *                      not present in selectList
-   * @param offset        Expression for number of rows to discard before
-   *                      returning first row
-   * @param fetch         Expression for number of rows to fetch
-   */
-  protected void convertOrder(
-      SqlSelect select,
-      Blackboard bb,
-      RelCollation collation,
-      List<SqlNode> orderExprList,
-      SqlNode offset,
-      SqlNode fetch) {
-    if (select.getOrderList() == null
-        || select.getOrderList().getList().isEmpty()) {
-      assert collation.getFieldCollations().isEmpty();
-      if ((offset == null
-            || ((SqlLiteral) offset).bigDecimalValue().equals(BigDecimal.ZERO))
-          && fetch == null) {
-        return;
-      }
-    }
-
-    // Create a sorter using the previously constructed collations.
-    bb.setRoot(
-        LogicalSort.create(bb.root, collation,
-            offset == null ? null : convertExpression(offset),
-            fetch == null ? null : convertExpression(fetch)),
-        false);
-
-    // If extra expressions were added to the project list for sorting,
-    // add another project to remove them. But make the collation empty, because
-    // we can't represent the real collation.
-    //
-    // If it is the top node, use the real collation, but don't trim fields.
-    if (orderExprList.size() > 0 && !bb.top) {
-      final List<RexNode> exprs = new ArrayList<>();
-      final RelDataType rowType = bb.root.getRowType();
-      final int fieldCount =
-          rowType.getFieldCount() - orderExprList.size();
-      for (int i = 0; i < fieldCount; i++) {
-        exprs.add(rexBuilder.makeInputRef(bb.root, i));
-      }
-      bb.setRoot(
-          LogicalProject.create(bb.root, exprs,
-              rowType.getFieldNames().subList(0, fieldCount)),
-          false);
-    }
-  }
-
-  /**
-   * Returns whether a given node contains a {@link SqlInOperator}.
-   *
-   * @param node a RexNode tree
-   */
-  private static boolean containsInOperator(
-      SqlNode node) {
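-    // Uses Util.FoundOne as a control-flow shortcut: the visitor throws on
-    // the first SqlInOperator it sees, and the catch block reports true.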
-    try {
-      SqlVisitor<Void> visitor =
-          new SqlBasicVisitor<Void>() {
-            public Void visit(SqlCall call) {
-              if (call.getOperator() instanceof SqlInOperator) {
-                throw new Util.FoundOne(call);
-              }
-              return super.visit(call);
-            }
-          };
-      node.accept(visitor);
-      return false;
-    } catch (Util.FoundOne e) {
-      Util.swallow(e, null);
-      return true;
-    }
-  }
-
-  /**
-   * Push down all the NOT logical operators into any IN/NOT IN operators.
-   *
-   * @param scope Scope where {@code sqlNode} occurs
-   * @param sqlNode the root node from which to look for NOT operators
-   * @return the transformed SqlNode representation with NOT pushed down.
-   */
-  private static SqlNode pushDownNotForIn(SqlValidatorScope scope,
-                                          SqlNode sqlNode) {
-    if ((sqlNode instanceof SqlCall) && containsInOperator(sqlNode)) {
-      SqlCall sqlCall = (SqlCall) sqlNode;
-      if ((sqlCall.getOperator() == SqlStdOperatorTable.AND)
-          || (sqlCall.getOperator() == SqlStdOperatorTable.OR)) {
-        SqlNode[] sqlOperands = ((SqlBasicCall) sqlCall).operands;
-        for (int i = 0; i < sqlOperands.length; i++) {
-          sqlOperands[i] = pushDownNotForIn(scope, sqlOperands[i]);
-        }
-        return reg(scope, sqlNode);
-      } else if (sqlCall.getOperator() == SqlStdOperatorTable.NOT) {
-        SqlNode childNode = sqlCall.operand(0);
-        assert childNode instanceof SqlCall;
-        SqlBasicCall childSqlCall = (SqlBasicCall) childNode;
-        if (childSqlCall.getOperator() == SqlStdOperatorTable.AND) {
-          SqlNode[] andOperands = childSqlCall.getOperands();
-          SqlNode[] orOperands = new SqlNode[andOperands.length];
-          for (int i = 0; i < orOperands.length; i++) {
-            orOperands[i] = reg(scope,
-                SqlStdOperatorTable.NOT.createCall(SqlParserPos.ZERO,
-                    andOperands[i]));
-          }
-          for (int i = 0; i < orOperands.length; i++) {
-            orOperands[i] = pushDownNotForIn(scope, orOperands[i]);
-          }
-          return reg(scope,
-              SqlStdOperatorTable.OR.createCall(SqlParserPos.ZERO,
-                  orOperands[0], orOperands[1]));
-        } else if (childSqlCall.getOperator() == SqlStdOperatorTable.OR) {
-          SqlNode[] orOperands = childSqlCall.getOperands();
-          SqlNode[] andOperands = new SqlNode[orOperands.length];
-          for (int i = 0; i < andOperands.length; i++) {
-            andOperands[i] = reg(scope,
-                SqlStdOperatorTable.NOT.createCall(SqlParserPos.ZERO,
-                    orOperands[i]));
-          }
-          for (int i = 0; i < andOperands.length; i++) {
-            andOperands[i] = pushDownNotForIn(scope, andOperands[i]);
-          }
-          return reg(scope,
-              SqlStdOperatorTable.AND.createCall(SqlParserPos.ZERO,
-                  andOperands[0], andOperands[1]));
-        } else if (childSqlCall.getOperator() == SqlStdOperatorTable.NOT) {
-          SqlNode[] notOperands = childSqlCall.getOperands();
-          assert notOperands.length == 1;
-          return pushDownNotForIn(scope, notOperands[0]);
-        } else if (childSqlCall.getOperator() instanceof SqlInOperator) {
-          SqlNode[] inOperands = childSqlCall.getOperands();
-          SqlInOperator inOp =
-              (SqlInOperator) childSqlCall.getOperator();
-          if (inOp.isNotIn()) {
-            return reg(scope,
-                SqlStdOperatorTable.IN.createCall(SqlParserPos.ZERO,
-                    inOperands[0], inOperands[1]));
-          } else {
-            return reg(scope,
-                SqlStdOperatorTable.NOT_IN.createCall(SqlParserPos.ZERO,
-                    inOperands[0], inOperands[1]));
-          }
-        } else {
-          // childSqlCall is "leaf" node in a logical expression tree
-          // (only considering AND, OR, NOT)
-          return sqlNode;
-        }
-      } else {
-        // sqlNode is "leaf" node in a logical expression tree
-        // (only considering AND, OR, NOT)
-        return sqlNode;
-      }
-    } else {
-      // tree rooted at sqlNode does not contain inOperator
-      return sqlNode;
-    }
-  }
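-  /*
-   * Summary of the rewrites performed above (illustrative; they apply only
-   * when the subtree actually contains an IN/NOT IN):
-   *
-   *   NOT (a AND b)      =>  NOT a OR NOT b
-   *   NOT (a OR b)       =>  NOT a AND NOT b
-   *   NOT (NOT a)        =>  a
-   *   NOT (x IN q)       =>  x NOT IN q
-   *   NOT (x NOT IN q)   =>  x IN q
-   */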
-
-  /** Registers with the validator a {@link SqlNode} that has been created
-   * during the Sql-to-Rel process. */
-  private static SqlNode reg(SqlValidatorScope scope, SqlNode e) {
-    scope.getValidator().deriveType(scope, e);
-    return e;
-  }
-
-  /**
-   * Converts a WHERE clause.
-   *
-   * @param bb    Blackboard
-   * @param where WHERE clause, may be null
-   */
-  private void convertWhere(
-      final Blackboard bb,
-      final SqlNode where) {
-    if (where == null) {
-      return;
-    }
-    SqlNode newWhere = pushDownNotForIn(bb.scope, where);
-    replaceSubQueries(bb, newWhere, RelOptUtil.Logic.UNKNOWN_AS_FALSE);
-    final RexNode convertedWhere = bb.convertExpression(newWhere);
-
-    // only allocate filter if the condition is not TRUE
-    if (convertedWhere.isAlwaysTrue()) {
-      return;
-    }
-
-    final RelFactories.FilterFactory factory =
-        RelFactories.DEFAULT_FILTER_FACTORY;
-    final RelNode filter = factory.createFilter(bb.root, convertedWhere);
-    final RelNode r;
-    final CorrelationUse p = getCorrelationUse(bb, filter);
-    if (p != null) {
-      assert p.r instanceof Filter;
-      Filter f = (Filter) p.r;
-      r = LogicalFilter.create(f.getInput(), f.getCondition(),
-          ImmutableSet.of(p.id));
-    } else {
-      r = filter;
-    }
-
-    bb.setRoot(r, false);
-  }
-
-  private void replaceSubQueries(
-      final Blackboard bb,
-      final SqlNode expr,
-      RelOptUtil.Logic logic) {
-    findSubQueries(bb, expr, logic, false);
-    for (SubQuery node : bb.subQueryList) {
-      substituteSubQuery(bb, node);
-    }
-  }
-
-  private void substituteSubQuery(Blackboard bb, SubQuery subQuery) {
-    final RexNode expr = subQuery.expr;
-    if (expr != null) {
-      // Already done.
-      return;
-    }
-
-    final SqlBasicCall call;
-    final RelNode rel;
-    final SqlNode query;
-    final RelOptUtil.Exists converted;
-    switch (subQuery.node.getKind()) {
-    case CURSOR:
-      convertCursor(bb, subQuery);
-      return;
-
-    case MULTISET_QUERY_CONSTRUCTOR:
-    case MULTISET_VALUE_CONSTRUCTOR:
-    case ARRAY_QUERY_CONSTRUCTOR:
-      rel = convertMultisets(ImmutableList.of(subQuery.node), bb);
-      subQuery.expr = bb.register(rel, JoinRelType.INNER);
-      return;
-
-    case IN:
-      call = (SqlBasicCall) subQuery.node;
-      query = call.operand(1);
-      if (!config.isExpand() && !(query instanceof SqlNodeList)) {
-        return;
-      }
-      final SqlNode leftKeyNode = call.operand(0);
-
-      final List<RexNode> leftKeys;
-      switch (leftKeyNode.getKind()) {
-      case ROW:
-        leftKeys = Lists.newArrayList();
-        for (SqlNode sqlExpr : ((SqlBasicCall) leftKeyNode).getOperandList()) {
-          leftKeys.add(bb.convertExpression(sqlExpr));
-        }
-        break;
-      default:
-        leftKeys = ImmutableList.of(bb.convertExpression(leftKeyNode));
-      }
-
-      final boolean notIn = ((SqlInOperator) call.getOperator()).isNotIn();
-      if (query instanceof SqlNodeList) {
-        SqlNodeList valueList = (SqlNodeList) query;
-        if (!containsNullLiteral(valueList)
-            && valueList.size() < config.getInSubQueryThreshold()) {
-          // We're under the threshold, so convert to OR.
-          subQuery.expr =
-              convertInToOr(
-                  bb,
-                  leftKeys,
-                  valueList,
-                  notIn);
-          return;
-        }
-
-        // Otherwise, let convertExists translate
-        // values list into an inline table for the
-        // reference to Q below.
-      }
-
-      // Project out the search columns from the left side
-
-      // Q1:
-      // "select from emp where emp.deptno in (select col1 from T)"
-      //
-      // is converted to
-      //
-      // "select from
-      //   emp inner join (select distinct col1 from T)) q
-      //   on emp.deptno = q.col1
-      //
-      // Q2:
-      // "select from emp where emp.deptno not in (Q)"
-      //
-      // is converted to
-      //
-      // "select from
-      //   emp left outer join (select distinct col1, TRUE from T) q
-      //   on emp.deptno = q.col1
-      //   where emp.deptno <> null
-      //         and q.indicator <> TRUE"
-      //
-      final RelDataType targetRowType =
-          SqlTypeUtil.promoteToRowType(typeFactory,
-              validator.getValidatedNodeType(leftKeyNode), null);
-      converted =
-          convertExists(query, RelOptUtil.SubQueryType.IN, subQuery.logic,
-              notIn, targetRowType);
-      if (converted.indicator) {
-        // Generate
-        //    emp CROSS JOIN (SELECT COUNT(*) AS c,
-        //                       COUNT(deptno) AS ck FROM dept)
-        final RelDataType longType =
-            typeFactory.createSqlType(SqlTypeName.BIGINT);
-        final RelNode seek = converted.r.getInput(0); // fragile
-        final int keyCount = leftKeys.size();
-        final List<Integer> args = ImmutableIntList.range(0, keyCount);
-        LogicalAggregate aggregate =
-            LogicalAggregate.create(seek, false, ImmutableBitSet.of(), null,
-                ImmutableList.of(
-                    AggregateCall.create(SqlStdOperatorTable.COUNT, false,
-                        ImmutableList.<Integer>of(), -1, longType, null),
-                    AggregateCall.create(SqlStdOperatorTable.COUNT, false,
-                        args, -1, longType, null)));
-        LogicalJoin join =
-            LogicalJoin.create(bb.root, aggregate, rexBuilder.makeLiteral(true),
-                ImmutableSet.<CorrelationId>of(), JoinRelType.INNER);
-        bb.setRoot(join, false);
-      }
-      final RexNode rex =
-          bb.register(converted.r,
-              converted.outerJoin ? JoinRelType.LEFT : JoinRelType.INNER,
-              leftKeys);
-
-      RelOptUtil.Logic logic = subQuery.logic;
-      switch (logic) {
-      case TRUE_FALSE_UNKNOWN:
-      case UNKNOWN_AS_TRUE:
-        if (!converted.indicator) {
-          logic = RelOptUtil.Logic.TRUE_FALSE;
-        }
-      }
-      subQuery.expr = translateIn(logic, bb.root, rex);
-      if (notIn) {
-        subQuery.expr =
-            rexBuilder.makeCall(SqlStdOperatorTable.NOT, subQuery.expr);
-      }
-      return;
-
-    case EXISTS:
-      // "select from emp where exists (select a from T)"
-      //
-      // is converted to the following if the sub-query is correlated:
-      //
-      // "select from emp left outer join (select AGG_TRUE() as indicator
-      // from T group by corr_var) q where q.indicator is true"
-      //
-      // If there is no correlation, the expression is replaced with a
-      // boolean indicating whether the sub-query returned 0 or >= 1 row.
-      call = (SqlBasicCall) subQuery.node;
-      query = call.operand(0);
-      if (!config.isExpand()) {
-        return;
-      }
-      converted = convertExists(query, RelOptUtil.SubQueryType.EXISTS,
-          subQuery.logic, true, null);
-      assert !converted.indicator;
-      if (convertNonCorrelatedSubQuery(subQuery, bb, converted.r, true)) {
-        return;
-      }
-      subQuery.expr = bb.register(converted.r, JoinRelType.LEFT);
-      return;
-
-    case SCALAR_QUERY:
-      // Convert the sub-query.  If it's non-correlated, convert it
-      // to a constant expression.
-      if (!config.isExpand()) {
-        return;
-      }
-      call = (SqlBasicCall) subQuery.node;
-      query = call.operand(0);
-      converted = convertExists(query, RelOptUtil.SubQueryType.SCALAR,
-          subQuery.logic, true, null);
-      assert !converted.indicator;
-      if (convertNonCorrelatedSubQuery(subQuery, bb, converted.r, false)) {
-        return;
-      }
-      rel = convertToSingleValueSubq(query, converted.r);
-      subQuery.expr = bb.register(rel, JoinRelType.LEFT);
-      return;
-
-    case SELECT:
-      // This is used when converting multiset queries:
-      //
-      // select * from unnest(select multiset[deptno] from emps);
-      //
-      converted = convertExists(subQuery.node, RelOptUtil.SubQueryType.SCALAR,
-          subQuery.logic, true, null);
-      assert !converted.indicator;
-      subQuery.expr = bb.register(converted.r, JoinRelType.LEFT);
-      return;
-
-    default:
-      throw new AssertionError("unexpected kind of sub-query: "
-          + subQuery.node);
-    }
-  }
-
-  private RexNode translateIn(RelOptUtil.Logic logic, RelNode root,
-                              final RexNode rex) {
-    switch (logic) {
-    case TRUE:
-      return rexBuilder.makeLiteral(true);
-
-    case TRUE_FALSE:
-    case UNKNOWN_AS_FALSE:
-      assert rex instanceof RexRangeRef;
-      final int fieldCount = rex.getType().getFieldCount();
-      RexNode rexNode = rexBuilder.makeFieldAccess(rex, fieldCount - 1);
-      rexNode = rexBuilder.makeCall(SqlStdOperatorTable.IS_TRUE, rexNode);
-
-      // Then append the IS NOT NULL(leftKeysForIn).
-      //
-      // RexRangeRef contains the following fields:
-      //   leftKeysForIn,
-      //   rightKeysForIn (the original sub-query select list),
-      //   nullIndicator
-      //
-      // The first two lists contain the same number of fields.
-      final int k = (fieldCount - 1) / 2;
-      for (int i = 0; i < k; i++) {
-        rexNode =
-            rexBuilder.makeCall(
-                SqlStdOperatorTable.AND,
-                rexNode,
-                rexBuilder.makeCall(
-                    SqlStdOperatorTable.IS_NOT_NULL,
-                    rexBuilder.makeFieldAccess(rex, i)));
-      }
-      return rexNode;
-
-    case TRUE_FALSE_UNKNOWN:
-    case UNKNOWN_AS_TRUE:
-      // select e.deptno,
-      //   case
-      //   when ct.c = 0 then false
-      //   when dt.i is not null then true
-      //   when e.deptno is null then null
-      //   when ct.ck < ct.c then null
-      //   else false
-      //   end
-      // from e
-      // cross join (select count(*) as c, count(deptno) as ck from v) as ct
-      // left join (select distinct deptno, true as i from v) as dt
-      //   on e.deptno = dt.deptno
-      final Join join = (Join) root;
-      final Project left = (Project) join.getLeft();
-      final RelNode leftLeft = ((Join) left.getInput()).getLeft();
-      final int leftLeftCount = leftLeft.getRowType().getFieldCount();
-      final RelDataType longType =
-          typeFactory.createSqlType(SqlTypeName.BIGINT);
-      final RexNode cRef = rexBuilder.makeInputRef(root, leftLeftCount);
-      final RexNode ckRef = rexBuilder.makeInputRef(root, leftLeftCount + 1);
-      final RexNode iRef =
-          rexBuilder.makeInputRef(root, root.getRowType().getFieldCount() - 1);
-
-      final RexLiteral zero =
-          rexBuilder.makeExactLiteral(BigDecimal.ZERO, longType);
-      final RexLiteral trueLiteral = rexBuilder.makeLiteral(true);
-      final RexLiteral falseLiteral = rexBuilder.makeLiteral(false);
-      final RexNode unknownLiteral =
-          rexBuilder.makeNullLiteral(trueLiteral.getType());
-
-      final ImmutableList.Builder<RexNode> args = ImmutableList.builder();
-      args.add(rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, cRef, zero),
-          falseLiteral,
-          rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, iRef),
-          trueLiteral);
-      final JoinInfo joinInfo = join.analyzeCondition();
-      for (int leftKey : joinInfo.leftKeys) {
-        final RexNode kRef = rexBuilder.makeInputRef(root, leftKey);
-        args.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NULL, kRef),
-            unknownLiteral);
-      }
-      args.add(rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN, ckRef, cRef),
-          unknownLiteral,
-          falseLiteral);
-
-      return rexBuilder.makeCall(SqlStdOperatorTable.CASE, args.build());
-
-    default:
-      throw new AssertionError(logic);
-    }
-  }
-
-  private static boolean containsNullLiteral(SqlNodeList valueList) {
-    for (SqlNode node : valueList.getList()) {
-      if (node instanceof SqlLiteral) {
-        SqlLiteral lit = (SqlLiteral) node;
-        if (lit.getValue() == null) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Determines if a sub-query is non-correlated and if so, converts it to a
-   * constant.
-   *
-   * @param subQuery  the call that references the sub-query
-   * @param bb        blackboard used to convert the sub-query
-   * @param converted RelNode tree corresponding to the sub-query
-   * @param isExists  true if the sub-query is part of an EXISTS expression
-   * @return Whether the sub-query can be converted to a constant
-   */
-  private boolean convertNonCorrelatedSubQuery(
-      SubQuery subQuery,
-      Blackboard bb,
-      RelNode converted,
-      boolean isExists) {
-    SqlCall call = (SqlBasicCall) subQuery.node;
-    if (subQueryConverter.canConvertSubQuery()
-        && isSubQueryNonCorrelated(converted, bb)) {
-      // First check if the sub-query has already been converted
-      // because it's a nested sub-query.  If so, don't re-evaluate it.
-      RexNode constExpr = mapConvertedNonCorrSubqs.get(call);
-      if (constExpr == null) {
-        constExpr =
-            subQueryConverter.convertSubQuery(
-                call,
-                this,
-                isExists,
-                config.isExplain());
-      }
-      if (constExpr != null) {
-        subQuery.expr = constExpr;
-        mapConvertedNonCorrSubqs.put(call, constExpr);
-        return true;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Converts the RelNode tree for a select statement to a select that
-   * produces a single value.
-   *
-   * @param query the query
-   * @param plan   the original RelNode tree corresponding to the statement
-   * @return the converted RelNode tree
-   */
-  public RelNode convertToSingleValueSubq(
-      SqlNode query,
-      RelNode plan) {
-    // Check whether query is guaranteed to produce a single value.
-    if (query instanceof SqlSelect) {
-      SqlSelect select = (SqlSelect) query;
-      SqlNodeList selectList = select.getSelectList();
-      SqlNodeList groupList = select.getGroup();
-
-      if ((selectList.size() == 1)
-          && ((groupList == null) || (groupList.size() == 0))) {
-        SqlNode selectExpr = selectList.get(0);
-        if (selectExpr instanceof SqlCall) {
-          SqlCall selectExprCall = (SqlCall) selectExpr;
-          if (Util.isSingleValue(selectExprCall)) {
-            return plan;
-          }
-        }
-
-        // If there is a LIMIT of 0 or 1, the query is guaranteed
-        // to produce at most a single value
-        if (select.getFetch() != null
-            && select.getFetch() instanceof SqlNumericLiteral) {
-          SqlNumericLiteral limitNum = (SqlNumericLiteral) select.getFetch();
-          if (((BigDecimal) limitNum.getValue()).intValue() < 2) {
-            return plan;
-          }
-        }
-      }
-    } else if (query instanceof SqlCall) {
-      // If the query is (values ...), look into the operands
-      // to determine whether SingleValueAgg is necessary
-      SqlCall exprCall = (SqlCall) query;
-      if (exprCall.getOperator()
-          instanceof SqlValuesOperator
-              && Util.isSingleValue(exprCall)) {
-        return plan;
-      }
-    }
-
-    // If not, project SingleValueAgg
-    return RelOptUtil.createSingleValueAggRel(
-        cluster,
-        plan);
-  }
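-  /*
-   * Examples (illustrative): "(SELECT MAX(sal) FROM emp)" and
-   * "(SELECT sal FROM emp LIMIT 1)" are already single-valued, so the plan
-   * is returned unchanged; "(SELECT sal FROM emp)" is wrapped in a
-   * SingleValueAgg, which raises a runtime error if more than one row
-   * comes back.
-   */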
-
-  /**
-   * Converts "x IN (1, 2, ...)" to "x=1 OR x=2 OR ...".
-   *
-   * @param leftKeys   LHS
-   * @param valuesList RHS
-   * @param isNotIn    is this a NOT IN operator
-   * @return converted expression
-   */
-  private RexNode convertInToOr(
-      final Blackboard bb,
-      final List<RexNode> leftKeys,
-      SqlNodeList valuesList,
-      boolean isNotIn) {
-    final List<RexNode> comparisons = new ArrayList<>();
-    for (SqlNode rightVals : valuesList) {
-      RexNode rexComparison;
-      if (leftKeys.size() == 1) {
-        rexComparison =
-            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
-                leftKeys.get(0),
-                ensureSqlType(leftKeys.get(0).getType(),
-                    bb.convertExpression(rightVals)));
-      } else {
-        assert rightVals instanceof SqlCall;
-        final SqlBasicCall call = (SqlBasicCall) rightVals;
-        assert (call.getOperator() instanceof SqlRowOperator)
-            && call.operandCount() == leftKeys.size();
-        rexComparison =
-            RexUtil.composeConjunction(
-                rexBuilder,
-                Iterables.transform(
-                    Pair.zip(leftKeys, call.getOperandList()),
-                    new Function<Pair<RexNode, SqlNode>, RexNode>() {
-                      public RexNode apply(Pair<RexNode, SqlNode> pair) {
-                        return rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
-                            pair.left,
-                            ensureSqlType(pair.left.getType(),
-                                bb.convertExpression(pair.right)));
-                      }
-                    }),
-                false);
-      }
-      comparisons.add(rexComparison);
-    }
-
-    RexNode result =
-        RexUtil.composeDisjunction(rexBuilder, comparisons, true);
-    assert result != null;
-
-    if (isNotIn) {
-      result =
-          rexBuilder.makeCall(
-              SqlStdOperatorTable.NOT,
-              result);
-    }
-
-    return result;
-  }
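-  /*
-   * Worked examples (illustrative):
-   *
-   *   x IN (1, 2, 3)         =>  x = 1 OR x = 2 OR x = 3
-   *   (a, b) IN ((1, 2))     =>  a = 1 AND b = 2
-   *   x NOT IN (1, 2)        =>  NOT (x = 1 OR x = 2)
-   */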
-
-  /** Ensures that an expression has a given {@link SqlTypeName}, applying a
-   * cast if necessary. If the expression already has the right type family,
-   * returns the expression unchanged. */
-  private RexNode ensureSqlType(RelDataType type, RexNode node) {
-    if (type.getSqlTypeName() == node.getType().getSqlTypeName()
-        || (type.getSqlTypeName() == SqlTypeName.VARCHAR
-            && node.getType().getSqlTypeName() == SqlTypeName.CHAR)) {
-      return node;
-    }
-    return rexBuilder.ensureType(type, node, true);
-  }
-
-  /**
-   * Gets the list size threshold under which {@link #convertInToOr} is used.
-   * Lists of this size or greater will instead be converted to use a join
-   * against an inline table
-   * ({@link org.apache.calcite.rel.logical.LogicalValues}) rather than a
-   * predicate. A threshold of 0 forces usage of an inline table in all cases; a
-   * threshold of Integer.MAX_VALUE forces usage of OR in all cases.
-   *
-   * @return threshold, default {@link #DEFAULT_IN_SUB_QUERY_THRESHOLD}
-   */
-  @Deprecated // to be removed before 2.0
-  protected int getInSubqueryThreshold() {
-    return config.getInSubQueryThreshold();
-  }
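-  /*
-   * Example (illustrative, assuming the Calcite default threshold of 20):
-   * "x IN (1, ..., 19)" is expanded to a chain of ORs, while
-   * "x IN (1, ..., 20)" becomes a join against an inline LogicalValues
-   * table.
-   */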
-
-  /**
-   * Converts an EXISTS or IN predicate into a join. For EXISTS, the sub-query
-   * produces an indicator variable, and the result is a relational expression
-   * which outer joins that indicator to the original query. After performing
-   * the outer join, the condition will be TRUE if the EXISTS condition holds,
-   * NULL otherwise.
-   *
-   * @param seek           A query, for example 'select * from emp' or
-   *                       'values (1,2,3)' or '('Foo', 34)'.
-   * @param subQueryType   Whether sub-query is IN, EXISTS or scalar
-   * @param logic Whether the answer needs to be in full 3-valued logic (TRUE,
-   *     FALSE, UNKNOWN), or whether we can accept an approximation (say
-   *     representing UNKNOWN as FALSE)
-   * @param notIn Whether the operation is NOT IN
-   * @return join expression
-   */
-  private RelOptUtil.Exists convertExists(
-      SqlNode seek,
-      RelOptUtil.SubQueryType subQueryType,
-      RelOptUtil.Logic logic,
-      boolean notIn,
-      RelDataType targetDataType) {
-    final SqlValidatorScope seekScope =
-        (seek instanceof SqlSelect)
-            ? validator.getSelectScope((SqlSelect) seek)
-            : null;
-    final Blackboard seekBb = createBlackboard(seekScope, null, false);
-    RelNode seekRel = convertQueryOrInList(seekBb, seek, targetDataType);
-
-    return RelOptUtil.createExistsPlan(seekRel, subQueryType, logic, notIn);
-  }
-
-  private RelNode convertQueryOrInList(
-      Blackboard bb,
-      SqlNode seek,
-      RelDataType targetRowType) {
-    // NOTE: Once we start accepting single-row queries as row constructors,
-    // there will be an ambiguity here for a case like X IN ((SELECT Y FROM
-    // Z)).  The SQL standard resolves the ambiguity by saying that a lone
-    // select should be interpreted as a table expression, not a row
-    // expression.  The semantic difference is that a table expression can
-    // return multiple rows.
-    if (seek instanceof SqlNodeList) {
-      return convertRowValues(
-          bb,
-          seek,
-          ((SqlNodeList) seek).getList(),
-          false,
-          targetRowType);
-    } else {
-      return convertQueryRecursive(seek, false, null).project();
-    }
-  }
-
-  private RelNode convertRowValues(
-      Blackboard bb,
-      SqlNode rowList,
-      Collection<SqlNode> rows,
-      boolean allowLiteralsOnly,
-      RelDataType targetRowType) {
-    // NOTE jvs 30-Apr-2006: We combine all rows consisting entirely of
-    // literals into a single LogicalValues; this gives the optimizer a smaller
-    // input tree.  For everything else (computed expressions, row
-    // sub-queries), we union each row in as a projection on top of a
-    // LogicalOneRow.
-
-    final ImmutableList.Builder<ImmutableList<RexLiteral>> tupleList =
-        ImmutableList.builder();
-    final RelDataType rowType;
-    if (targetRowType != null) {
-      rowType = targetRowType;
-    } else {
-      rowType =
-          SqlTypeUtil.promoteToRowType(
-              typeFactory,
-              validator.getValidatedNodeType(rowList),
-              null);
-    }
-
-    final List<RelNode> unionInputs = new ArrayList<>();
-    for (SqlNode node : rows) {
-      SqlBasicCall call;
-      if (isRowConstructor(node)) {
-        call = (SqlBasicCall) node;
-        ImmutableList.Builder<RexLiteral> tuple = ImmutableList.builder();
-        for (Ord<SqlNode> operand : Ord.zip(call.operands)) {
-          RexLiteral rexLiteral =
-              convertLiteralInValuesList(
-                  operand.e,
-                  bb,
-                  rowType,
-                  operand.i);
-          if ((rexLiteral == null) && allowLiteralsOnly) {
-            return null;
-          }
-          if ((rexLiteral == null) || !config.isCreateValuesRel()) {
-            // fallback to convertRowConstructor
-            tuple = null;
-            break;
-          }
-          tuple.add(rexLiteral);
-        }
-        if (tuple != null) {
-          tupleList.add(tuple.build());
-          continue;
-        }
-      } else {
-        RexLiteral rexLiteral =
-            convertLiteralInValuesList(
-                node,
-                bb,
-                rowType,
-                0);
-        if ((rexLiteral != null) && config.isCreateValuesRel()) {
-          tupleList.add(ImmutableList.of(rexLiteral));
-          continue;
-        } else {
-          if ((rexLiteral == null) && allowLiteralsOnly) {
-            return null;
-          }
-        }
-
-        // convert "1" to "row(1)"
-        call =
-            (SqlBasicCall) SqlStdOperatorTable.ROW.createCall(
-                SqlParserPos.ZERO,
-                node);
-      }
-      unionInputs.add(convertRowConstructor(bb, call));
-    }
-    LogicalValues values =
-        LogicalValues.create(cluster, rowType, tupleList.build());
-    RelNode resultRel;
-    if (unionInputs.isEmpty()) {
-      resultRel = values;
-    } else {
-      if (!values.getTuples().isEmpty()) {
-        unionInputs.add(values);
-      }
-      resultRel = LogicalUnion.create(unionInputs, true);
-    }
-    leaves.add(resultRel);
-    return resultRel;
-  }
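-  /*
-   * Example (illustrative): "VALUES (1, 2), (3, 4)" collapses into a single
-   * LogicalValues, while a computed row such as "VALUES (1, 1 + 1)" falls
-   * back to a projection over LogicalOneRow, unioned with any all-literal
-   * rows.
-   */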
-
-  private RexLiteral convertLiteralInValuesList(
-      SqlNode sqlNode,
-      Blackboard bb,
-      RelDataType rowType,
-      int iField) {
-    if (!(sqlNode instanceof SqlLiteral)) {
-      return null;
-    }
-    RelDataTypeField field = rowType.getFieldList().get(iField);
-    RelDataType type = field.getType();
-    if (type.isStruct()) {
-      // null literals for weird stuff like UDTs need
-      // special handling during type flattening, so
-      // don't use LogicalValues for those
-      return null;
-    }
-
-    RexNode literalExpr =
-        exprConverter.convertLiteral(
-            bb,
-            (SqlLiteral) sqlNode);
-
-    if (!(literalExpr instanceof RexLiteral)) {
-      assert literalExpr.isA(SqlKind.CAST);
-      RexNode child = ((RexCall) literalExpr).getOperands().get(0);
-      assert RexLiteral.isNullLiteral(child);
-
-      // NOTE jvs 22-Nov-2006:  we preserve type info
-      // in LogicalValues digest, so it's OK to lose it here
-      return (RexLiteral) child;
-    }
-
-    RexLiteral literal = (RexLiteral) literalExpr;
-
-    Comparable value = literal.getValue();
-
-    if (SqlTypeUtil.isExactNumeric(type) && SqlTypeUtil.hasScale(type)) {
-      BigDecimal roundedValue =
-          NumberUtil.rescaleBigDecimal(
-              (BigDecimal) value,
-              type.getScale());
-      return rexBuilder.makeExactLiteral(
-          roundedValue,
-          type);
-    }
-
-    if ((value instanceof NlsString)
-        && (type.getSqlTypeName() == SqlTypeName.CHAR)) {
-      // pad fixed character type
-      NlsString unpadded = (NlsString) value;
-      return rexBuilder.makeCharLiteral(
-          new NlsString(
-              Spaces.padRight(unpadded.getValue(), type.getPrecision()),
-              unpadded.getCharsetName(),
-              unpadded.getCollation()));
-    }
-    return literal;
-  }
-
-  private boolean isRowConstructor(SqlNode node) {
-    if (node.getKind() != SqlKind.ROW) {
-      return false;
-    }
-    SqlCall call = (SqlCall) node;
-    return call.getOperator().getName().equalsIgnoreCase("row");
-  }
-
-  /**
-   * Builds a list of all <code>IN</code> or <code>EXISTS</code> operators
-   * inside a SQL parse tree. Does not traverse inside queries.
-   *
-   * @param bb                           blackboard
-   * @param node                         the SQL parse tree
-   * @param logic Whether the answer needs to be in full 3-valued logic (TRUE,
-   *              FALSE, UNKNOWN), or whether we can accept an approximation
-   *              (say representing UNKNOWN as FALSE)
-   * @param registerOnlyScalarSubQueries if set to true and the parse tree
-   *                                     corresponds to a variation of a select
-   *                                     node, only register it if it's a scalar
-   *                                     sub-query
-   */
-  private void findSubQueries(
-      Blackboard bb,
-      SqlNode node,
-      RelOptUtil.Logic logic,
-      boolean registerOnlyScalarSubQueries) {
-    final SqlKind kind = node.getKind();
-    switch (kind) {
-    case EXISTS:
-    case SELECT:
-    case MULTISET_QUERY_CONSTRUCTOR:
-    case MULTISET_VALUE_CONSTRUCTOR:
-    case ARRAY_QUERY_CONSTRUCTOR:
-    case CURSOR:
-    case SCALAR_QUERY:
-      if (!registerOnlyScalarSubQueries
-          || (kind == SqlKind.SCALAR_QUERY)) {
-        bb.registerSubQuery(node, RelOptUtil.Logic.TRUE_FALSE);
-      }
-      return;
-    case IN:
-      if (((SqlCall) node).getOperator() == SqlStdOperatorTable.NOT_IN) {
-        logic = logic.negate();
-      }
-      break;
-    case NOT:
-      logic = logic.negate();
-      break;
-    }
-    if (node instanceof SqlCall) {
-      for (SqlNode operand : ((SqlCall) node).getOperandList()) {
-        if (operand != null) {
-          // In the case of an IN expression, locate scalar
-          // sub-queries so we can convert them to constants
-          findSubQueries(
-              bb,
-              operand,
-              logic,
-              kind == SqlKind.IN || registerOnlyScalarSubQueries);
-        }
-      }
-    } else if (node instanceof SqlNodeList) {
-      for (SqlNode child : (SqlNodeList) node) {
-        findSubQueries(
-            bb,
-            child,
-            logic,
-            kind == SqlKind.IN || registerOnlyScalarSubQueries);
-      }
-    }
-
-    // Now that we've located any scalar sub-queries inside the IN
-    // expression, register the IN expression itself.  We need to
-    // register the scalar sub-queries first so they can be converted
-    // before the IN expression is converted.
-    if (kind == SqlKind.IN) {
-      switch (logic) {
-      case TRUE_FALSE_UNKNOWN:
-        if (validator.getValidatedNodeType(node).isNullable()) {
-          break;
-        } else if (true) {
-          break;
-        }
-        // fall through
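-        // NOTE: both branches above break, so this fall-through is
-        // currently unreachable; UNKNOWN_AS_FALSE is only entered directly.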
-      case UNKNOWN_AS_FALSE:
-        logic = RelOptUtil.Logic.TRUE;
-      }
-      bb.registerSubQuery(node, logic);
-    }
-  }
-
-  /**
-   * Converts an expression from {@link SqlNode} to {@link RexNode} format.
-   *
-   * @param node Expression to translate
-   * @return Converted expression
-   */
-  public RexNode convertExpression(
-      SqlNode node) {
-    Map<String, RelDataType> nameToTypeMap = Collections.emptyMap();
-    final ParameterScope scope =
-        new ParameterScope((SqlValidatorImpl) validator, nameToTypeMap);
-    final Blackboard bb = createBlackboard(scope, null, false);
-    return bb.convertExpression(node);
-  }
-
-  /**
-   * Converts an expression from {@link SqlNode} to {@link RexNode} format,
-   * mapping identifier references to predefined expressions.
-   *
-   * @param node          Expression to translate
-   * @param nameToNodeMap map from String to {@link RexNode}; when an
-   *                      {@link SqlIdentifier} is encountered, it is used as a
-   *                      key and translated to the corresponding value from
-   *                      this map
-   * @return Converted expression
-   */
-  public RexNode convertExpression(
-      SqlNode node,
-      Map<String, RexNode> nameToNodeMap) {
-    final Map<String, RelDataType> nameToTypeMap = new HashMap<>();
-    for (Map.Entry<String, RexNode> entry : nameToNodeMap.entrySet()) {
-      nameToTypeMap.put(entry.getKey(), entry.getValue().getType());
-    }
-    final ParameterScope scope =
-        new ParameterScope((SqlValidatorImpl) validator, nameToTypeMap);
-    final Blackboard bb = createBlackboard(scope, nameToNodeMap, false);
-    return bb.convertExpression(node);
-  }
-
-  /**
-   * Converts a non-standard expression.
-   *
-   * <p>This method is an extension-point that derived classes can override. If
-   * this method returns a null result, the normal expression translation
-   * process will proceed. The default implementation always returns null.
-   *
-   * @param node Expression
-   * @param bb   Blackboard
-   * @return null to proceed with the usual expression translation process
-   */
-  protected RexNode convertExtendedExpression(
-      SqlNode node,
-      Blackboard bb) {
-    return null;
-  }
-
-  private RexNode convertOver(Blackboard bb, SqlNode node) {
-    SqlCall call = (SqlCall) node;
-    SqlCall aggCall = call.operand(0);
-    SqlNode windowOrRef = call.operand(1);
-    final SqlWindow window =
-        validator.resolveWindow(windowOrRef, bb.scope, true);
-
-    // ROW_NUMBER() expects specific kind of framing.
-    if (aggCall.getKind() == SqlKind.ROW_NUMBER) {
-      window.setLowerBound(SqlWindow.createUnboundedPreceding(SqlParserPos.ZERO));
-      window.setUpperBound(SqlWindow.createCurrentRow(SqlParserPos.ZERO));
-      window.setRows(SqlLiteral.createBoolean(true, SqlParserPos.ZERO));
-    }
-    final SqlNodeList partitionList = window.getPartitionList();
-    final ImmutableList.Builder<RexNode> partitionKeys =
-        ImmutableList.builder();
-    for (SqlNode partition : partitionList) {
-      partitionKeys.add(bb.convertExpression(partition));
-    }
-    RexNode lowerBound = bb.convertExpression(window.getLowerBound());
-    RexNode upperBound = bb.convertExpression(window.getUpperBound());
-    SqlNodeList orderList = window.getOrderList();
-    if ((orderList.size() == 0) && !window.isRows()) {
-      // A logical range requires an ORDER BY clause. Use the implicit
-      // ordering of this relation. There must be one, otherwise it would
-      // have failed validation.
-      orderList = bb.scope.getOrderList();
-      if (orderList == null) {
-        throw new AssertionError(
-            "Relation should have sort key for implicit ORDER BY");
-      }
-    }
-    final ImmutableList.Builder<RexFieldCollation> orderKeys =
-        ImmutableList.builder();
-    final Set<SqlKind> flags = EnumSet.noneOf(SqlKind.class);
-    for (SqlNode order : orderList) {
-      flags.clear();
-      RexNode e = bb.convertSortExpression(order, flags);
-      orderKeys.add(new RexFieldCollation(e, flags));
-    }
-    try {
-      Preconditions.checkArgument(bb.window == null,
-          "already in window agg mode");
-      bb.window = window;
-      RexNode rexAgg = exprConverter.convertCall(bb, aggCall);
-      rexAgg =
-          rexBuilder.ensureType(
-              validator.getValidatedNodeType(call), rexAgg, false);
-
-      // Walk over the tree and apply 'over' to all agg functions. This is
-      // necessary because the returned expression is not necessarily a call
-      // to an agg function. For example, AVG(x) becomes SUM(x) / COUNT(x).
-
-      boolean isDistinct = false;
-      if (aggCall.getFunctionQuantifier() != null
-          && aggCall.getFunctionQuantifier().getValue().equals(SqlSelectKeyword.DISTINCT)) {
-        isDistinct = true;
-      }
-
-      final RexShuttle visitor =
-          new HistogramShuttle(
-              partitionKeys.build(), orderKeys.build(),
-              RexWindowBound.create(window.getLowerBound(), lowerBound),
-              RexWindowBound.create(window.getUpperBound(), upperBound),
-              window,
-              isDistinct);
-      RexNode overNode = rexAgg.accept(visitor);
-
-      return overNode;
-    } finally {
-      bb.window = null;
-    }
-  }
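-  /*
-   * Example (illustrative): "ROW_NUMBER() OVER (ORDER BY sal)" is given the
-   * explicit frame ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW above,
-   * since ROW_NUMBER expects exactly that framing.
-   */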
-
-  /**
-   * Converts a FROM clause into a relational expression.
-   *
-   * @param bb   Scope within which to resolve identifiers
-   * @param from FROM clause of a query. Examples include:
-   *
-   *             <ul>
-   *             <li>a single table ("SALES.EMP"),
-   *             <li>an aliased table ("EMP AS E"),
-   *             <li>a list of tables ("EMP, DEPT"),
-   *             <li>an ANSI Join expression ("EMP JOIN DEPT ON EMP.DEPTNO =
-   *             DEPT.DEPTNO"),
-   *             <li>a VALUES clause ("VALUES ('Fred', 20)"),
-   *             <li>a query ("(SELECT * FROM EMP WHERE GENDER = 'F')"),
-   *             <li>or any combination of the above.
-   *             </ul>
-   */
-  protected void convertFrom(
-      Blackboard bb,
-      SqlNode from) {
-    if (from == null) {
-      bb.setRoot(LogicalValues.createOneRow(cluster), false);
-      return;
-    }
-
-    final SqlCall call;
-    final SqlNode[] operands;
-    switch (from.getKind()) {
-    case MATCH_RECOGNIZE:
-      convertMatchRecognize(bb, (SqlCall) from);
-      return;
-
-    case AS:
-      convertFrom(bb, ((SqlCall) from).operand(0));
-      return;
-
-    case WITH_ITEM:
-      convertFrom(bb, ((SqlWithItem) from).query);
-      return;
-
-    case WITH:
-      convertFrom(bb, ((SqlWith) from).body);
-      return;
-
-    case TABLESAMPLE:
-      operands = ((SqlBasicCall) from).getOperands();
-      SqlSampleSpec sampleSpec = SqlLiteral.sampleValue(operands[1]);
-      if (sampleSpec instanceof SqlSampleSpec.SqlSubstitutionSampleSpec) {
-        String sampleName =
-            ((SqlSampleSpec.SqlSubstitutionSampleSpec) sampleSpec)
-                .getName();
-        datasetStack.push(sampleName);
-        convertFrom(bb, operands[0]);
-        datasetStack.pop();
-      } else if (sampleSpec instanceof SqlSampleSpec.SqlTableSampleSpec) {
-        SqlSampleSpec.SqlTableSampleSpec tableSampleSpec =
-            (SqlSampleSpec.SqlTableSampleSpec) sampleSpec;
-        convertFrom(bb, operands[0]);
-        RelOptSamplingParameters params =
-            new RelOptSamplingParameters(
-                tableSampleSpec.isBernoulli(),
-                tableSampleSpec.getSamplePercentage(),
-                tableSampleSpec.isRepeatable(),
-                tableSampleSpec.getRepeatableSeed());
-        bb.setRoot(new Sample(cluster, bb.root, params), false);
-      } else {
-        throw new AssertionError("unknown TABLESAMPLE type: " + sampleSpec);
-      }
-      return;
-
-    case IDENTIFIER:
-      convertIdentifier(bb, (SqlIdentifier) from, null);
-      return;
-
-    case EXTEND:
-      call = (SqlCall) from;
-      SqlIdentifier id = (SqlIdentifier) call.getOperandList().get(0);
-      SqlNodeList extendedColumns = (SqlNodeList) call.getOperandList().get(1);
-      convertIdentifier(bb, id, extendedColumns);
-      return;
-
-    case JOIN:
-      final SqlJoin join = (SqlJoin) from;
-      final SqlValidatorScope scope = validator.getJoinScope(from);
-      final Blackboard fromBlackboard = createBlackboard(scope, null, false);
-      SqlNode left = join.getLeft();
-      SqlNode right = join.getRight();
-      final boolean isNatural = join.isNatural();
-      final JoinType joinType = join.getJoinType();
-      final SqlValidatorScope leftScope =
-          Util.first(validator.getJoinScope(left),
-              ((DelegatingScope) bb.scope).getParent());
-      final Blackboard leftBlackboard =
-          createBlackboard(leftScope, null, false);
-      final SqlValidatorScope rightScope =
-          Util.first(validator.getJoinScope(right),
-              ((DelegatingScope) bb.scope).getParent());
-      final Blackboard rightBlackboard =
-          createBlackboard(rightScope, null, false);
-      convertFrom(leftBlackboard, left);
-      RelNode leftRel = leftBlackboard.root;
-      convertFrom(rightBlackboard, right);
-      RelNode rightRel = rightBlackboard.root;
-      JoinRelType convertedJoinType = convertJoinType(joinType);
-      RexNode conditionExp;
-      final SqlValidatorNamespace leftNamespace = validator.getNamespace(left);
-      final SqlValidatorNamespace rightNamespace = validator.getNamespace(right);
-      if (isNatural) {
-        final RelDataType leftRowType = leftNamespace.getRowType();
-        final RelDataType rightRowType = rightNamespace.getRowType();
-        final List<String> columnList =
-            SqlValidatorUtil.deriveNaturalJoinColumnList(leftRowType,
-                rightRowType);
-        conditionExp = convertUsing(leftNamespace, rightNamespace,
-            columnList);
-      } else {
-        conditionExp =
-            convertJoinCondition(
-                fromBlackboard,
-                leftNamespace,
-                rightNamespace,
-                join.getCondition(),
-                join.getConditionType(),
-                leftRel,
-                rightRel);
-      }
-
-      final RelNode joinRel =
-          createJoin(
-              fromBlackboard,
-              leftRel,
-              rightRel,
-              conditionExp,
-              convertedJoinType);
-      bb.setRoot(joinRel, false);
-      return;
-
-    case SELECT:
-    case INTERSECT:
-    case EXCEPT:
-    case UNION:
-      final RelNode rel = convertQueryRecursive(from, false, null).project();
-      bb.setRoot(rel, true);
-      return;
-
-    case VALUES:
-      convertValuesImpl(bb, (SqlCall) from, null);
-      return;
-
-    case UNNEST:
-      call = (SqlCall) from;
-      final List<SqlNode> nodes = call.getOperandList();
-      final SqlUnnestOperator operator = (SqlUnnestOperator) call.getOperator();
-      for (SqlNode node : nodes) {
-        replaceSubQueries(bb, node, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-      }
-      final List<RexNode> exprs = new ArrayList<>();
-      final List<String> fieldNames = new ArrayList<>();
-      for (Ord<SqlNode> node : Ord.zip(nodes)) {
-        exprs.add(bb.convertExpression(node.e));
-        fieldNames.add(validator.deriveAlias(node.e, node.i));
-      }
-      final RelNode input =
-          RelOptUtil.createProject(
-              (null != bb.root) ? bb.root : LogicalValues.createOneRow(cluster),
-              exprs, fieldNames, true);
-
-      Uncollect uncollect =
-          new Uncollect(cluster, cluster.traitSetOf(Convention.NONE),
-              input, operator.withOrdinality);
-      bb.setRoot(uncollect, true);
-      return;
-
-    case COLLECTION_TABLE:
-      call = (SqlCall) from;
-
-      // Dig out real call; TABLE() wrapper is just syntactic.
-      assert call.getOperandList().size() == 1;
-      final SqlCall call2 = call.operand(0);
-      convertCollectionTable(bb, call2);
-      return;
-
-    default:
-      throw new AssertionError("not a join operator " + from);
-    }
-  }
-
-  protected void convertMatchRecognize(Blackboard bb, SqlCall call) {
-    final SqlMatchRecognize matchRecognize = (SqlMatchRecognize) call;
-    final SqlValidatorNamespace ns = validator.getNamespace(matchRecognize);
-    final SqlValidatorScope scope = validator.getMatchRecognizeScope(matchRecognize);
-
-    final Blackboard matchBb = createBlackboard(scope, null, false);
-    final RelDataType rowType = ns.getRowType();
-    // Convert the inner query; it could be a table name or a derived table.
-    SqlNode expr = matchRecognize.getTableRef();
-    convertFrom(matchBb, expr);
-    final RelNode input = matchBb.root;
-
-    // PARTITION BY
-    final SqlNodeList partitionList = matchRecognize.getPartitionList();
-    final List<RexNode> partitionKeys = new ArrayList<>();
-    for (SqlNode partition : partitionList) {
-      RexNode e = matchBb.convertExpression(partition);
-      partitionKeys.add(e);
-    }
-
-    // ORDER BY
-    final SqlNodeList orderList = matchRecognize.getOrderList();
-    final List<RelFieldCollation> orderKeys = new ArrayList<>();
-    for (SqlNode order : orderList) {
-      final RelFieldCollation.Direction direction;
-      switch (order.getKind()) {
-      case DESCENDING:
-        direction = RelFieldCollation.Direction.DESCENDING;
-        order = ((SqlCall) order).operand(0);
-        break;
-      case NULLS_FIRST:
-      case NULLS_LAST:
-        throw new AssertionError();
-      default:
-        direction = RelFieldCollation.Direction.ASCENDING;
-        break;
-      }
-      final RelFieldCollation.NullDirection nullDirection =
-          validator.getDefaultNullCollation().last(desc(direction))
-              ? RelFieldCollation.NullDirection.LAST
-              : RelFieldCollation.NullDirection.FIRST;
-      RexNode e = matchBb.convertExpression(order);
-      orderKeys.add(
-          new RelFieldCollation(((RexInputRef) e).getIndex(), direction,
-              nullDirection));
-    }
-    final RelCollation orders = cluster.traitSet().canonize(RelCollations.of(orderKeys));
-
-    // convert pattern
-    final Set<String> patternVarsSet = new HashSet<>();
-    SqlNode pattern = matchRecognize.getPattern();
-    final SqlBasicVisitor<RexNode> patternVarVisitor =
-      new SqlBasicVisitor<RexNode>() {
-        @Override public RexNode visit(SqlCall call) {
-          List<SqlNode> operands = call.getOperandList();
-          List<RexNode> newOperands = Lists.newArrayList();
-          for (SqlNode node : operands) {
-            newOperands.add(node.accept(this));
-          }
-          return rexBuilder.makeCall(
-            validator.getUnknownType(), call.getOperator(), newOperands);
-        }
-
-        @Override public RexNode visit(SqlIdentifier id) {
-          assert id.isSimple();
-          patternVarsSet.add(id.getSimple());
-          return rexBuilder.makeLiteral(id.getSimple());
-        }
-
-        @Override public RexNode visit(SqlLiteral literal) {
-          if (literal instanceof SqlNumericLiteral) {
-            return rexBuilder.makeExactLiteral(BigDecimal.valueOf(literal.intValue(true)));
-          } else {
-            return rexBuilder.makeLiteral(literal.booleanValue());
-          }
-        }
-      };
-    final RexNode patternNode = pattern.accept(patternVarVisitor);
-
-    // convert subset
-    final SqlNodeList subsets = matchRecognize.getSubsetList();
-    final Map<String, TreeSet<String>> subsetMap = Maps.newHashMap();
-    for (SqlNode node : subsets) {
-      List<SqlNode> operands = ((SqlCall) node).getOperandList();
-      SqlIdentifier left = (SqlIdentifier) operands.get(0);
-      patternVarsSet.add(left.getSimple());
-      SqlNodeList rights = (SqlNodeList) operands.get(1);
-      final TreeSet<String> list = new TreeSet<String>();
-      for (SqlNode right : rights) {
-        assert right instanceof SqlIdentifier;
-        list.add(((SqlIdentifier) right).getSimple());
-      }
-      subsetMap.put(left.getSimple(), list);
-    }
-
-    SqlNode afterMatch = matchRecognize.getAfter();
-    if (afterMatch == null) {
-      afterMatch =
-          SqlMatchRecognize.AfterOption.SKIP_TO_NEXT_ROW.symbol(SqlParserPos.ZERO);
-    }
-
-    final RexNode after;
-    if (afterMatch instanceof SqlCall) {
-      List<SqlNode> operands = ((SqlCall) afterMatch).getOperandList();
-      SqlOperator operator = ((SqlCall) afterMatch).getOperator();
-      assert operands.size() == 1;
-      SqlIdentifier id = (SqlIdentifier) operands.get(0);
-      assert patternVarsSet.contains(id.getSimple())
-          : id.getSimple() + " not defined in pattern";
-      RexNode rex = rexBuilder.makeLiteral(id.getSimple());
-      after =
-          rexBuilder.makeCall(validator.getUnknownType(), operator,
-              ImmutableList.of(rex));
-    } else {
-      after = matchBb.convertExpression(afterMatch);
-    }
-
-    matchBb.setPatternVarRef(true);
-
-    // convert measures
-    final ImmutableMap.Builder<String, RexNode> measureNodes =
-        ImmutableMap.builder();
-    for (SqlNode measure : matchRecognize.getMeasureList()) {
-      List<SqlNode> operands = ((SqlCall) measure).getOperandList();
-      String alias = ((SqlIdentifier) operands.get(1)).getSimple();
-      RexNode rex = matchBb.convertExpression(operands.get(0));
-      measureNodes.put(alias, rex);
-    }
-
-    // convert definitions
-    final ImmutableMap.Builder<String, RexNode> definitionNodes =
-        ImmutableMap.builder();
-    for (SqlNode def : matchRecognize.getPatternDefList()) {
-      List<SqlNode> operands = ((SqlCall) def).getOperandList();
-      String alias = ((SqlIdentifier) operands.get(1)).getSimple();
-      RexNode rex = matchBb.convertExpression(operands.get(0));
-      definitionNodes.put(alias, rex);
-    }
-
-    final SqlLiteral rowsPerMatch = matchRecognize.getRowsPerMatch();
-    final boolean allRows = rowsPerMatch != null
-        && rowsPerMatch.getValue() == SqlMatchRecognize.RowsPerMatchOption.ALL_ROWS;
-
-    matchBb.setPatternVarRef(false);
-
-    final RelFactories.MatchFactory factory =
-        RelFactories.DEFAULT_MATCH_FACTORY;
-    final RelNode rel =
-        factory.createMatchRecognize(input, patternNode,
-            matchRecognize.getStrictStart().booleanValue(),
-            matchRecognize.getStrictEnd().booleanValue(),
-            definitionNodes.build(), measureNodes.build(), after,
-            subsetMap, allRows, partitionKeys, orders, rowType);
-    bb.setRoot(rel, false);
-  }
-
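
A side note on the pattern visitor in convertMatchRecognize above: while rebuilding each call as a RexNode, it also records every simple identifier it visits in patternVarsSet, which later backs the assertion that an AFTER MATCH SKIP target names a defined pattern variable. Below is a minimal stand-alone sketch of just the variable-collection walk, with nested arrays standing in for the SqlCall tree; all names here are invented for illustration.

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class PatternVars {
      // Strings are simple pattern variables; Object[] nodes are calls whose
      // operands we recurse into, mirroring the visitor's two visit methods.
      static void collect(Object node, Set<String> vars) {
        if (node instanceof String s) {
          vars.add(s);
        } else if (node instanceof Object[] call) {
          for (Object operand : call) {
            collect(operand, vars);
          }
        }
      }

      public static void main(String[] args) {
        // PATTERN (A B+ C), rendered here as nested arrays of operands.
        Object pattern = new Object[] {"A", new Object[] {"B"}, "C"};
        Set<String> vars = new LinkedHashSet<>();
        collect(pattern, vars);
        System.out.println(vars);  // [A, B, C]
      }
    }
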
-  private void convertIdentifier(Blackboard bb, SqlIdentifier id,
-      SqlNodeList extendedColumns) {
-    final SqlValidatorNamespace fromNamespace =
-        validator.getNamespace(id).resolve();
-    if (fromNamespace.getNode() != null) {
-      convertFrom(bb, fromNamespace.getNode());
-      return;
-    }
-    final String datasetName =
-        datasetStack.isEmpty() ? null : datasetStack.peek();
-    final boolean[] usedDataset = {false};
-    RelOptTable table =
-        SqlValidatorUtil.getRelOptTable(fromNamespace, catalogReader,
-            datasetName, usedDataset);
-    if (extendedColumns != null && extendedColumns.size() > 0) {
-      assert table != null;
-      final SqlValidatorTable validatorTable =
-          table.unwrap(SqlValidatorTable.class);
-      final List<RelDataTypeField> extendedFields =
-          SqlValidatorUtil.getExtendedColumns(validator.getTypeFactory(), validatorTable,
-              extendedColumns);
-      table = table.extend(extendedFields);
-    }
-    final RelNode tableRel;
-    if (config.isConvertTableAccess()) {
-      tableRel = toRel(table);
-    } else {
-      tableRel = LogicalTableScan.create(cluster, table);
-    }
-    bb.setRoot(tableRel, true);
-    if (usedDataset[0]) {
-      bb.setDataset(datasetName);
-    }
-  }
-
-  protected void convertCollectionTable(
-      Blackboard bb,
-      SqlCall call) {
-    final SqlOperator operator = call.getOperator();
-    if (operator == SqlStdOperatorTable.TABLESAMPLE) {
-      final String sampleName = (String) SqlLiteral.value(call.operand(0));
-      datasetStack.push(sampleName);
-      SqlCall cursorCall = call.operand(1);
-      SqlNode query = cursorCall.operand(0);
-      RelNode converted = convertQuery(query, false, false).rel;
-      bb.setRoot(converted, false);
-      datasetStack.pop();
-      return;
-    }
-    replaceSubQueries(bb, call, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-
-    // Expand table macro if possible. It's more efficient than
-    // LogicalTableFunctionScan.
-    final SqlCallBinding callBinding =
-        new SqlCallBinding(bb.scope.getValidator(), bb.scope, call);
-    if (operator instanceof SqlUserDefinedTableMacro) {
-      final SqlUserDefinedTableMacro udf =
-          (SqlUserDefinedTableMacro) operator;
-      final TranslatableTable table =
-          udf.getTable(typeFactory, callBinding.operands());
-      final RelDataType rowType = table.getRowType(typeFactory);
-      RelOptTable relOptTable = RelOptTableImpl.create(null, rowType, table,
-          udf.getNameAsId().names);
-      RelNode converted = toRel(relOptTable);
-      bb.setRoot(converted, true);
-      return;
-    }
-
-    Type elementType;
-    if (operator instanceof SqlUserDefinedTableFunction) {
-      SqlUserDefinedTableFunction udtf = (SqlUserDefinedTableFunction) operator;
-      elementType = udtf.getElementType(typeFactory, callBinding.operands());
-    } else {
-      elementType = null;
-    }
-
-    RexNode rexCall = bb.convertExpression(call);
-    final List<RelNode> inputs = bb.retrieveCursors();
-    Set<RelColumnMapping> columnMappings =
-        getColumnMappings(operator);
-    LogicalTableFunctionScan callRel =
-        LogicalTableFunctionScan.create(
-            cluster,
-            inputs,
-            rexCall,
-            elementType,
-            validator.getValidatedNodeType(call),
-            columnMappings);
-    bb.setRoot(callRel, true);
-    afterTableFunction(bb, call, callRel);
-  }
-
-  protected void afterTableFunction(
-      SqlToRelConverter.Blackboard bb,
-      SqlCall call,
-      LogicalTableFunctionScan callRel) {
-  }
-
-  private Set<RelColumnMapping> getColumnMappings(SqlOperator op) {
-    SqlReturnTypeInference rti = op.getReturnTypeInference();
-    if (rti == null) {
-      return null;
-    }
-    if (rti instanceof TableFunctionReturnTypeInference) {
-      TableFunctionReturnTypeInference tfrti =
-          (TableFunctionReturnTypeInference) rti;
-      return tfrti.getColumnMappings();
-    } else {
-      return null;
-    }
-  }
-
-  protected RelNode createJoin(
-      Blackboard bb,
-      RelNode leftRel,
-      RelNode rightRel,
-      RexNode joinCond,
-      JoinRelType joinType) {
-    assert joinCond != null;
-
-    final CorrelationUse p = getCorrelationUse(bb, rightRel);
-    if (p != null) {
-      LogicalCorrelate corr = LogicalCorrelate.create(leftRel, p.r,
-          p.id, p.requiredColumns, SemiJoinType.of(joinType));
-      if (!joinCond.isAlwaysTrue()) {
-        final RelFactories.FilterFactory factory =
-            RelFactories.DEFAULT_FILTER_FACTORY;
-        return factory.createFilter(corr, joinCond);
-      }
-      return corr;
-    }
-
-    // OVERRIDE POINT
-    if (containOnlyCast(joinCond)) {
-      joinCond = convertCastCondition(joinCond);
-    }
-
-    final Join originalJoin =
-        (Join) RelFactories.DEFAULT_JOIN_FACTORY.createJoin(leftRel, rightRel,
-            joinCond, ImmutableSet.<CorrelationId>of(), joinType, false);
-
-    return RelOptUtil.pushDownJoinConditions(originalJoin);
-  }
-
-  // OVERRIDE POINT
-  private boolean containOnlyCast(RexNode node) {
-    boolean result = true;
-    switch (node.getKind()) {
-      case AND:
-      case EQUALS:
-        final RexCall call = (RexCall) node;
-        List<RexNode> operands = Lists.newArrayList(call.getOperands());
-        for (int i = 0; i < operands.size(); i++) {
-          RexNode operand = operands.get(i);
-          result &= containOnlyCast(operand);
-        }
-        break;
-      case OR:
-      case INPUT_REF:
-      case LITERAL:
-      case CAST:
-        return true;
-      default:
-        return false;
-    }
-    return result;
-  }
-
-  // OVERRIDE POINT
-  private static RexNode convertCastCondition(RexNode node) {
-    switch (node.getKind()) {
-      case IS_NULL:
-      case IS_NOT_NULL:
-      case OR:
-      case AND:
-      case EQUALS:
-        RexCall call = (RexCall) node;
-        List<RexNode> list = Lists.newArrayList();
-        List<RexNode> operands = Lists.newArrayList(call.getOperands());
-        for (int i = 0; i < operands.size(); i++) {
-          RexNode operand = operands.get(i);
-          final RexNode e = convertCastCondition(operand);
-          list.add(e);
-        }
-        if (!list.equals(call.getOperands())) {
-          return call.clone(call.getType(), list);
-        }
-        return call;
-      case CAST:
-        call = (RexCall) node;
-        operands = Lists.newArrayList(call.getOperands());
-        return operands.get(0);
-      default:
-        return node;
-    }
-  }
-
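
The two OVERRIDE POINT helpers above are Kylin's tweak: when a join condition consists only of AND/EQUALS over input refs, literals and CASTs, the CAST wrappers are peeled off so the condition stays a recognizable equi-join. A minimal stand-alone sketch of the same recursion follows, using an invented Expr record instead of Calcite's RexCall; it is an illustration, not the production API.

    import java.util.List;
    import java.util.stream.Collectors;

    // Toy expression tree: a node kind plus operands (stands in for RexCall).
    record Expr(String kind, List<Expr> operands) {
      static Expr of(String kind, Expr... ops) {
        return new Expr(kind, List.of(ops));
      }

      // CAST(x) collapses to x; AND/OR/EQUALS recurse into operands;
      // leaves (input refs, literals) pass through unchanged.
      static Expr stripCasts(Expr e) {
        switch (e.kind()) {
        case "CAST":
          return stripCasts(e.operands().get(0));
        case "AND":
        case "OR":
        case "EQUALS":
          return new Expr(e.kind(),
              e.operands().stream().map(Expr::stripCasts).collect(Collectors.toList()));
        default:
          return e;
        }
      }

      public static void main(String[] args) {
        // CAST($0) = $1  becomes  $0 = $1
        Expr cond = of("EQUALS", of("CAST", of("$0")), of("$1"));
        System.out.println(stripCasts(cond));
      }
    }
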
-  private CorrelationUse getCorrelationUse(Blackboard bb, final RelNode r0) {
-    final Set<CorrelationId> correlatedVariables =
-        RelOptUtil.getVariablesUsed(r0);
-    if (correlatedVariables.isEmpty()) {
-      return null;
-    }
-    final ImmutableBitSet.Builder requiredColumns = ImmutableBitSet.builder();
-    final List<CorrelationId> correlNames = Lists.newArrayList();
-
-    // All correlations must refer to the same namespace, since correlation
-    // produces exactly one correlation source.
-    // The same source might be referenced by different variables since
-    // DeferredLookups are not de-duplicated at create time.
-    SqlValidatorNamespace prevNs = null;
-
-    for (CorrelationId correlName : correlatedVariables) {
-      DeferredLookup lookup =
-          mapCorrelToDeferred.get(correlName);
-      RexFieldAccess fieldAccess = lookup.getFieldAccess(correlName);
-      String originalRelName = lookup.getOriginalRelName();
-      String originalFieldName = fieldAccess.getField().getName();
-
-      final SqlNameMatcher nameMatcher =
-          lookup.bb.scope.getValidator().getCatalogReader().nameMatcher();
-      final SqlValidatorScope.ResolvedImpl resolved =
-          new SqlValidatorScope.ResolvedImpl();
-      lookup.bb.scope.resolve(ImmutableList.of(originalRelName),
-          nameMatcher, false, resolved);
-      assert resolved.count() == 1;
-      final SqlValidatorScope.Resolve resolve = resolved.only();
-      final SqlValidatorNamespace foundNs = resolve.namespace;
-      final RelDataType rowType = resolve.rowType();
-      final int childNamespaceIndex = resolve.path.steps().get(0).i;
-      final SqlValidatorScope ancestorScope = resolve.scope;
-      boolean correlInCurrentScope = ancestorScope == bb.scope;
-
-      if (!correlInCurrentScope) {
-        continue;
-      }
-
-      if (prevNs == null) {
-        prevNs = foundNs;
-      } else {
-        assert prevNs == foundNs : "All correlation variables should resolve"
-            + " to the same namespace."
-            + " Prev ns=" + prevNs
-            + ", new ns=" + foundNs;
-      }
-
-      int namespaceOffset = 0;
-      if (childNamespaceIndex > 0) {
-        // If not the first child, need to figure out the width
-        // of output types from all the preceding namespaces
-        assert ancestorScope instanceof ListScope;
-        List<SqlValidatorNamespace> children =
-            ((ListScope) ancestorScope).getChildren();
-
-        for (int i = 0; i < childNamespaceIndex; i++) {
-          SqlValidatorNamespace child = children.get(i);
-          namespaceOffset +=
-              child.getRowType().getFieldCount();
-        }
-      }
-
-      RexFieldAccess topLevelFieldAccess = fieldAccess;
-      while (topLevelFieldAccess.getReferenceExpr() instanceof RexFieldAccess) {
-        topLevelFieldAccess = (RexFieldAccess) topLevelFieldAccess.getReferenceExpr();
-      }
-      final RelDataTypeField field = rowType.getFieldList()
-          .get(topLevelFieldAccess.getField().getIndex() - namespaceOffset);
-      int pos = namespaceOffset + field.getIndex();
-
-      assert field.getType()
-          == topLevelFieldAccess.getField().getType();
-
-      assert pos != -1;
-
-      if (bb.mapRootRelToFieldProjection.containsKey(bb.root)) {
-        // bb.root is an aggregate and only projects group by
-        // keys.
-        Map<Integer, Integer> exprProjection =
-            bb.mapRootRelToFieldProjection.get(bb.root);
-
-        // sub-query can reference group by keys projected from
-        // the root of the outer relation.
-        if (exprProjection.containsKey(pos)) {
-          pos = exprProjection.get(pos);
-        } else {
-          // correl not grouped
-          throw new AssertionError("Identifier '" + originalRelName + "."
-              + originalFieldName + "' is not a group expr");
-        }
-      }
-
-      requiredColumns.set(pos);
-      correlNames.add(correlName);
-    }
-
-    if (correlNames.isEmpty()) {
-      // None of the correlating variables originated in this scope.
-      return null;
-    }
-
-    RelNode r = r0;
-    if (correlNames.size() > 1) {
-      // The same table was referenced more than once.
-      // So we deduplicate.
-      r = DeduplicateCorrelateVariables.go(rexBuilder, correlNames.get(0),
-          Util.skip(correlNames), r0);
-      // Add new node to leaves.
-      leaves.add(r);
-    }
-    return new CorrelationUse(correlNames.get(0), requiredColumns.build(), r);
-  }
-
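
One subtle step in getCorrelationUse above is the namespaceOffset computation: when a correlated variable resolves to the i-th child of a scope with several join inputs, its absolute field position is the total field count of all preceding children plus its index within its own child. A toy illustration with invented field counts:

    public class NamespaceOffset {
      public static void main(String[] args) {
        // A scope with three join inputs of widths 3, 2 and 4 fields.
        int[] inputFieldCounts = {3, 2, 4};
        int childNamespaceIndex = 2;   // the variable resolves to the third input
        int fieldIndexInChild = 1;     // second field of that input

        // Sum the widths of all preceding inputs, as the loop above does.
        int offset = 0;
        for (int i = 0; i < childNamespaceIndex; i++) {
          offset += inputFieldCounts[i];
        }
        System.out.println("absolute position = " + (offset + fieldIndexInChild));
        // absolute position = 6
      }
    }
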
-  /**
-   * Determines whether a sub-query is non-correlated. Note that a
-   * non-correlated sub-query can contain correlated references, provided those
-   * references do not reference select statements that are parents of the
-   * sub-query.
-   *
-   * @param subq the sub-query
-   * @param bb   blackboard used while converting the sub-query, i.e., the
-   *             blackboard of the parent query of this sub-query
-   * @return true if the sub-query is non-correlated
-   */
-  private boolean isSubQueryNonCorrelated(RelNode subq, Blackboard bb) {
-    Set<CorrelationId> correlatedVariables = RelOptUtil.getVariablesUsed(subq);
-    for (CorrelationId correlName : correlatedVariables) {
-      DeferredLookup lookup = mapCorrelToDeferred.get(correlName);
-      String originalRelName = lookup.getOriginalRelName();
-
-      final SqlNameMatcher nameMatcher =
-          lookup.bb.scope.getValidator().getCatalogReader().nameMatcher();
-      final SqlValidatorScope.ResolvedImpl resolved =
-          new SqlValidatorScope.ResolvedImpl();
-      lookup.bb.scope.resolve(ImmutableList.of(originalRelName), nameMatcher,
-          false, resolved);
-
-      SqlValidatorScope ancestorScope = resolved.only().scope;
-
-      // If the correlated reference is in a scope that's "above" the
-      // sub-query, then this is a correlated sub-query.
-      SqlValidatorScope parentScope = bb.scope;
-      do {
-        if (ancestorScope == parentScope) {
-          return false;
-        }
-        if (parentScope instanceof DelegatingScope) {
-          parentScope = ((DelegatingScope) parentScope).getParent();
-        } else {
-          break;
-        }
-      } while (parentScope != null);
-    }
-    return true;
-  }
-
-  /**
-   * Returns a list of fields to be prefixed to each relational expression.
-   *
-   * @return List of system fields
-   */
-  protected List<RelDataTypeField> getSystemFields() {
-    return Collections.emptyList();
-  }
-
-  private RexNode convertJoinCondition(Blackboard bb,
-                                       SqlValidatorNamespace leftNamespace,
-                                       SqlValidatorNamespace rightNamespace,
-                                       SqlNode condition,
-                                       JoinConditionType conditionType,
-                                       RelNode leftRel,
-                                       RelNode rightRel) {
-    if (condition == null) {
-      return rexBuilder.makeLiteral(true);
-    }
-    bb.setRoot(ImmutableList.of(leftRel, rightRel));
-    replaceSubQueries(bb, condition, RelOptUtil.Logic.UNKNOWN_AS_FALSE);
-    switch (conditionType) {
-    case ON:
-      bb.setRoot(ImmutableList.of(leftRel, rightRel));
-      return bb.convertExpression(condition);
-    case USING:
-      final SqlNodeList list = (SqlNodeList) condition;
-      final List<String> nameList = new ArrayList<>();
-      for (SqlNode columnName : list) {
-        final SqlIdentifier id = (SqlIdentifier) columnName;
-        String name = id.getSimple();
-        nameList.add(name);
-      }
-      return convertUsing(leftNamespace, rightNamespace, nameList);
-    default:
-      throw Util.unexpected(conditionType);
-    }
-  }
-
-  /**
-   * Returns an expression for matching columns of a USING clause or inferred
-   * from NATURAL JOIN. "a JOIN b USING (x, y)" becomes "a.x = b.x AND a.y =
-   * b.y". Returns null if the column list is empty.
-   *
-   * @param leftNamespace Namespace of left input to join
-   * @param rightNamespace Namespace of right input to join
-   * @param nameList List of column names to join on
-   * @return Expression to match columns from name list, or true if name list
-   * is empty
-   */
-  private RexNode convertUsing(SqlValidatorNamespace leftNamespace,
-                               SqlValidatorNamespace rightNamespace,
-                               List<String> nameList) {
-    final SqlNameMatcher nameMatcher = catalogReader.nameMatcher();
-    final List<RexNode> list = Lists.newArrayList();
-    for (String name : nameList) {
-      List<RexNode> operands = new ArrayList<>();
-      int offset = 0;
-      for (SqlValidatorNamespace n : ImmutableList.of(leftNamespace,
-          rightNamespace)) {
-        final RelDataType rowType = n.getRowType();
-        final RelDataTypeField field = nameMatcher.field(rowType, name);
-        operands.add(
-            rexBuilder.makeInputRef(field.getType(),
-                offset + field.getIndex()));
-        offset += rowType.getFieldList().size();
-      }
-      list.add(rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, operands));
-    }
-    return RexUtil.composeConjunction(rexBuilder, list, false);
-  }
-
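
As the Javadoc above states, convertUsing expands a USING (or inferred NATURAL) column list into a conjunction of pairwise equalities, with the right input's fields offset by the width of the left input. A self-contained sketch of that offset arithmetic, using plain strings in place of RexNode (field names are invented):

    import java.util.ArrayList;
    import java.util.List;

    public class UsingExpansion {
      static String expandUsing(List<String> leftFields, List<String> rightFields,
          List<String> usingList) {
        List<String> equalities = new ArrayList<>();
        for (String name : usingList) {
          // Left input occupies offsets [0, leftFields.size()); right follows it.
          int leftPos = leftFields.indexOf(name);
          int rightPos = leftFields.size() + rightFields.indexOf(name);
          equalities.add("$" + leftPos + " = $" + rightPos);
        }
        // An empty list composes to the always-true condition.
        return equalities.isEmpty() ? "TRUE" : String.join(" AND ", equalities);
      }

      public static void main(String[] args) {
        System.out.println(expandUsing(
            List.of("x", "y", "a"), List.of("x", "y", "b"), List.of("x", "y")));
        // $0 = $3 AND $1 = $4
      }
    }
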
-  private static JoinRelType convertJoinType(JoinType joinType) {
-    switch (joinType) {
-    case COMMA:
-    case INNER:
-    case CROSS:
-      return JoinRelType.INNER;
-    case FULL:
-      return JoinRelType.FULL;
-    case LEFT:
-      return JoinRelType.LEFT;
-    case RIGHT:
-      return JoinRelType.RIGHT;
-    default:
-      throw Util.unexpected(joinType);
-    }
-  }
-
-  /**
-   * Converts the SELECT, GROUP BY and HAVING clauses of an aggregate query.
-   *
-   * <p>This method extracts SELECT, GROUP BY and HAVING clauses, and creates
-   * an {@link AggConverter}, then delegates to {@link #createAggImpl}.
-   * Derived class may override this method to change any of those clauses or
-   * specify a different {@link AggConverter}.
-   *
-   * @param bb            Scope within which to resolve identifiers
-   * @param select        Query
-   * @param orderExprList Additional expressions needed to implement ORDER BY
-   */
-  protected void convertAgg(
-      Blackboard bb,
-      SqlSelect select,
-      List<SqlNode> orderExprList) {
-    assert bb.root != null : "precondition: child != null";
-    SqlNodeList groupList = select.getGroup();
-    SqlNodeList selectList = select.getSelectList();
-    SqlNode having = select.getHaving();
-
-    final AggConverter aggConverter = new AggConverter(bb, select);
-    createAggImpl(
-        bb,
-        aggConverter,
-        selectList,
-        groupList,
-        having,
-        orderExprList);
-  }
-
-  protected final void createAggImpl(
-      Blackboard bb,
-      final AggConverter aggConverter,
-      SqlNodeList selectList,
-      SqlNodeList groupList,
-      SqlNode having,
-      List<SqlNode> orderExprList) {
-    // Find aggregate functions in SELECT and HAVING clause
-    final AggregateFinder aggregateFinder = new AggregateFinder();
-    selectList.accept(aggregateFinder);
-    if (having != null) {
-      having.accept(aggregateFinder);
-    }
-
-    // first replace the sub-queries inside the aggregates
-    // because they will provide input rows to the aggregates.
-    replaceSubQueries(bb, aggregateFinder.list,
-        RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-
-    // If group-by clause is missing, pretend that it has zero elements.
-    if (groupList == null) {
-      groupList = SqlNodeList.EMPTY;
-    }
-
-    replaceSubQueries(bb, groupList, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-
-    // register the group exprs
-
-    // build a map to remember the projections from the top scope to the
-    // output of the current root.
-    //
-    // Calcite allows expressions, not just column references in
-    // group by list. This is not SQL 2003 compliant, but hey.
-
-    final AggregatingSelectScope scope = aggConverter.aggregatingSelectScope;
-    final AggregatingSelectScope.Resolved r = scope.resolved.get();
-    for (SqlNode groupExpr : r.groupExprList) {
-      aggConverter.addGroupExpr(groupExpr);
-    }
-
-    RexNode havingExpr = null;
-    final List<Pair<RexNode, String>> projects = Lists.newArrayList();
-
-    try {
-      Preconditions.checkArgument(bb.agg == null, "already in agg mode");
-      bb.agg = aggConverter;
-
-      // convert the select and having expressions, so that the
-      // agg converter knows which aggregations are required
-
-      selectList.accept(aggConverter);
-      // Assert we don't have dangling items left in the stack
-      assert !aggConverter.inOver;
-      for (SqlNode expr : orderExprList) {
-        expr.accept(aggConverter);
-        assert !aggConverter.inOver;
-      }
-      if (having != null) {
-        having.accept(aggConverter);
-        assert !aggConverter.inOver;
-      }
-
-      // compute inputs to the aggregator
-      List<Pair<RexNode, String>> preExprs = aggConverter.getPreExprs();
-
-      if (preExprs.size() == 0) {
-        // Special case for COUNT(*), where we can end up with no inputs
-        // at all.  The rest of the system doesn't like 0-tuples, so we
-        // select a dummy constant here.
-        final RexNode zero = rexBuilder.makeExactLiteral(BigDecimal.ZERO);
-        preExprs = ImmutableList.of(Pair.of(zero, (String) null));
-      }
-
-      final RelNode inputRel = bb.root;
-
-      // Project the expressions required by agg and having.
-      bb.setRoot(
-          RelOptUtil.createProject(
-              inputRel,
-              preExprs,
-              true),
-          false);
-      bb.mapRootRelToFieldProjection.put(bb.root, r.groupExprProjection);
-
-      // REVIEW jvs 31-Oct-2007:  doesn't the declaration of
-      // monotonicity here assume sort-based aggregation at
-      // the physical level?
-
-      // Tell bb which of group columns are sorted.
-      bb.columnMonotonicities.clear();
-      for (SqlNode groupItem : groupList) {
-        bb.columnMonotonicities.add(
-            bb.scope.getMonotonicity(groupItem));
-      }
-
-      // Add the aggregator
-      bb.setRoot(
-          createAggregate(bb, r.indicator, r.groupSet, r.groupSets,
-              aggConverter.getAggCalls()),
-          false);
-
-      // Generate NULL values for rolled-up not-null fields.
-      final Aggregate aggregate = (Aggregate) bb.root;
-      if (aggregate.getGroupType() != Aggregate.Group.SIMPLE) {
-        assert aggregate.indicator;
-        List<Pair<RexNode, String>> projects2 = Lists.newArrayList();
-        int converted = 0;
-        final int groupCount = aggregate.getGroupSet().cardinality();
-        for (RelDataTypeField field : aggregate.getRowType().getFieldList()) {
-          final int i = field.getIndex();
-          final RexNode rex;
-          if (i < groupCount && r.isNullable(i)) {
-            ++converted;
-
-            rex = rexBuilder.makeCall(SqlStdOperatorTable.CASE,
-                rexBuilder.makeInputRef(aggregate, groupCount + i),
-                rexBuilder.makeCast(
-                    typeFactory.createTypeWithNullability(
-                        field.getType(), true),
-                    rexBuilder.constantNull()),
-                rexBuilder.makeInputRef(aggregate, i));
-          } else {
-            rex = rexBuilder.makeInputRef(aggregate, i);
-          }
-          projects2.add(Pair.of(rex, field.getName()));
-        }
-        if (converted > 0) {
-          bb.setRoot(
-              RelOptUtil.createProject(bb.root, projects2, true),
-              false);
-        }
-      }
-
-      bb.mapRootRelToFieldProjection.put(bb.root, r.groupExprProjection);
-
-      // Replace sub-queries in having here and modify having to use
-      // the replaced expressions
-      if (having != null) {
-        SqlNode newHaving = pushDownNotForIn(bb.scope, having);
-        replaceSubQueries(bb, newHaving, RelOptUtil.Logic.UNKNOWN_AS_FALSE);
-        havingExpr = bb.convertExpression(newHaving);
-        if (havingExpr.isAlwaysTrue()) {
-          havingExpr = null;
-        }
-      }
-
-      // Now convert the other sub-queries in the select list.
-      // This needs to be done separately from the sub-query inside
-      // any aggregate in the select list, and after the aggregate rel
-      // is allocated.
-      replaceSubQueries(bb, selectList, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-
-      // Now sub-queries in the entire select list have been converted.
-      // Convert the select expressions to get the final list to be
-      // projected.
-      int k = 0;
-
-      // For select expressions, use the field names previously assigned
-      // by the validator. If we derive afresh, we might generate names
-      // like "EXPR$2" that don't match the names generated by the
-      // validator. This is especially the case when there are system
-      // fields; system fields appear in the relnode's rowtype but do not
-      // (yet) appear in the validator type.
-      final SelectScope selectScope =
-          SqlValidatorUtil.getEnclosingSelectScope(bb.scope);
-      assert selectScope != null;
-      final SqlValidatorNamespace selectNamespace =
-          validator.getNamespace(selectScope.getNode());
-      final List<String> names =
-          selectNamespace.getRowType().getFieldNames();
-      int sysFieldCount = selectList.size() - names.size();
-      for (SqlNode expr : selectList) {
-        projects.add(
-            Pair.of(bb.convertExpression(expr),
-                k < sysFieldCount
-                    ? validator.deriveAlias(expr, k++)
-                    : names.get(k++ - sysFieldCount)));
-      }
-
-      for (SqlNode expr : orderExprList) {
-        projects.add(
-            Pair.of(bb.convertExpression(expr),
-                validator.deriveAlias(expr, k++)));
-      }
-    } finally {
-      bb.agg = null;
-    }
-
-    // implement HAVING (we have already checked that it is non-trivial)
-    if (havingExpr != null) {
-      final RelFactories.FilterFactory factory =
-          RelFactories.DEFAULT_FILTER_FACTORY;
-      bb.setRoot(factory.createFilter(bb.root, havingExpr), false);
-    }
-
-    // implement the SELECT list
-    bb.setRoot(
-        RelOptUtil.createProject(
-            bb.root,
-            projects,
-            true),
-        false);
-
-    // Tell bb which of group columns are sorted.
-    bb.columnMonotonicities.clear();
-    for (SqlNode selectItem : selectList) {
-      bb.columnMonotonicities.add(
-          bb.scope.getMonotonicity(selectItem));
-    }
-  }
-
-  /**
-   * Creates an Aggregate.
-   *
-   * <p>The default implementation creates a {@link LogicalAggregate};
-   * a derived class may override this method to create a different
-   * aggregate implementation.
-   *
-   * @param bb       Blackboard
-   * @param indicator Whether to output fields indicating grouping sets
-   * @param groupSet Bit set of ordinals of grouping columns
-   * @param groupSets Grouping sets
-   * @param aggCalls Array of calls to aggregate functions
-   * @return LogicalAggregate
-   */
-  protected RelNode createAggregate(Blackboard bb, boolean indicator,
-                                    ImmutableBitSet groupSet, ImmutableList<ImmutableBitSet> groupSets,
-                                    List<AggregateCall> aggCalls) {
-    return LogicalAggregate.create(
-        bb.root, indicator, groupSet, groupSets, aggCalls);
-  }
-
-  public RexDynamicParam convertDynamicParam(
-      final SqlDynamicParam dynamicParam) {
-    // REVIEW jvs 8-Jan-2005:  dynamic params may be encountered out of
-    // order.  Should probably cross-check with the count from the parser
-    // at the end and make sure they all got filled in.  Why doesn't List
-    // have a resize() method?!?  Make this a utility.
-    while (dynamicParam.getIndex() >= dynamicParamSqlNodes.size()) {
-      dynamicParamSqlNodes.add(null);
-    }
-
-    dynamicParamSqlNodes.set(
-        dynamicParam.getIndex(),
-        dynamicParam);
-    return rexBuilder.makeDynamicParam(
-        getDynamicParamType(dynamicParam.getIndex()),
-        dynamicParam.getIndex());
-  }
-
-  /**
-   * Creates a list of collations required to implement the ORDER BY clause,
-   * if there is one. Populates <code>extraOrderExprs</code> with any sort
-   * expressions which are not in the select clause.
-   *
-   * @param bb              Scope within which to resolve identifiers
-   * @param select          Select clause. Never null, because we invent a
-   *                        dummy SELECT if ORDER BY is applied to a set
-   *                        operation (UNION etc.)
-   * @param orderList       Order by clause, may be null
-   * @param extraOrderExprs Sort expressions which are not in the select
-   *                        clause (output)
-   * @param collationList   List of collations (output)
-   */
-  protected void gatherOrderExprs(
-      Blackboard bb,
-      SqlSelect select,
-      SqlNodeList orderList,
-      List<SqlNode> extraOrderExprs,
-      List<RelFieldCollation> collationList) {
-    // TODO:  add validation rules to SqlValidator also
-    assert bb.root != null : "precondition: child != null";
-    assert select != null;
-    if (orderList == null) {
-      return;
-    }
-    for (SqlNode orderItem : orderList) {
-      collationList.add(
-          convertOrderItem(
-              select,
-              orderItem,
-              extraOrderExprs,
-              RelFieldCollation.Direction.ASCENDING,
-              RelFieldCollation.NullDirection.UNSPECIFIED));
-    }
-  }
-
-  protected RelFieldCollation convertOrderItem(
-          SqlSelect select,
-          SqlNode orderItem, List<SqlNode> extraExprs,
-          RelFieldCollation.Direction direction,
-          RelFieldCollation.NullDirection nullDirection) {
-    assert select != null;
-    // Handle DESC keyword, e.g. 'select a, b from t order by a desc'.
-    switch (orderItem.getKind()) {
-    case DESCENDING:
-      return convertOrderItem(
-          select,
-          ((SqlCall) orderItem).operand(0),
-          extraExprs,
-          RelFieldCollation.Direction.DESCENDING,
-          nullDirection);
-    case NULLS_FIRST:
-      return convertOrderItem(
-          select,
-          ((SqlCall) orderItem).operand(0),
-          extraExprs,
-          direction,
-          RelFieldCollation.NullDirection.FIRST);
-    case NULLS_LAST:
-      return convertOrderItem(
-          select,
-          ((SqlCall) orderItem).operand(0),
-          extraExprs,
-          direction,
-          RelFieldCollation.NullDirection.LAST);
-    }
-
-    SqlNode converted = validator.expandOrderExpr(select, orderItem);
-
-    switch (nullDirection) {
-    case UNSPECIFIED:
-      nullDirection = validator.getDefaultNullCollation().last(desc(direction))
-          ? RelFieldCollation.NullDirection.LAST
-          : RelFieldCollation.NullDirection.FIRST;
-    }
-
-    // Scan the select list and order exprs for an identical expression.
-    final SelectScope selectScope = validator.getRawSelectScope(select);
-    int ordinal = -1;
-    for (SqlNode selectItem : selectScope.getExpandedSelectList()) {
-      ++ordinal;
-      if (converted.equalsDeep(stripAs(selectItem), Litmus.IGNORE)) {
-        return new RelFieldCollation(ordinal, direction, nullDirection);
-      }
-    }
-
-    for (SqlNode extraExpr : extraExprs) {
-      ++ordinal;
-      if (converted.equalsDeep(extraExpr, Litmus.IGNORE)) {
-        return new RelFieldCollation(ordinal, direction, nullDirection);
-      }
-    }
-
-    // TODO:  handle collation sequence
-    // TODO: flag expressions as non-standard
-
-    extraExprs.add(converted);
-    return new RelFieldCollation(ordinal + 1, direction, nullDirection);
-  }
-
-  private static boolean desc(RelFieldCollation.Direction direction) {
-    switch (direction) {
-    case DESCENDING:
-    case STRICTLY_DESCENDING:
-      return true;
-    default:
-      return false;
-    }
-  }
-
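
convertOrderItem above is a small recursion: each DESC, NULLS FIRST or NULLS LAST wrapper pins one attribute of the collation and recurses on its single operand; once the bare expression remains, an unspecified null direction is defaulted from the sort direction. A minimal sketch with strings standing in for SqlNode, assuming the default null collation where nulls sort last ascending and first descending:

    public class OrderItemUnwrap {
      record Collation(String expr, boolean descending, String nullDirection) {}

      // Each wrapper fixes one attribute and recurses on its operand, exactly
      // like the three leading cases of convertOrderItem.
      static Collation convert(String item, boolean desc, String nulls) {
        if (item.startsWith("DESC(")) {
          return convert(inner(item), true, nulls);
        }
        if (item.startsWith("NULLS_FIRST(")) {
          return convert(inner(item), desc, "FIRST");
        }
        if (item.startsWith("NULLS_LAST(")) {
          return convert(inner(item), desc, "LAST");
        }
        if (nulls == null) {
          // UNSPECIFIED: default from direction (nulls sort high by default).
          nulls = desc ? "FIRST" : "LAST";
        }
        return new Collation(item, desc, nulls);
      }

      static String inner(String s) {
        return s.substring(s.indexOf('(') + 1, s.length() - 1);
      }

      public static void main(String[] args) {
        System.out.println(convert("NULLS_FIRST(DESC(a))", false, null));
        // Collation[expr=a, descending=true, nullDirection=FIRST]
      }
    }
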
-  @Deprecated // to be removed before 2.0
-  protected boolean enableDecorrelation() {
-    // disable sub-query decorrelation when needed.
-    // e.g. if outer joins are not supported.
-    return config.isDecorrelationEnabled();
-  }
-
-  protected RelNode decorrelateQuery(RelNode rootRel) {
-    return RelDecorrelator.decorrelateQuery(rootRel);
-  }
-
-  /**
-   * Returns whether to trim unused fields as part of the conversion process.
-   *
-   * @return Whether to trim unused fields
-   */
-  @Deprecated // to be removed before 2.0
-  public boolean isTrimUnusedFields() {
-//    return config.isTrimUnusedFields();
-    /* OVERRIDE POINT */
-    return false;
-  }
-
-  /**
-   * Recursively converts a query to a relational expression.
-   *
-   * @param query         Query
-   * @param top           Whether this query is the top-level query of the
-   *                      statement
-   * @param targetRowType Target row type, or null
-   * @return Relational expression
-   */
-  protected RelRoot convertQueryRecursive(SqlNode query, boolean top,
-                                          RelDataType targetRowType) {
-    final SqlKind kind = query.getKind();
-    switch (kind) {
-    case SELECT:
-      return RelRoot.of(convertSelect((SqlSelect) query, top), kind);
-    case INSERT:
-      return RelRoot.of(convertInsert((SqlInsert) query), kind);
-    case DELETE:
-      return RelRoot.of(convertDelete((SqlDelete) query), kind);
-    case UPDATE:
-      return RelRoot.of(convertUpdate((SqlUpdate) query), kind);
-    case MERGE:
-      return RelRoot.of(convertMerge((SqlMerge) query), kind);
-    case UNION:
-    case INTERSECT:
-    case EXCEPT:
-      return RelRoot.of(convertSetOp((SqlCall) query), kind);
-    case WITH:
-      return convertWith((SqlWith) query, top);
-    case VALUES:
-      return RelRoot.of(convertValues((SqlCall) query, targetRowType), kind);
-    default:
-      throw new AssertionError("not a query: " + query);
-    }
-  }
-
-  /**
-   * Converts a set operation (UNION, INTERSECT, EXCEPT) into relational
-   * expressions.
-   *
-   * @param call Call to set operator
-   * @return Relational expression
-   */
-  protected RelNode convertSetOp(SqlCall call) {
-    final RelNode left =
-        convertQueryRecursive(call.operand(0), false, null).project();
-    final RelNode right =
-        convertQueryRecursive(call.operand(1), false, null).project();
-    switch (call.getKind()) {
-    case UNION:
-      return LogicalUnion.create(ImmutableList.of(left, right), all(call));
-
-    case INTERSECT:
-      return LogicalIntersect.create(ImmutableList.of(left, right), all(call));
-
-    case EXCEPT:
-      return LogicalMinus.create(ImmutableList.of(left, right), all(call));
-
-    default:
-      throw Util.unexpected(call.getKind());
-    }
-  }
-
-  private boolean all(SqlCall call) {
-    return ((SqlSetOperator) call.getOperator()).isAll();
-  }
-
-  protected RelNode convertInsert(SqlInsert call) {
-    RelOptTable targetTable = getTargetTable(call);
-
-    final RelDataType targetRowType =
-        validator.getValidatedNodeType(call);
-    assert targetRowType != null;
-    RelNode sourceRel =
-        convertQueryRecursive(call.getSource(), false, targetRowType).project();
-    RelNode massagedRel = convertColumnList(call, sourceRel);
-
-    return createModify(targetTable, massagedRel);
-  }
-
-  /** Creates a relational expression to modify a table or modifiable view. */
-  private RelNode createModify(RelOptTable targetTable, RelNode source) {
-    final ModifiableTable modifiableTable =
-        targetTable.unwrap(ModifiableTable.class);
-    if (modifiableTable != null) {
-      return modifiableTable.toModificationRel(cluster, targetTable,
-          catalogReader, source, LogicalTableModify.Operation.INSERT, null,
-          null, false);
-    }
-    final ModifiableView modifiableView =
-        targetTable.unwrap(ModifiableView.class);
-    if (modifiableView != null) {
-      final Table delegateTable = modifiableView.getTable();
-      final RelDataType delegateRowType = delegateTable.getRowType(typeFactory);
-      final RelOptTable delegateRelOptTable =
-          RelOptTableImpl.create(null, delegateRowType, delegateTable,
-              modifiableView.getTablePath());
-      final RelNode newSource =
-          createSource(targetTable, source, modifiableView, delegateRowType);
-      return createModify(delegateRelOptTable, newSource);
-    }
-    return LogicalTableModify.create(targetTable, catalogReader, source,
-        LogicalTableModify.Operation.INSERT, null, null, false);
-  }
-
-  /** Wraps a relational expression in the projects and filters implied by
-   * a {@link ModifiableView}.
-   *
-   * <p>The input relational expression is suitable for inserting into the view,
-   * and the returned relational expression is suitable for inserting into its
-   * delegate table.
-   *
-   * <p>In principle, the delegate table of a view might be another modifiable
-   * view, and if so, the process can be repeated. */
-  private RelNode createSource(RelOptTable targetTable, RelNode source,
-                               ModifiableView modifiableView, RelDataType delegateRowType) {
-    final ImmutableIntList mapping = modifiableView.getColumnMapping();
-    assert mapping.size() == targetTable.getRowType().getFieldCount();
-
-    // For columns represented in the mapping, the expression is just a field
-    // reference.
-    final Map<Integer, RexNode> projectMap = new HashMap<>();
-    final List<RexNode> filters = new ArrayList<>();
-    for (int i = 0; i < mapping.size(); i++) {
-      int target = mapping.get(i);
-      if (target >= 0) {
-        projectMap.put(target, RexInputRef.of(i, source.getRowType()));
-      }
-    }
-
-    // For columns that are not in the mapping, and have a constraint of the
-    // form "column = value", the expression is the literal "value".
-    //
-    // If a column has multiple constraints, the extra ones will become a
-    // filter.
-    final RexNode constraint =
-        modifiableView.getConstraint(rexBuilder, delegateRowType);
-    RelOptUtil.inferViewPredicates(projectMap, filters, constraint);
-    final List<Pair<RexNode, String>> projects = new ArrayList<>();
-    for (RelDataTypeField field : delegateRowType.getFieldList()) {
-      RexNode node = projectMap.get(field.getIndex());
-      if (node == null) {
-        node = rexBuilder.makeNullLiteral(field.getType());
-      }
-      projects.add(
-          Pair.of(rexBuilder.ensureType(field.getType(), node, false),
-              field.getName()));
-    }
-
-    source = RelOptUtil.createProject(source, projects, true);
-    if (filters.size() > 0) {
-      source = RelOptUtil.createFilter(source, filters);
-    }
-    return source;
-  }
-
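
createSource above is easiest to see on a concrete view: columns covered by the view's column mapping become field references into the incoming row, a column pinned by an equality constraint such as c = 10 becomes that literal, and any remaining column defaults to a typed NULL (extra constraints would instead become filters). A toy rendering with invented column names:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ViewInsertSource {
      public static void main(String[] args) {
        // View: SELECT a, b FROM t WHERE c = 10, over delegate table t(a, b, c).
        List<String> delegateColumns = List.of("a", "b", "c");
        int[] columnMapping = {0, 1};            // view column i -> delegate ordinal
        Map<String, String> constraints = Map.of("c", "10");

        // Mapped columns become references into the incoming (view-shaped) row.
        Map<Integer, String> projectMap = new HashMap<>();
        for (int i = 0; i < columnMapping.length; i++) {
          projectMap.put(columnMapping[i], "$" + i);
        }

        // Unmapped columns take their constraint value, else a typed NULL.
        for (int i = 0; i < delegateColumns.size(); i++) {
          String expr = projectMap.get(i);
          if (expr == null) {
            String pinned = constraints.get(delegateColumns.get(i));
            expr = pinned != null ? pinned : "NULL";
          }
          System.out.println(delegateColumns.get(i) + " := " + expr);
        }
        // prints: a := $0, b := $1, c := 10
      }
    }
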
-  private RelOptTable.ToRelContext createToRelContext() {
-    return new RelOptTable.ToRelContext() {
-      public RelOptCluster getCluster() {
-        return cluster;
-      }
-
-      @Override public RelRoot expandView(
-          RelDataType rowType,
-          String queryString,
-          List<String> schemaPath,
-          List<String> viewPath) {
-        return viewExpander.expandView(rowType, queryString, schemaPath, viewPath);
-      }
-
-    };
-  }
-
-  public RelNode toRel(RelOptTable table) {
-    return table.toRel(createToRelContext());
-  }
-
-  protected RelOptTable getTargetTable(SqlNode call) {
-    final SqlValidatorNamespace targetNs = validator.getNamespace(call);
-    if (targetNs.isWrapperFor(SqlValidatorImpl.DmlNamespace.class)) {
-      final SqlValidatorImpl.DmlNamespace dmlNamespace =
-          targetNs.unwrap(SqlValidatorImpl.DmlNamespace.class);
-      return SqlValidatorUtil.getRelOptTable(dmlNamespace, catalogReader, null, null);
-    }
-    final SqlValidatorNamespace resolvedNamespace = targetNs.resolve();
-    return SqlValidatorUtil.getRelOptTable(resolvedNamespace, catalogReader, null, null);
-  }
-
-  /**
-   * Creates a source for an INSERT statement.
-   *
-   * <p>If the column list is not specified, source expressions match target
-   * columns in order.
-   *
-   * <p>If the column list is specified, source expressions are mapped to
-   * target columns by name via targetColumnList, and may not cover the entire
-   * target table. So, we'll make up a full row, using a combination of
-   * default values and the source expressions provided.
-   *
-   * @param call      Insert expression
-   * @param sourceRel Source relational expression
-   * @return Converted INSERT statement
-   */
-  protected RelNode convertColumnList(
-      SqlInsert call,
-      RelNode sourceRel) {
-    RelDataType sourceRowType = sourceRel.getRowType();
-    final RexNode sourceRef =
-        rexBuilder.makeRangeReference(sourceRowType, 0, false);
-    final List<String> targetColumnNames = new ArrayList<>();
-    final List<RexNode> columnExprs = new ArrayList<>();
-    collectInsertTargets(call, sourceRef, targetColumnNames, columnExprs);
-
-    final RelOptTable targetTable = getTargetTable(call);
-    final RelDataType targetRowType = targetTable.getRowType();
-    final List<RelDataTypeField> targetFields =
-        targetRowType.getFieldList();
-    final List<RexNode> sourceExps =
-        new ArrayList<>(
-            Collections.<RexNode>nCopies(targetFields.size(), null));
-    final List<String> fieldNames =
-        new ArrayList<>(
-            Collections.<String>nCopies(targetFields.size(), null));
-
-    final InitializerExpressionFactory initializerFactory =
-        getInitializerFactory(validator.getNamespace(call).getTable());
-
-    // Walk the name list and place the associated value in the
-    // expression list according to the ordinal value returned from
-    // the table construct, leaving nulls in the list for columns
-    // that are not referenced.
-    final SqlNameMatcher nameMatcher = catalogReader.nameMatcher();
-    for (Pair<String, RexNode> p : Pair.zip(targetColumnNames, columnExprs)) {
-      RelDataTypeField field = nameMatcher.field(targetRowType, p.left);
-      assert field != null : "column " + p.left + " not found";
-      sourceExps.set(field.getIndex(), p.right);
-    }
-
-    // Walk the expression list and get default values for any columns
-    // that were not supplied in the statement. Get field names too.
-    for (int i = 0; i < targetFields.size(); ++i) {
-      final RelDataTypeField field = targetFields.get(i);
-      final String fieldName = field.getName();
-      fieldNames.set(i, fieldName);
-      if (sourceExps.get(i) != null) {
-        if (initializerFactory.isGeneratedAlways(targetTable, i)) {
-          throw RESOURCE.insertIntoAlwaysGenerated(fieldName).ex();
-        }
-        continue;
-      }
-      sourceExps.set(i,
-          initializerFactory.newColumnDefaultValue(targetTable, i,
-              new InitializerContext() {
-                public RexBuilder getRexBuilder() {
-                  return rexBuilder;
-                }
-              }));
-
-      // bare nulls are dangerous in the wrong hands
-      sourceExps.set(i,
-          castNullLiteralIfNeeded(sourceExps.get(i), field.getType()));
-    }
-
-    return RelOptUtil.createProject(sourceRel, sourceExps, fieldNames, true);
-  }
-
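
convertColumnList above does two walks: first it places each supplied source expression at its target column's ordinal, then it fills every remaining slot with the initializer factory's default value. A sketch of that fill-in with strings in place of RexNode (the table and column names are invented):

    import java.util.Arrays;
    import java.util.List;

    public class InsertColumnFill {
      public static void main(String[] args) {
        // Target table emp(id, name, dept, sal);
        // statement: INSERT INTO emp (name, sal) VALUES (...), source row ($0, $1).
        List<String> targetColumns = List.of("id", "name", "dept", "sal");
        List<String> suppliedNames = List.of("name", "sal");

        // Walk the name list, placing each source expression by target ordinal.
        String[] exprs = new String[targetColumns.size()];
        for (int i = 0; i < suppliedNames.size(); i++) {
          exprs[targetColumns.indexOf(suppliedNames.get(i))] = "$" + i;
        }

        // Walk the expression list, defaulting any column not supplied.
        for (int i = 0; i < exprs.length; i++) {
          if (exprs[i] == null) {
            exprs[i] = "DEFAULT(" + targetColumns.get(i) + ")";
          }
        }
        System.out.println(Arrays.toString(exprs));
        // [DEFAULT(id), $0, DEFAULT(dept), $1]
      }
    }
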
-  private InitializerExpressionFactory getInitializerFactory(
-      SqlValidatorTable validatorTable) {
-    // We might unwrap a null instead of an InitializerExpressionFactory.
-    final Table table = unwrap(validatorTable, Table.class);
-    if (table != null) {
-      InitializerExpressionFactory f =
-          unwrap(table, InitializerExpressionFactory.class);
-      if (f != null) {
-        return f;
-      }
-    }
-    return new NullInitializerExpressionFactory();
-  }
-
-  private static <T> T unwrap(Object o, Class<T> clazz) {
-    if (o instanceof Wrapper) {
-      return ((Wrapper) o).unwrap(clazz);
-    }
-    return null;
-  }
-
-  private RexNode castNullLiteralIfNeeded(RexNode node, RelDataType type) {
-    if (!RexLiteral.isNullLiteral(node)) {
-      return node;
-    }
-    return rexBuilder.makeCast(type, node);
-  }
-
-  /**
-   * Given an INSERT statement, collects the list of names to be populated and
-   * the expressions to put in them.
-   *
-   * @param call              Insert statement
-   * @param sourceRef         Expression representing a row from the source
-   *                          relational expression
-   * @param targetColumnNames List of target column names, to be populated
-   * @param columnExprs       List of expressions, to be populated
-   */
-  protected void collectInsertTargets(
-      SqlInsert call,
-      final RexNode sourceRef,
-      final List<String> targetColumnNames,
-      List<RexNode> columnExprs) {
-    final RelOptTable targetTable = getTargetTable(call);
-    final RelDataType tableRowType = targetTable.getRowType();
-    SqlNodeList targetColumnList = call.getTargetColumnList();
-    if (targetColumnList == null) {
-      if (validator.getConformance().isInsertSubsetColumnsAllowed()) {
-        final RelDataType targetRowType =
-            typeFactory.createStructType(
-                tableRowType.getFieldList()
-                    .subList(0, sourceRef.getType().getFieldCount()));
-        targetColumnNames.addAll(targetRowType.getFieldNames());
-      } else {
-        targetColumnNames.addAll(tableRowType.getFieldNames());
-      }
-    } else {
-      for (int i = 0; i < targetColumnList.size(); i++) {
-        SqlIdentifier id = (SqlIdentifier) targetColumnList.get(i);
-        RelDataTypeField field =
-            SqlValidatorUtil.getTargetField(
-                tableRowType, typeFactory, id, catalogReader, targetTable);
-        assert field != null : "column " + id.toString() + " not found";
-        targetColumnNames.add(field.getName());
-      }
-    }
-
-    for (int i = 0; i < targetColumnNames.size(); i++) {
-      final RexNode expr = rexBuilder.makeFieldAccess(sourceRef, i);
-      columnExprs.add(expr);
-    }
-  }
-
-  private RelNode convertDelete(SqlDelete call) {
-    RelOptTable targetTable = getTargetTable(call);
-    RelNode sourceRel = convertSelect(call.getSourceSelect(), false);
-    return LogicalTableModify.create(targetTable, catalogReader, sourceRel,
-        LogicalTableModify.Operation.DELETE, null, null, false);
-  }
-
-  private RelNode convertUpdate(SqlUpdate call) {
-    final SqlValidatorScope scope = validator.getWhereScope(call.getSourceSelect());
-    Blackboard bb = createBlackboard(scope, null, false);
-
-    Builder<RexNode> rexNodeSourceExpressionListBuilder = ImmutableList.builder();
-    for (SqlNode n : call.getSourceExpressionList()) {
-      RexNode rn = bb.convertExpression(n);
-      rexNodeSourceExpressionListBuilder.add(rn);
-    }
-
-    RelOptTable targetTable = getTargetTable(call);
-
-    // convert update column list from SqlIdentifier to String
-    final List<String> targetColumnNameList = new ArrayList<>();
-    final RelDataType targetRowType = targetTable.getRowType();
-    for (SqlNode node : call.getTargetColumnList()) {
-      SqlIdentifier id = (SqlIdentifier) node;
-      RelDataTypeField field =
-          SqlValidatorUtil.getTargetField(
-              targetRowType, typeFactory, id, catalogReader, targetTable);
-      assert field != null : "column " + id.toString() + " not found";
-      targetColumnNameList.add(field.getName());
-    }
-
-    RelNode sourceRel = convertSelect(call.getSourceSelect(), false);
-
-    return LogicalTableModify.create(targetTable, catalogReader, sourceRel,
-        LogicalTableModify.Operation.UPDATE, targetColumnNameList,
-        rexNodeSourceExpressionListBuilder.build(), false);
-  }
-
-  private RelNode convertMerge(SqlMerge call) {
-    RelOptTable targetTable = getTargetTable(call);
-
-    // convert update column list from SqlIdentifier to String
-    final List<String> targetColumnNameList = new ArrayList<>();
-    final RelDataType targetRowType = targetTable.getRowType();
-    SqlUpdate updateCall = call.getUpdateCall();
-    if (updateCall != null) {
-      for (SqlNode targetColumn : updateCall.getTargetColumnList()) {
-        SqlIdentifier id = (SqlIdentifier) targetColumn;
-        RelDataTypeField field =
-            SqlValidatorUtil.getTargetField(
-                targetRowType, typeFactory, id, catalogReader, targetTable);
-        assert field != null : "column " + id.toString() + " not found";
-        targetColumnNameList.add(field.getName());
-      }
-    }
-
-    // replace the projection of the source select with a
-    // projection that contains the following:
-    // 1) the expressions corresponding to the new insert row (if there is
-    //    an insert)
-    // 2) all columns from the target table (if there is an update)
-    // 3) the set expressions in the update call (if there is an update)
-
-    // first, convert the merge's source select to construct the columns
-    // from the target table and the set expressions in the update call
-    RelNode mergeSourceRel = convertSelect(call.getSourceSelect(), false);
-
-    // then, convert the insert statement so we can get the insert
-    // values expressions
-    SqlInsert insertCall = call.getInsertCall();
-    int nLevel1Exprs = 0;
-    List<RexNode> level1InsertExprs = null;
-    List<RexNode> level2InsertExprs = null;
-    if (insertCall != null) {
-      RelNode insertRel = convertInsert(insertCall);
-
-      // if there are two levels of projections in the insert source, combine
-      // them into a single project; level1 refers to the topmost project;
-      // the level1 projection contains references to the level2
-      // expressions, except in the case where no target expression was
-      // provided, in which case, the expression is the default value for
-      // the column; or if the expressions directly map to the source
-      // table
-      level1InsertExprs =
-          ((LogicalProject) insertRel.getInput(0)).getProjects();
-      if (insertRel.getInput(0).getInput(0) instanceof LogicalProject) {
-        level2InsertExprs =
-            ((LogicalProject) insertRel.getInput(0).getInput(0))
-                .getProjects();
-      }
-      nLevel1Exprs = level1InsertExprs.size();
-    }
-
-    LogicalJoin join = (LogicalJoin) mergeSourceRel.getInput(0);
-    int nSourceFields = join.getLeft().getRowType().getFieldCount();
-    final List<RexNode> projects = new ArrayList<>();
-    for (int level1Idx = 0; level1Idx < nLevel1Exprs; level1Idx++) {
-      if ((level2InsertExprs != null)
-          && (level1InsertExprs.get(level1Idx) instanceof RexInputRef)) {
-        int level2Idx =
-            ((RexInputRef) level1InsertExprs.get(level1Idx)).getIndex();
-        projects.add(level2InsertExprs.get(level2Idx));
-      } else {
-        projects.add(level1InsertExprs.get(level1Idx));
-      }
-    }
-    if (updateCall != null) {
-      final LogicalProject project = (LogicalProject) mergeSourceRel;
-      projects.addAll(
-          Util.skip(project.getProjects(), nSourceFields));
-    }
-
-    RelNode massagedRel =
-        RelOptUtil.createProject(join, projects, null, true);
-
-    return LogicalTableModify.create(targetTable, catalogReader, massagedRel,
-        LogicalTableModify.Operation.MERGE, targetColumnNameList, null, false);
-  }
-
-  /**
-   * Converts an identifier into an expression in a given scope. For example,
-   * the "empno" in "select empno from emp join dept" becomes "emp.empno".
-   */
-  private RexNode convertIdentifier(
-      Blackboard bb,
-      SqlIdentifier identifier) {
-    // first check for reserved identifiers like CURRENT_USER
-    final SqlCall call = SqlUtil.makeCall(opTab, identifier);
-    if (call != null) {
-      return bb.convertExpression(call);
-    }
-
-    String pv = null;
-    if (bb.isPatternVarRef && identifier.names.size() > 1) {
-      pv = identifier.names.get(0);
-    }
-
-    final SqlQualified qualified;
-    if (bb.scope != null) {
-      qualified = bb.scope.fullyQualify(identifier);
-    } else {
-      qualified = SqlQualified.create(null, 1, null, identifier);
-    }
-    final Pair<RexNode, Map<String, Integer>> e0 = bb.lookupExp(qualified);
-    RexNode e = e0.left;
-    for (String name : qualified.suffixTranslated()) {
-      if (e == e0.left && e0.right != null) {
-        int i = e0.right.get(name);
-        e = rexBuilder.makeFieldAccess(e, i);
-      } else {
-        final boolean caseSensitive = true; // name already fully-qualified
-        if (identifier.isStar() && bb.scope instanceof MatchRecognizeScope) {
-          e = rexBuilder.makeFieldAccess(e, 0);
-        } else {
-          e = rexBuilder.makeFieldAccess(e, name, caseSensitive);
-        }
-      }
-    }
-    if (e instanceof RexInputRef) {
-      // adjust the type to account for nulls introduced by outer joins
-      e = adjustInputRef(bb, (RexInputRef) e);
-      if (pv != null) {
-        e = RexPatternFieldRef.of(pv, (RexInputRef) e);
-      }
-    }
-
-    if (e0.left instanceof RexCorrelVariable) {
-      assert e instanceof RexFieldAccess;
-      final RexNode prev =
-          bb.mapCorrelateToRex.put(((RexCorrelVariable) e0.left).id,
-              (RexFieldAccess) e);
-      assert prev == null;
-    }
-    return e;
-  }
-
-  /**
-   * Adjusts the type of a reference to an input field to account for nulls
-   * introduced by outer joins; and adjusts the offset to match the physical
-   * implementation.
-   *
-   * @param bb       Blackboard
-   * @param inputRef Input ref
-   * @return Adjusted input ref
-   */
-  protected RexNode adjustInputRef(
-      Blackboard bb,
-      RexInputRef inputRef) {
-    RelDataTypeField field = bb.getRootField(inputRef);
-    if (field != null) {
-      return rexBuilder.makeInputRef(
-          field.getType(),
-          inputRef.getIndex());
-    }
-    return inputRef;
-  }
-
-  /**
-   * Converts a row constructor into a relational expression.
-   *
-   * @param bb             Blackboard
-   * @param rowConstructor Row constructor expression
-   * @return Relational expression which returns a single row.
-   */
-  private RelNode convertRowConstructor(
-      Blackboard bb,
-      SqlCall rowConstructor) {
-    Preconditions.checkArgument(isRowConstructor(rowConstructor));
-    final List<SqlNode> operands = rowConstructor.getOperandList();
-    return convertMultisets(operands, bb);
-  }
-
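-  /**
-   * Converts a CURSOR expression by converting its query and registering
-   * the result in the blackboard's list of cursors.
-   */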
-  private RelNode convertCursor(Blackboard bb, SubQuery subQuery) {
-    final SqlCall cursorCall = (SqlCall) subQuery.node;
-    assert cursorCall.operandCount() == 1;
-    SqlNode query = cursorCall.operand(0);
-    RelNode converted = convertQuery(query, false, false).rel;
-    int iCursor = bb.cursors.size();
-    bb.cursors.add(converted);
-    subQuery.expr =
-        new RexInputRef(
-            iCursor,
-            converted.getRowType());
-    return converted;
-  }
-
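-  /**
-   * Converts MULTISET and ARRAY value or query constructors into a tree of
-   * {@link Collect} nodes, joining them to any scalar operands on a
-   * literal TRUE condition.
-   */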
-  private RelNode convertMultisets(final List<SqlNode> operands,
-                                   Blackboard bb) {
-    // NOTE: Wael 2/04/05: this implementation is not the most efficient in
-    // terms of planning since it generates XOs that can be reduced.
-    final List<Object> joinList = new ArrayList<>();
-    List<SqlNode> lastList = new ArrayList<>();
-    for (int i = 0; i < operands.size(); i++) {
-      SqlNode operand = operands.get(i);
-      if (!(operand instanceof SqlCall)) {
-        lastList.add(operand);
-        continue;
-      }
-
-      final SqlCall call = (SqlCall) operand;
-      final RelNode input;
-      switch (call.getKind()) {
-      case MULTISET_VALUE_CONSTRUCTOR:
-      case ARRAY_VALUE_CONSTRUCTOR:
-        final SqlNodeList list =
-            new SqlNodeList(call.getOperandList(), call.getParserPosition());
-        CollectNamespace nss =
-            (CollectNamespace) validator.getNamespace(call);
-        Blackboard usedBb;
-        if (null != nss) {
-          usedBb = createBlackboard(nss.getScope(), null, false);
-        } else {
-          usedBb =
-              createBlackboard(new ListScope(bb.scope) {
-                public SqlNode getNode() {
-                  return call;
-                }
-              }, null, false);
-        }
-        RelDataType multisetType = validator.getValidatedNodeType(call);
-        ((SqlValidatorImpl) validator).setValidatedNodeType(list,
-            multisetType.getComponentType());
-        input = convertQueryOrInList(usedBb, list, null);
-        break;
-      case MULTISET_QUERY_CONSTRUCTOR:
-      case ARRAY_QUERY_CONSTRUCTOR:
-        final RelRoot root = convertQuery(call.operand(0), false, true);
-        input = root.rel;
-        break;
-      default:
-        lastList.add(operand);
-        continue;
-      }
-
-      if (lastList.size() > 0) {
-        joinList.add(lastList);
-      }
-      lastList = new ArrayList<>();
-      Collect collect =
-          new Collect(
-              cluster,
-              cluster.traitSetOf(Convention.NONE),
-              input,
-              validator.deriveAlias(call, i));
-      joinList.add(collect);
-    }
-
-    if (joinList.size() == 0) {
-      joinList.add(lastList);
-    }
-
-    for (int i = 0; i < joinList.size(); i++) {
-      Object o = joinList.get(i);
-      if (o instanceof List) {
-        @SuppressWarnings("unchecked")
-        List<SqlNode> projectList = (List<SqlNode>) o;
-        final List<RexNode> selectList = new ArrayList<>();
-        final List<String> fieldNameList = new ArrayList<>();
-        for (int j = 0; j < projectList.size(); j++) {
-          SqlNode operand = projectList.get(j);
-          selectList.add(bb.convertExpression(operand));
-
-          // REVIEW angel 5-June-2005: Use deriveAliasFromOrdinal
-          // instead of deriveAlias to match field names from
-          // SqlRowOperator. Otherwise, we get the error "Type
-          // 'RecordType(INTEGER EMPNO)' has no field 'EXPR$0'" when
-          // doing "select * from unnest(select multiset[empno]
-          // from sales.emps)".
-
-          fieldNameList.add(SqlUtil.deriveAliasFromOrdinal(j));
-        }
-
-        RelNode projRel =
-            RelOptUtil.createProject(
-                LogicalValues.createOneRow(cluster),
-                selectList,
-                fieldNameList);
-
-        joinList.set(i, projRel);
-      }
-    }
-
-    RelNode ret = (RelNode) joinList.get(0);
-    for (int i = 1; i < joinList.size(); i++) {
-      RelNode relNode = (RelNode) joinList.get(i);
-      ret =
-          RelFactories.DEFAULT_JOIN_FACTORY.createJoin(
-              ret,
-              relNode,
-              rexBuilder.makeLiteral(true),
-              ImmutableSet.<CorrelationId>of(),
-              JoinRelType.INNER,
-              false);
-    }
-    return ret;
-  }
-
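-  /**
-   * Converts the select list of a SELECT statement, plus any extra
-   * expressions needed for sorting, into a projection on top of the
-   * blackboard's current root.
-   */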
-  private void convertSelectList(
-      Blackboard bb,
-      SqlSelect select,
-      List<SqlNode> orderList) {
-    SqlNodeList selectList = select.getSelectList();
-    selectList = validator.expandStar(selectList, select, false);
-
-    replaceSubQueries(bb, selectList, RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-
-    List<String> fieldNames = new ArrayList<>();
-    final List<RexNode> exprs = new ArrayList<>();
-    final Collection<String> aliases = new TreeSet<>();
-
-    // Project any system fields. (Must be done before regular select items,
-    // because offsets may be affected.)
-    final List<SqlMonotonicity> columnMonotonicityList = new ArrayList<>();
-    extraSelectItems(
-        bb,
-        select,
-        exprs,
-        fieldNames,
-        aliases,
-        columnMonotonicityList);
-
-    // Project select clause.
-    int i = -1;
-    for (SqlNode expr : selectList) {
-      ++i;
-      exprs.add(bb.convertExpression(expr));
-      fieldNames.add(deriveAlias(expr, aliases, i));
-    }
-
-    // Project extra fields for sorting.
-    for (SqlNode expr : orderList) {
-      ++i;
-      SqlNode expr2 = validator.expandOrderExpr(select, expr);
-      exprs.add(bb.convertExpression(expr2));
-      fieldNames.add(deriveAlias(expr, aliases, i));
-    }
-
-    fieldNames = SqlValidatorUtil.uniquify(fieldNames,
-        catalogReader.nameMatcher().isCaseSensitive());
-
-    bb.setRoot(
-        RelOptUtil.createProject(bb.root, exprs, fieldNames),
-        false);
-
-    assert bb.columnMonotonicities.isEmpty();
-    bb.columnMonotonicities.addAll(columnMonotonicityList);
-    for (SqlNode selectItem : selectList) {
-      bb.columnMonotonicities.add(
-          selectItem.getMonotonicity(bb.scope));
-    }
-  }
-
-  /**
-   * Adds extra select items. The default implementation adds nothing; derived
-   * classes may add columns to exprList, nameList, aliasList and
-   * columnMonotonicityList.
-   *
-   * @param bb                     Blackboard
-   * @param select                 Select statement being translated
-   * @param exprList               List of expressions in select clause
-   * @param nameList               List of names, one per column
-   * @param aliasList              Collection of aliases that have been used
-   *                               already
-   * @param columnMonotonicityList List of monotonicity, one per column
-   */
-  protected void extraSelectItems(
-      Blackboard bb,
-      SqlSelect select,
-      List<RexNode> exprList,
-      List<String> nameList,
-      Collection<String> aliasList,
-      List<SqlMonotonicity> columnMonotonicityList) {
-  }
-
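-  /**
-   * Derives a unique alias for a select item. If the derived alias is null
-   * or already taken, appends a numeric suffix (EXPR$0, alias0, ...) until
-   * the alias is unique.
-   */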
-  private String deriveAlias(
-      final SqlNode node,
-      Collection<String> aliases,
-      final int ordinal) {
-    String alias = validator.deriveAlias(node, ordinal);
-    if ((alias == null) || aliases.contains(alias)) {
-      String aliasBase = (alias == null) ? "EXPR$" : alias;
-      for (int j = 0;; j++) {
-        alias = aliasBase + j;
-        if (!aliases.contains(alias)) {
-          break;
-        }
-      }
-    }
-    aliases.add(alias);
-    return alias;
-  }
-
-  /**
-   * Converts a WITH sub-query into a relational expression.
-   */
-  public RelRoot convertWith(SqlWith with, boolean top) {
-    return convertQuery(with.body, false, top);
-  }
-
-  /**
-   * Converts a SELECT statement's parse tree into a relational expression.
-   */
-  public RelNode convertValues(
-      SqlCall values,
-      RelDataType targetRowType) {
-    final SqlValidatorScope scope = validator.getOverScope(values);
-    assert scope != null;
-    final Blackboard bb = createBlackboard(scope, null, false);
-    convertValuesImpl(bb, values, targetRowType);
-    return bb.root;
-  }
-
-  /**
-   * Converts a values clause (as in "INSERT INTO T(x,y) VALUES (1,2)") into a
-   * relational expression.
-   *
-   * @param bb            Blackboard
-   * @param values        Call to SQL VALUES operator
-   * @param targetRowType Target row type
-   */
-  private void convertValuesImpl(
-      Blackboard bb,
-      SqlCall values,
-      RelDataType targetRowType) {
-    // Attempt direct conversion to LogicalValues; if that fails, deal with
-    // fancy stuff like sub-queries below.
-    RelNode valuesRel =
-        convertRowValues(
-            bb,
-            values,
-            values.getOperandList(),
-            true,
-            targetRowType);
-    if (valuesRel != null) {
-      bb.setRoot(valuesRel, true);
-      return;
-    }
-
-    final List<RelNode> unionRels = new ArrayList<>();
-    for (SqlNode rowConstructor1 : values.getOperandList()) {
-      SqlCall rowConstructor = (SqlCall) rowConstructor1;
-      Blackboard tmpBb = createBlackboard(bb.scope, null, false);
-      replaceSubQueries(tmpBb, rowConstructor,
-          RelOptUtil.Logic.TRUE_FALSE_UNKNOWN);
-      final List<Pair<RexNode, String>> exps = new ArrayList<>();
-      for (Ord<SqlNode> operand : Ord.zip(rowConstructor.getOperandList())) {
-        exps.add(
-            Pair.of(
-                tmpBb.convertExpression(operand.e),
-                validator.deriveAlias(operand.e, operand.i)));
-      }
-      RelNode in =
-          (null == tmpBb.root)
-              ? LogicalValues.createOneRow(cluster)
-              : tmpBb.root;
-      unionRels.add(
-          RelOptUtil.createProject(
-              in,
-              Pair.left(exps),
-              Pair.right(exps),
-              true));
-    }
-
-    if (unionRels.size() == 0) {
-      throw new AssertionError("empty values clause");
-    } else if (unionRels.size() == 1) {
-      bb.setRoot(
-          unionRels.get(0),
-          true);
-    } else {
-      bb.setRoot(
-          LogicalUnion.create(unionRels, true),
-          true);
-    }
-
-    // REVIEW jvs 22-Jan-2004:  should I add
-    // mapScopeToLux.put(validator.getScope(values),bb.root);
-    // ?
-  }
-
-  //~ Inner Classes ----------------------------------------------------------
-
-  /**
-   * Workspace for translating an individual SELECT statement (or sub-SELECT).
-   */
-  protected class Blackboard implements SqlRexContext, SqlVisitor<RexNode> {
-    /**
-     * The name-resolution scope in which expressions within this SELECT
-     * statement are validated.
-     */
-    public final SqlValidatorScope scope;
-    private final Map<String, RexNode> nameToNodeMap;
-    public RelNode root;
-    private List<RelNode> inputs;
-    private final Map<CorrelationId, RexFieldAccess> mapCorrelateToRex =
-        new HashMap<>();
-
-    private boolean isPatternVarRef = false;
-
-    final List<RelNode> cursors = new ArrayList<>();
-
-    /**
-     * List of <code>IN</code> and <code>EXISTS</code> nodes inside this
-     * <code>SELECT</code> statement (but not inside sub-queries).
-     */
-    private final Set<SubQuery> subQueryList = new LinkedHashSet<>();
-
-    /**
-     * Workspace for building aggregates.
-     */
-    AggConverter agg;
-
-    /**
-     * When converting a window aggregate, we need to know whether the
-     * window is guaranteed to be non-empty.
-     */
-    SqlWindow window;
-
-    /**
-     * Project the group-by expressions out of the root of this sub-select.
-     * Sub-queries can reference group-by expressions projected from the
-     * "right" to the sub-query.
-     */
-    private final Map<RelNode, Map<Integer, Integer>>
-    mapRootRelToFieldProjection = new HashMap<>();
-
-    private final List<SqlMonotonicity> columnMonotonicities =
-        new ArrayList<>();
-
-    private final List<RelDataTypeField> systemFieldList = new ArrayList<>();
-    final boolean top;
-
-    private final InitializerExpressionFactory initializerExpressionFactory =
-        new NullInitializerExpressionFactory();
-
-    /**
-     * Creates a Blackboard.
-     *
-     * @param scope         Name-resolution scope for expressions validated
-     *                      within this query. Can be null if this Blackboard
-     *                      is for a leaf node
-     * @param nameToNodeMap Map from identifier names to the expressions
-     *                      they stand for, used when translating
-     *                      expressions; null otherwise
-     * @param top           Whether this is the root of the query
-     */
-    protected Blackboard(SqlValidatorScope scope,
-                         Map<String, RexNode> nameToNodeMap, boolean top) {
-      this.scope = scope;
-      this.nameToNodeMap = nameToNodeMap;
-      this.top = top;
-    }
-
-    public void setPatternVarRef(boolean isVarRef) {
-      this.isPatternVarRef = isVarRef;
-    }
-
-    public RexNode register(
-        RelNode rel,
-        JoinRelType joinType) {
-      return register(rel, joinType, null);
-    }
-
-    /**
-     * Registers a relational expression.
-     *
-     * @param rel               Relational expression
-     * @param joinType          Join type
-     * @param leftKeys          LHS of IN clause, or null for expressions
-     *                          other than IN
-     * @return Expression with which to refer to the row (or partial row)
-     * coming from this relational expression's side of the join
-     */
-    public RexNode register(
-        RelNode rel,
-        JoinRelType joinType,
-        List<RexNode> leftKeys) {
-      assert joinType != null;
-      if (root == null) {
-        assert leftKeys == null;
-        setRoot(rel, false);
-        return rexBuilder.makeRangeReference(
-            root.getRowType(),
-            0,
-            false);
-      }
-
-      final RexNode joinCond;
-      final int origLeftInputCount = root.getRowType().getFieldCount();
-      if (leftKeys != null) {
-        List<RexNode> newLeftInputExpr = Lists.newArrayList();
-        for (int i = 0; i < origLeftInputCount; i++) {
-          newLeftInputExpr.add(rexBuilder.makeInputRef(root, i));
-        }
-
-        final List<Integer> leftJoinKeys = Lists.newArrayList();
-        for (RexNode leftKey : leftKeys) {
-          int index = newLeftInputExpr.indexOf(leftKey);
-          if (index < 0 || joinType == JoinRelType.LEFT) {
-            index = newLeftInputExpr.size();
-            newLeftInputExpr.add(leftKey);
-          }
-          leftJoinKeys.add(index);
-        }
-
-        RelNode newLeftInput =
-            RelOptUtil.createProject(
-                root,
-                newLeftInputExpr,
-                null,
-                true);
-
-        // maintain the group by mapping in the new LogicalProject
-        if (mapRootRelToFieldProjection.containsKey(root)) {
-          mapRootRelToFieldProjection.put(
-              newLeftInput,
-              mapRootRelToFieldProjection.get(root));
-        }
-
-        setRoot(newLeftInput, false);
-
-        // right fields appear after the LHS fields.
-        final int rightOffset = root.getRowType().getFieldCount()
-            - newLeftInput.getRowType().getFieldCount();
-        final List<Integer> rightKeys =
-            Util.range(rightOffset, rightOffset + leftKeys.size());
-
-        joinCond =
-            RelOptUtil.createEquiJoinCondition(newLeftInput, leftJoinKeys,
-                rel, rightKeys, rexBuilder);
-      } else {
-        joinCond = rexBuilder.makeLiteral(true);
-      }
-
-      int leftFieldCount = root.getRowType().getFieldCount();
-      final RelNode join =
-          createJoin(
-              this,
-              root,
-              rel,
-              joinCond,
-              joinType);
-
-      setRoot(join, false);
-
-      if (leftKeys != null
-          && joinType == JoinRelType.LEFT) {
-        final int leftKeyCount = leftKeys.size();
-        int rightFieldLength = rel.getRowType().getFieldCount();
-        assert leftKeyCount == rightFieldLength - 1;
-
-        final int rexRangeRefLength = leftKeyCount + rightFieldLength;
-        RelDataType returnType =
-            typeFactory.createStructType(
-                new AbstractList<Map.Entry<String, RelDataType>>() {
-                  public Map.Entry<String, RelDataType> get(
-                      int index) {
-                    return join.getRowType().getFieldList()
-                        .get(origLeftInputCount + index);
-                  }
-
-                  public int size() {
-                    return rexRangeRefLength;
-                  }
-                });
-
-        return rexBuilder.makeRangeReference(
-            returnType,
-            origLeftInputCount,
-            false);
-      } else {
-        return rexBuilder.makeRangeReference(
-            rel.getRowType(),
-            leftFieldCount,
-            joinType.generatesNullsOnRight());
-      }
-    }
-
-    /**
-     * Sets a new root relational expression, as the translation process
-     * backs its way further up the tree.
-     *
-     * @param root New root relational expression
-     * @param leaf Whether the relational expression is a leaf, that is,
-     *             derived from an atomic relational expression such as a table
-     *             name in the from clause, or the projection on top of a
-     *             select-sub-query. In particular, relational expressions
-     *             derived from JOIN operators are not leaves, but set
-     *             expressions are.
-     */
-    public void setRoot(RelNode root, boolean leaf) {
-      setRoot(
-          Collections.singletonList(root), root, root instanceof LogicalJoin);
-      if (leaf) {
-        leaves.add(root);
-      }
-      this.columnMonotonicities.clear();
-    }
-
-    private void setRoot(
-        List<RelNode> inputs,
-        RelNode root,
-        boolean hasSystemFields) {
-      this.inputs = inputs;
-      this.root = root;
-      this.systemFieldList.clear();
-      if (hasSystemFields) {
-        this.systemFieldList.addAll(getSystemFields());
-      }
-    }
-
-    /**
-     * Notifies this Blackboard that the root just set using
-     * {@link #setRoot(RelNode, boolean)} was derived using dataset
-     * substitution.
-     *
-     * <p>The default implementation is not interested in such
-     * notifications, and does nothing.
-     *
-     * @param datasetName Dataset name
-     */
-    public void setDataset(String datasetName) {
-    }
-
-    void setRoot(List<RelNode> inputs) {
-      setRoot(inputs, null, false);
-    }
-
-    /**
-     * Returns an expression with which to reference a from-list item.
-     *
-     * @param qualified the fully-qualified name of the from item
-     * @return a {@link RexFieldAccess} or {@link RexRangeRef}, or null if
-     * not found
-     */
-    Pair<RexNode, Map<String, Integer>> lookupExp(SqlQualified qualified) {
-      if (nameToNodeMap != null && qualified.prefixLength == 1) {
-        RexNode node = nameToNodeMap.get(qualified.identifier.names.get(0));
-        if (node == null) {
-          throw new AssertionError("Unknown identifier '" + qualified.identifier
-              + "' encountered while expanding expression");
-        }
-        return Pair.of(node, null);
-      }
-      final SqlNameMatcher nameMatcher =
-          scope.getValidator().getCatalogReader().nameMatcher();
-      final SqlValidatorScope.ResolvedImpl resolved =
-          new SqlValidatorScope.ResolvedImpl();
-      scope.resolve(qualified.prefix(), nameMatcher, false, resolved);
-      if (!(resolved.count() == 1)) {
-        return null;
-      }
-      final SqlValidatorScope.Resolve resolve = resolved.only();
-      final RelDataType rowType = resolve.rowType();
-
-      // Found in current query's from list.  Find which from item.
-      // We assume that the order of the from clause items has been
-      // preserved.
-      final SqlValidatorScope ancestorScope = resolve.scope;
-      boolean isParent = ancestorScope != scope;
-      if ((inputs != null) && !isParent) {
-        final LookupContext rels =
-            new LookupContext(this, inputs, systemFieldList.size());
-        final RexNode node = lookup(resolve.path.steps().get(0).i, rels);
-        if (node == null) {
-          return null;
-        } else {
-          return Pair.of(node, null);
-        }
-      } else {
-        // We're referencing a relational expression which has not been
-        // converted yet. This occurs when from items are correlated,
-        // e.g. "select from emp as emp join emp.getDepts() as dept".
-        // Create a temporary expression.
-        DeferredLookup lookup =
-            new DeferredLookup(this, qualified.identifier.names.get(0));
-        final CorrelationId correlId = cluster.createCorrel();
-        mapCorrelToDeferred.put(correlId, lookup);
-        if (resolve.path.steps().get(0).i < 0) {
-          return Pair.of(rexBuilder.makeCorrel(rowType, correlId), null);
-        } else {
-          final RelDataTypeFactory.FieldInfoBuilder builder =
-              typeFactory.builder();
-          final ListScope ancestorScope1 = (ListScope) resolve.scope;
-          final ImmutableMap.Builder<String, Integer> fields =
-              ImmutableMap.builder();
-          int i = 0;
-          int offset = 0;
-          for (SqlValidatorNamespace c : ancestorScope1.getChildren()) {
-            builder.addAll(c.getRowType().getFieldList());
-            if (i == resolve.path.steps().get(0).i) {
-              for (RelDataTypeField field : c.getRowType().getFieldList()) {
-                fields.put(c.translate(field.getName()),
-                    field.getIndex() + offset);
-              }
-            }
-            ++i;
-            offset += c.getRowType().getFieldCount();
-          }
-          final RexNode c =
-              rexBuilder.makeCorrel(builder.uniquify().build(), correlId);
-          return Pair.<RexNode, Map<String, Integer>>of(c, fields.build());
-        }
-      }
-    }
-
-    /**
-     * Creates an expression with which to reference the expression whose
-     * offset in its from-list is {@code offset}.
-     */
-    RexNode lookup(
-        int offset,
-        LookupContext lookupContext) {
-      Pair<RelNode, Integer> pair = lookupContext.findRel(offset);
-      return rexBuilder.makeRangeReference(
-          pair.left.getRowType(),
-          pair.right,
-          false);
-    }
-
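-    /**
-     * Returns the field of this blackboard's inputs that an input
-     * reference refers to, walking the inputs in order and reducing the
-     * field offset by each input's field count.
-     */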
-    RelDataTypeField getRootField(RexInputRef inputRef) {
-      int fieldOffset = inputRef.getIndex();
-      for (RelNode input : inputs) {
-        RelDataType rowType = input.getRowType();
-        if (rowType == null) {
-          // TODO:  remove this once leastRestrictive
-          // is correctly implemented
-          return null;
-        }
-        if (fieldOffset < rowType.getFieldCount()) {
-          return rowType.getFieldList().get(fieldOffset);
-        }
-        fieldOffset -= rowType.getFieldCount();
-      }
-      throw new AssertionError();
-    }
-
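-    /**
-     * Flattens a tree of relational expressions into its leaves, recording
-     * each leaf together with the ordinal of its first field in the
-     * combined row.
-     */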
-    public void flatten(
-        List<RelNode> rels,
-        int systemFieldCount,
-        int[] start,
-        List<Pair<RelNode, Integer>> relOffsetList) {
-      for (RelNode rel : rels) {
-        if (leaves.contains(rel) || rel instanceof LogicalMatch) {
-          relOffsetList.add(
-              Pair.of(rel, start[0]));
-          start[0] += rel.getRowType().getFieldCount();
-        } else {
-          if (rel instanceof LogicalJoin
-              || rel instanceof LogicalAggregate) {
-            start[0] += systemFieldCount;
-          }
-          flatten(
-              rel.getInputs(),
-              systemFieldCount,
-              start,
-              relOffsetList);
-        }
-      }
-    }
-
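-    /**
-     * Registers a sub-query, unless an equivalent node has already been
-     * registered.
-     */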
-    void registerSubQuery(SqlNode node, RelOptUtil.Logic logic) {
-      for (SubQuery subQuery : subQueryList) {
-        if (node.equalsDeep(subQuery.node, Litmus.IGNORE)) {
-          return;
-        }
-      }
-      subQueryList.add(new SubQuery(node, logic));
-    }
-
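-    /**
-     * Returns the sub-query previously registered for an expression, or
-     * null if none matches.
-     */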
-    SubQuery getSubQuery(SqlNode expr) {
-      for (SubQuery subQuery : subQueryList) {
-        if (expr.equalsDeep(subQuery.node, Litmus.IGNORE)) {
-          return subQuery;
-        }
-      }
-
-      return null;
-    }
-
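-    /**
-     * Returns the cursors registered so far, clearing the list.
-     */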
-    ImmutableList<RelNode> retrieveCursors() {
-      try {
-        return ImmutableList.copyOf(cursors);
-      } finally {
-        cursors.clear();
-      }
-    }
-
-    public RexNode convertExpression(SqlNode expr) {
-      // If we're in aggregation mode and this is an expression in the
-      // GROUP BY clause, return a reference to the field.
-      if (agg != null) {
-        final SqlNode expandedGroupExpr = validator.expand(expr, scope);
-        final int ref = agg.lookupGroupExpr(expandedGroupExpr);
-        if (ref >= 0) {
-          return rexBuilder.makeInputRef(root, ref);
-        }
-        if (expr instanceof SqlCall) {
-          final RexNode rex = agg.lookupAggregates((SqlCall) expr);
-          if (rex != null) {
-            return rex;
-          }
-        }
-      }
-
-      // Allow the derived class chance to override the standard
-      // behavior for special kinds of expressions.
-      RexNode rex = convertExtendedExpression(expr, this);
-      if (rex != null) {
-        return rex;
-      }
-
-      // Sub-queries and OVER expressions are not like ordinary
-      // expressions.
-      final SqlKind kind = expr.getKind();
-      final SubQuery subQuery;
-      if (!config.isExpand()) {
-        final SqlCall call;
-        final SqlNode query;
-        final RelRoot root;
-        switch (kind) {
-        case IN:
-          call = (SqlCall) expr;
-          query = call.operand(1);
-          if (!(query instanceof SqlNodeList)) {
-            final SqlInOperator op = (SqlInOperator) call.getOperator();
-            root = convertQueryRecursive(query, false, null);
-            final SqlNode operand = call.operand(0);
-            List<SqlNode> nodes;
-            switch (operand.getKind()) {
-            case ROW:
-              nodes = ((SqlCall) operand).getOperandList();
-              break;
-            default:
-              nodes = ImmutableList.of(operand);
-            }
-            final ImmutableList.Builder<RexNode> builder =
-                ImmutableList.builder();
-            for (SqlNode node : nodes) {
-              builder.add(convertExpression(node));
-            }
-            final RexSubQuery in = RexSubQuery.in(root.rel, builder.build());
-            return op.isNotIn()
-                ? rexBuilder.makeCall(SqlStdOperatorTable.NOT, in)
-                : in;
-          }
-          break;
-
-        case EXISTS:
-          call = (SqlCall) expr;
-          query = Iterables.getOnlyElement(call.getOperandList());
-          root = convertQueryRecursive(query, false, null);
-          RelNode rel = root.rel;
-          while (rel instanceof Project
-              || rel instanceof Sort
-              && ((Sort) rel).fetch == null
-              && ((Sort) rel).offset == null) {
-            rel = ((SingleRel) rel).getInput();
-          }
-          return RexSubQuery.exists(rel);
-
-        case SCALAR_QUERY:
-          call = (SqlCall) expr;
-          query = Iterables.getOnlyElement(call.getOperandList());
-          root = convertQueryRecursive(query, false, null);
-          return RexSubQuery.scalar(root.rel);
-        }
-      }
-
-      switch (kind) {
-      case CURSOR:
-      case IN:
-        subQuery = Preconditions.checkNotNull(getSubQuery(expr));
-        rex = Preconditions.checkNotNull(subQuery.expr);
-        return StandardConvertletTable.castToValidatedType(expr, rex,
-            validator, rexBuilder);
-
-      case SELECT:
-      case EXISTS:
-      case SCALAR_QUERY:
-        subQuery = getSubQuery(expr);
-        assert subQuery != null;
-        rex = subQuery.expr;
-        assert rex != null : "rex != null";
-
-        if (((kind == SqlKind.SCALAR_QUERY)
-            || (kind == SqlKind.EXISTS))
-            && isConvertedSubq(rex)) {
-          // scalar sub-query or EXISTS has been converted to a
-          // constant
-          return rex;
-        }
-
-        // The indicator column is the last field of the sub-query.
-        RexNode fieldAccess =
-            rexBuilder.makeFieldAccess(
-                rex,
-                rex.getType().getFieldCount() - 1);
-
-        // The indicator column will be nullable if it comes from
-        // the null-generating side of the join. For EXISTS, add an
-        // "IS TRUE" check so that the result is "BOOLEAN NOT NULL".
-        if (fieldAccess.getType().isNullable()
-            && kind == SqlKind.EXISTS) {
-          fieldAccess =
-              rexBuilder.makeCall(
-                  SqlStdOperatorTable.IS_NOT_NULL,
-                  fieldAccess);
-        }
-        return fieldAccess;
-
-      case OVER:
-        return convertOver(this, expr);
-
-      default:
-        // fall through
-      }
-
-      // Apply standard conversions.
-      rex = expr.accept(this);
-      return Preconditions.checkNotNull(rex);
-    }
-
-    /**
-     * Converts an item in an ORDER BY clause, extracting DESC, NULLS LAST
-     * and NULLS FIRST flags first.
-     */
-    public RexNode convertSortExpression(SqlNode expr, Set<SqlKind> flags) {
-      switch (expr.getKind()) {
-      case DESCENDING:
-      case NULLS_LAST:
-      case NULLS_FIRST:
-        flags.add(expr.getKind());
-        final SqlNode operand = ((SqlCall) expr).operand(0);
-        return convertSortExpression(operand, flags);
-      default:
-        return convertExpression(expr);
-      }
-    }
-
-    /**
-     * Determines whether a RexNode corresponds to a sub-query that's been
-     * converted to a constant.
-     *
-     * @param rex the expression to be examined
-     * @return true if the expression is a dynamic parameter, a literal, or
-     * a literal that is being cast
-     */
-    private boolean isConvertedSubq(RexNode rex) {
-      if ((rex instanceof RexLiteral)
-          || (rex instanceof RexDynamicParam)) {
-        return true;
-      }
-      if (rex instanceof RexCall) {
-        RexCall call = (RexCall) rex;
-        if (call.getOperator() == SqlStdOperatorTable.CAST) {
-          RexNode operand = call.getOperands().get(0);
-          if (operand instanceof RexLiteral) {
-            return true;
-          }
-        }
-      }
-      return false;
-    }
-
-    public int getGroupCount() {
-      if (agg != null) {
-        return agg.groupExprs.size();
-      }
-      if (window != null) {
-        return window.isAlwaysNonEmpty() ? 1 : 0;
-      }
-      return -1;
-    }
-
-    public RexBuilder getRexBuilder() {
-      return rexBuilder;
-    }
-
-    public RexRangeRef getSubQueryExpr(SqlCall call) {
-      final SubQuery subQuery = getSubQuery(call);
-      assert subQuery != null;
-      return (RexRangeRef) subQuery.expr;
-    }
-
-    public RelDataTypeFactory getTypeFactory() {
-      return typeFactory;
-    }
-
-    public InitializerExpressionFactory getInitializerExpressionFactory() {
-      return initializerExpressionFactory;
-    }
-
-    public SqlValidator getValidator() {
-      return validator;
-    }
-
-    public RexNode convertLiteral(SqlLiteral literal) {
-      return exprConverter.convertLiteral(this, literal);
-    }
-
-    public RexNode convertInterval(SqlIntervalQualifier intervalQualifier) {
-      return exprConverter.convertInterval(this, intervalQualifier);
-    }
-
-    public RexNode visit(SqlLiteral literal) {
-      return exprConverter.convertLiteral(this, literal);
-    }
-
-    public RexNode visit(SqlCall call) {
-      if (agg != null) {
-        final SqlOperator op = call.getOperator();
-        if (window == null
-            && (op.isAggregator() || op.getKind() == SqlKind.FILTER)) {
-          return agg.lookupAggregates(call);
-        }
-      }
-      return exprConverter.convertCall(this,
-          new SqlCallBinding(validator, scope, call).permutedCall());
-    }
-
-    public RexNode visit(SqlNodeList nodeList) {
-      throw new UnsupportedOperationException();
-    }
-
-    public RexNode visit(SqlIdentifier id) {
-      return convertIdentifier(this, id);
-    }
-
-    public RexNode visit(SqlDataTypeSpec type) {
-      throw new UnsupportedOperationException();
-    }
-
-    public RexNode visit(SqlDynamicParam param) {
-      return convertDynamicParam(param);
-    }
-
-    public RexNode visit(SqlIntervalQualifier intervalQualifier) {
-      return convertInterval(intervalQualifier);
-    }
-
-    public List<SqlMonotonicity> getColumnMonotonicities() {
-      return columnMonotonicities;
-    }
-
-  }
-
-  /** Deferred lookup. */
-  private static class DeferredLookup {
-    Blackboard bb;
-    String originalRelName;
-
-    DeferredLookup(
-        Blackboard bb,
-        String originalRelName) {
-      this.bb = bb;
-      this.originalRelName = originalRelName;
-    }
-
-    public RexFieldAccess getFieldAccess(CorrelationId name) {
-      return (RexFieldAccess) bb.mapCorrelateToRex.get(name);
-    }
-
-    public String getOriginalRelName() {
-      return originalRelName;
-    }
-  }
-
-  /**
-   * A default implementation of SubQueryConverter that does no conversion.
-   */
-  private class NoOpSubQueryConverter implements SubQueryConverter {
-    public boolean canConvertSubQuery() {
-      return false;
-    }
-
-    public RexNode convertSubQuery(
-        SqlCall subQuery,
-        SqlToRelConverter parentConverter,
-        boolean isExists,
-        boolean isExplain) {
-      throw new IllegalArgumentException();
-    }
-  }
-
-  /**
-   * Converts expressions to aggregates.
-   *
-   * <p>Consider the expression
-   *
-   * <blockquote>
-   * {@code SELECT deptno, SUM(2 * sal) FROM emp GROUP BY deptno}
-   * </blockquote>
-   *
-   * <p>Then:
-   *
-   * <ul>
-   * <li>groupExprs = {SqlIdentifier(deptno)}</li>
-   * <li>convertedInputExprs = {RexInputRef(deptno), 2 *
-   * RefInputRef(sal)}</li>
-   * <li>inputRefs = {RefInputRef(#0), RexInputRef(#1)}</li>
-   * <li>aggCalls = {AggCall(SUM, {1})}</li>
-   * </ul>
-   */
-  protected class AggConverter implements SqlVisitor<Void> {
-    private final Blackboard bb;
-    public final AggregatingSelectScope aggregatingSelectScope;
-
-    private final Map<String, String> nameMap = Maps.newHashMap();
-
-    /**
-     * The group-by expressions, in {@link SqlNode} format.
-     */
-    private final SqlNodeList groupExprs =
-        new SqlNodeList(SqlParserPos.ZERO);
-
-    /**
-     * The auxiliary group-by expressions.
-     */
-    private final Map<SqlNode, Ord<AuxiliaryConverter>> auxiliaryGroupExprs =
-        new HashMap<>();
-
-    /**
-     * Input expressions for the group columns and aggregates, in
-     * {@link RexNode} format. The first elements of the list correspond to the
-     * elements in {@link #groupExprs}; the remaining elements are for
-     * aggregates. The right field of each pair is the name of the expression,
-     * where the expressions are simple mappings to input fields.
-     */
-    private final List<Pair<RexNode, String>> convertedInputExprs =
-        new ArrayList<>();
-
-    /** Expressions to be evaluated as rows are being placed into the
-     * aggregate's hash table. This happens when group functions such as
-     * TUMBLE cause rows to be expanded. */
-    private final List<RexNode> midExprs = new ArrayList<>();
-
-    private final List<AggregateCall> aggCalls = new ArrayList<>();
-    private final Map<SqlNode, RexNode> aggMapping = new HashMap<>();
-    private final Map<AggregateCall, RexNode> aggCallMapping =
-        new HashMap<>();
-
-    /** Are we directly inside a windowed aggregate? */
-    private boolean inOver = false;
-
-    /**
-     * Creates an AggConverter.
-     *
-     * <p>The <code>select</code> parameter provides enough context to name
-     * aggregate calls which are top-level select list items.
-     *
-     * @param bb     Blackboard
-     * @param select Query being translated; provides context to name
-     *               aggregate calls
-     */
-    public AggConverter(Blackboard bb, SqlSelect select) {
-      this.bb = bb;
-      this.aggregatingSelectScope =
-          (AggregatingSelectScope) bb.getValidator().getSelectScope(select);
-
-      // Collect all expressions used in the select list so that aggregate
-      // calls can be named correctly.
-      final SqlNodeList selectList = select.getSelectList();
-      for (int i = 0; i < selectList.size(); i++) {
-        SqlNode selectItem = selectList.get(i);
-        String name = null;
-        if (SqlUtil.isCallTo(
-            selectItem,
-            SqlStdOperatorTable.AS)) {
-          final SqlCall call = (SqlCall) selectItem;
-          selectItem = call.operand(0);
-          name = call.operand(1).toString();
-        }
-        if (name == null) {
-          name = validator.deriveAlias(selectItem, i);
-        }
-        nameMap.put(selectItem.toString(), name);
-      }
-    }
-
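-    /**
-     * Registers a group-by expression and returns its index, reusing the
-     * index of an equivalent expression if one is already registered. Also
-     * registers any auxiliary expressions implied by group functions such
-     * as TUMBLE.
-     */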
-    public int addGroupExpr(SqlNode expr) {
-      int ref = lookupGroupExpr(expr);
-      if (ref >= 0) {
-        return ref;
-      }
-      final int index = groupExprs.size();
-      groupExprs.add(expr);
-      String name = nameMap.get(expr.toString());
-      RexNode convExpr = bb.convertExpression(expr);
-      addExpr(convExpr, name);
-
-      if (expr instanceof SqlCall) {
-        SqlCall call = (SqlCall) expr;
-        for (Pair<SqlNode, AuxiliaryConverter> p
-            : SqlStdOperatorTable.convertGroupToAuxiliaryCalls(call)) {
-          addAuxiliaryGroupExpr(p.left, index, p.right);
-        }
-      }
-
-      return index;
-    }
-
-    void addAuxiliaryGroupExpr(SqlNode node, int index,
-                               AuxiliaryConverter converter) {
-      for (SqlNode node2 : auxiliaryGroupExprs.keySet()) {
-        if (node2.equalsDeep(node, Litmus.IGNORE)) {
-          return;
-        }
-      }
-      auxiliaryGroupExprs.put(node, Ord.of(index, converter));
-    }
-
-    /**
-     * Adds an expression, deducing an appropriate name if possible.
-     *
-     * @param expr Expression
-     * @param name Suggested name
-     */
-    private void addExpr(RexNode expr, String name) {
-      if ((name == null) && (expr instanceof RexInputRef)) {
-        final int i = ((RexInputRef) expr).getIndex();
-        name = bb.root.getRowType().getFieldList().get(i).getName();
-      }
-      if (Pair.right(convertedInputExprs).contains(name)) {
-        // In case like 'SELECT ... GROUP BY x, y, x', don't add
-        // name 'x' twice.
-        name = null;
-      }
-      convertedInputExprs.add(Pair.of(expr, name));
-    }
-
-    public Void visit(SqlIdentifier id) {
-      return null;
-    }
-
-    public Void visit(SqlNodeList nodeList) {
-      for (int i = 0; i < nodeList.size(); i++) {
-        nodeList.get(i).accept(this);
-      }
-      return null;
-    }
-
-    public Void visit(SqlLiteral lit) {
-      return null;
-    }
-
-    public Void visit(SqlDataTypeSpec type) {
-      return null;
-    }
-
-    public Void visit(SqlDynamicParam param) {
-      return null;
-    }
-
-    public Void visit(SqlIntervalQualifier intervalQualifier) {
-      return null;
-    }
-
-    public Void visit(SqlCall call) {
-      switch (call.getKind()) {
-      case FILTER:
-        translateAgg((SqlCall) call.operand(0), call.operand(1), call);
-        return null;
-      case SELECT:
-        // rchen 2006-10-17:
-        // for now do not detect aggregates in sub-queries.
-        return null;
-      }
-      final boolean prevInOver = inOver;
-      // Ignore window aggregates and ranking functions (associated with OVER
-      // operator). However, do not ignore nested window aggregates.
-      if (call.getOperator().getKind() == SqlKind.OVER) {
-        // Track aggregate nesting levels only within an OVER operator.
-        inOver = true;
-      }
-
-      // Do not translate the top-level window aggregate. Only do so for
-      // nested aggregates, if present.
-      if (call.getOperator().isAggregator()) {
-        if (inOver) {
-          // Add the parent aggregate level before visiting its children
-          inOver = false;
-        } else {
-          // We're beyond the one ignored level
-          translateAgg(call, null, call);
-          return null;
-        }
-      }
-      for (SqlNode operand : call.getOperandList()) {
-        // Operands are occasionally null, e.g. switched CASE arg 0.
-        if (operand != null) {
-          operand.accept(this);
-        }
-      }
-      // Remove the parent aggregate level after visiting its children
-      inOver = prevInOver;
-      return null;
-    }
-
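-    /**
-     * Converts an aggregate call, with an optional FILTER expression, into
-     * an {@link AggregateCall}. Aggregation mode is switched off while the
-     * call's operands are converted, then switched back on.
-     */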
-    private void translateAgg(SqlCall call, SqlNode filter, SqlCall outerCall) {
-      assert bb.agg == this;
-      final List<Integer> args = new ArrayList<>();
-      int filterArg = -1;
-      final List<RelDataType> argTypes =
-          call.getOperator() instanceof SqlCountAggFunction
-              ? new ArrayList<RelDataType>(call.getOperandList().size())
-              : null;
-      try {
-        // switch out of agg mode
-        bb.agg = null;
-        for (SqlNode operand : call.getOperandList()) {
-
-          // special case for COUNT(*):  delete the *
-          if (operand instanceof SqlIdentifier) {
-            SqlIdentifier id = (SqlIdentifier) operand;
-            if (id.isStar()) {
-              assert call.operandCount() == 1;
-              assert args.isEmpty();
-              break;
-            }
-          }
-          RexNode convertedExpr = bb.convertExpression(operand);
-          assert convertedExpr != null;
-          if (argTypes != null) {
-            argTypes.add(convertedExpr.getType());
-          }
-          args.add(lookupOrCreateGroupExpr(convertedExpr));
-        }
-
-        if (filter != null) {
-          RexNode convertedExpr = bb.convertExpression(filter);
-          assert convertedExpr != null;
-          if (convertedExpr.getType().isNullable()) {
-            convertedExpr =
-                rexBuilder.makeCall(SqlStdOperatorTable.IS_TRUE, convertedExpr);
-          }
-          filterArg = lookupOrCreateGroupExpr(convertedExpr);
-        }
-      } finally {
-        // switch back into agg mode
-        bb.agg = this;
-      }
-
-      final SqlAggFunction aggFunction =
-          (SqlAggFunction) call.getOperator();
-      RelDataType type = validator.deriveType(bb.scope, call);
-      boolean distinct = false;
-      SqlLiteral quantifier = call.getFunctionQuantifier();
-      if ((null != quantifier)
-          && (quantifier.getValue() == SqlSelectKeyword.DISTINCT)) {
-        distinct = true;
-      }
-      final AggregateCall aggCall =
-          AggregateCall.create(
-              aggFunction,
-              distinct,
-              args,
-              filterArg,
-              type,
-              nameMap.get(outerCall.toString()));
-      final AggregatingSelectScope.Resolved r =
-          aggregatingSelectScope.resolved.get();
-      RexNode rex =
-          rexBuilder.addAggCall(
-              aggCall,
-              groupExprs.size(),
-              r.indicator,
-              aggCalls,
-              aggCallMapping,
-              argTypes);
-      aggMapping.put(outerCall, rex);
-    }
-
-    /* OVERRIDE POINT */
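-    // Returns whether the call is a plain COUNT over a single identifier
-    // or numeric literal, with no DISTINCT/ALL quantifier.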
-    private boolean isSimpleCount(SqlCall call) {
-      if (call.getOperator().isName("COUNT") && call.operandCount() == 1) {
-        final SqlNode parm = call.operand(0);
-        if ((parm instanceof SqlIdentifier || parm instanceof SqlNumericLiteral) //
-                && call.getFunctionQuantifier() == null) {
-          return true;
-        }
-      }
-      return false;
-    }
-
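-    /**
-     * Returns the index of a converted input expression equivalent to
-     * {@code expr}, adding the expression if it is not already present.
-     */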
-    private int lookupOrCreateGroupExpr(RexNode expr) {
-      int index = 0;
-      for (RexNode convertedInputExpr : Pair.left(convertedInputExprs)) {
-        if (expr.toString().equals(convertedInputExpr.toString())) {
-          return index;
-        }
-        ++index;
-      }
-
-      // not found -- add it
-      addExpr(expr, null);
-      return index;
-    }
-
-    /**
-     * If an expression is structurally identical to one of the group-by
-     * expressions, returns the index of that group expression; otherwise
-     * returns -1.
-     */
-    public int lookupGroupExpr(SqlNode expr) {
-      for (int i = 0; i < groupExprs.size(); i++) {
-        SqlNode groupExpr = groupExprs.get(i);
-        if (expr.equalsDeep(groupExpr, Litmus.IGNORE)) {
-          return i;
-        }
-      }
-      return -1;
-    }
-
-    public RexNode lookupAggregates(SqlCall call) {
-      // assert call.getOperator().isAggregator();
-      assert bb.agg == this;
-
-      switch (call.getKind()) {
-      case GROUPING:
-      case GROUP_ID:
-        final RelDataType type = validator.getValidatedNodeType(call);
-        if (!aggregatingSelectScope.resolved.get().indicator) {
-          return rexBuilder.makeExactLiteral(
-              TWO.pow(effectiveArgCount(call)).subtract(BigDecimal.ONE), type);
-        } else {
-          final List<Integer> operands;
-          switch (call.getKind()) {
-          case GROUP_ID:
-            operands = ImmutableIntList.range(0, groupExprs.size());
-            break;
-          default:
-            operands = Lists.newArrayList();
-            for (SqlNode operand : call.getOperandList()) {
-              final int x = lookupGroupExpr(operand);
-              assert x >= 0;
-              operands.add(x);
-            }
-          }
-          RexNode node = null;
-          int shift = operands.size();
-          for (int operand : operands) {
-            node = bitValue(node, type, operand, --shift);
-          }
-          return node;
-        }
-      }
-
-      for (Map.Entry<SqlNode, Ord<AuxiliaryConverter>> e
-          : auxiliaryGroupExprs.entrySet()) {
-        if (call.equalsDeep(e.getKey(), Litmus.IGNORE)) {
-          AuxiliaryConverter converter = e.getValue().e;
-          final int groupOrdinal = e.getValue().i;
-          return converter.convert(rexBuilder,
-              convertedInputExprs.get(groupOrdinal).left,
-              rexBuilder.makeInputRef(bb.root, groupOrdinal));
-        }
-      }
-
-      return aggMapping.get(call);
-    }
-
-    private int effectiveArgCount(SqlCall call) {
-      switch (call.getKind()) {
-      case GROUPING:
-        return call.operandCount();
-      case GROUP_ID:
-        return groupExprs.size();
-      default:
-        throw new AssertionError(call.getKind());
-      }
-    }
-
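-    /**
-     * Builds one bit of a GROUPING or GROUP_ID value: a CASE over the
-     * indicator column for group expression {@code x}, multiplied into its
-     * bit position and added to the bits accumulated in {@code previous}.
-     */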
-    private RexNode bitValue(RexNode previous, RelDataType type, int x,
-                             int shift) {
-      final AggregatingSelectScope.Resolved r =
-          aggregatingSelectScope.resolved.get();
-      RexNode node = rexBuilder.makeCall(SqlStdOperatorTable.CASE,
-          rexBuilder.makeInputRef(bb.root, r.groupExprList.size() + x),
-          rexBuilder.makeExactLiteral(BigDecimal.ONE, type),
-          rexBuilder.makeExactLiteral(BigDecimal.ZERO, type));
-      if (shift > 0) {
-        node = rexBuilder.makeCall(SqlStdOperatorTable.MULTIPLY, node,
-            rexBuilder.makeExactLiteral(TWO.pow(shift), type));
-      }
-      if (previous != null) {
-        node = rexBuilder.makeCall(SqlStdOperatorTable.PLUS, previous, node);
-      }
-      return node;
-    }
-
-    public List<Pair<RexNode, String>> getPreExprs() {
-      return convertedInputExprs;
-    }
-
-    public List<AggregateCall> getAggCalls() {
-      return aggCalls;
-    }
-
-    public RelDataTypeFactory getTypeFactory() {
-      return typeFactory;
-    }
-  }
-
-  /**
-   * Context for resolving a field offset to a relational expression.
-   */
-  private static class LookupContext {
-    private final List<Pair<RelNode, Integer>> relOffsetList =
-        new ArrayList<>();
-
-    /**
-     * Creates a LookupContext with multiple input relational expressions.
-     *
-     * @param bb               Context for translating this sub-query
-     * @param rels             Relational expressions
-     * @param systemFieldCount Number of system fields
-     */
-    LookupContext(Blackboard bb, List<RelNode> rels, int systemFieldCount) {
-      bb.flatten(rels, systemFieldCount, new int[]{0}, relOffsetList);
-    }
-
-    /**
-     * Returns the relational expression with a given offset, and the
-     * ordinal in the combined row of its first field.
-     *
-     * <p>For example, in {@code Emp JOIN Dept}, findRel(1) returns the
-     * relational expression for {@code Dept} and offset 6 (because
-     * {@code Emp} has 6 fields, therefore the first field of {@code Dept}
-     * is field 6).
-     *
-     * @param offset Offset of relational expression in FROM clause
-     * @return Relational expression and the ordinal of its first field
-     */
-    Pair<RelNode, Integer> findRel(int offset) {
-      return relOffsetList.get(offset);
-    }
-  }
-
-  /**
-   * Shuttle which walks over a tree of {@link RexNode}s and applies 'over' to
-   * all agg functions.
-   *
-   * <p>This is necessary because the returned expression is not necessarily a
-   * call to an agg function. For example,
-   *
-   * <blockquote><code>AVG(x)</code></blockquote>
-   *
-   * <p>becomes
-   *
-   * <blockquote><code>SUM(x) / COUNT(x)</code></blockquote>
-   *
-   * <p>Any aggregate functions are converted to calls to the internal <code>
-   * $Histogram</code> aggregation function and accessors such as <code>
-   * $HistogramMin</code>; for example,
-   *
-   * <blockquote><code>MIN(x), MAX(x)</code></blockquote>
-   *
-   * <p>are converted to
-   *
-   * <blockquote><code>$HistogramMin($Histogram(x)),
-   * $HistogramMax($Histogram(x))</code></blockquote>
-   *
-   * <p>Common sub-expression elimination will ensure that only one histogram is
-   * computed.
-   */
-  private class HistogramShuttle extends RexShuttle {
-    /**
-     * Whether to convert calls to MIN(x) to HISTOGRAM_MIN(HISTOGRAM(x)).
-     * Histograms allow rolling computation, but require more space.
-     */
-    static final boolean ENABLE_HISTOGRAM_AGG = false;
-
-    private final List<RexNode> partitionKeys;
-    private final ImmutableList<RexFieldCollation> orderKeys;
-    private final RexWindowBound lowerBound;
-    private final RexWindowBound upperBound;
-    private final SqlWindow window;
-    private final boolean distinct;
-
-    HistogramShuttle(
-            List<RexNode> partitionKeys,
-            ImmutableList<RexFieldCollation> orderKeys,
-            RexWindowBound lowerBound, RexWindowBound upperBound,
-            SqlWindow window,
-            boolean distinct) {
-      this.partitionKeys = partitionKeys;
-      this.orderKeys = orderKeys;
-      this.lowerBound = lowerBound;
-      this.upperBound = upperBound;
-      this.window = window;
-      this.distinct = distinct;
-    }
-
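-    /**
-     * Rewrites an aggregate call as a windowed aggregate. When histogram
-     * aggregates are enabled and the function has a histogram variant, the
-     * call goes through $Histogram; otherwise SUM over a nullable type is
-     * rewritten to SUM0, and other functions are applied directly.
-     */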
-    public RexNode visitCall(RexCall call) {
-      final SqlOperator op = call.getOperator();
-      if (!(op instanceof SqlAggFunction)) {
-        return super.visitCall(call);
-      }
-      final SqlAggFunction aggOp = (SqlAggFunction) op;
-      final RelDataType type = call.getType();
-      List<RexNode> exprs = call.getOperands();
-
-      SqlFunction histogramOp = !ENABLE_HISTOGRAM_AGG
-          ? null
-          : getHistogramOp(aggOp);
-
-      if (histogramOp != null) {
-        final RelDataType histogramType = computeHistogramType(type);
-
-        // For DECIMAL, since it's already represented as a bigint we
-        // want to do a reinterpretCast instead of a cast to avoid
-        // losing any precision.
-        boolean reinterpretCast =
-            type.getSqlTypeName() == SqlTypeName.DECIMAL;
-
-        // Replace the original expression with a CAST if its type is not
-        // one of the supported types
-        if (histogramType != type) {
-          exprs = new ArrayList<>(exprs);
-          exprs.set(
-              0,
-              reinterpretCast
-              ? rexBuilder.makeReinterpretCast(histogramType, exprs.get(0),
-                  rexBuilder.makeLiteral(false))
-              : rexBuilder.makeCast(histogramType, exprs.get(0)));
-        }
-
-        RexCallBinding bind =
-            new RexCallBinding(
-                rexBuilder.getTypeFactory(),
-                SqlStdOperatorTable.HISTOGRAM_AGG,
-                exprs,
-                ImmutableList.<RelCollation>of());
-
-        RexNode over =
-            rexBuilder.makeOver(
-                SqlStdOperatorTable.HISTOGRAM_AGG
-                    .inferReturnType(bind),
-                SqlStdOperatorTable.HISTOGRAM_AGG,
-                exprs,
-                partitionKeys,
-                orderKeys,
-                lowerBound,
-                upperBound,
-                window.isRows(),
-                window.isAllowPartial(),
-                false,
-                distinct);
-
-        RexNode histogramCall =
-            rexBuilder.makeCall(
-                histogramType,
-                histogramOp,
-                ImmutableList.of(over));
-
-        // If needed, post Cast result back to original
-        // type.
-        if (histogramType != type) {
-          if (reinterpretCast) {
-            histogramCall =
-                rexBuilder.makeReinterpretCast(
-                    type,
-                    histogramCall,
-                    rexBuilder.makeLiteral(false));
-          } else {
-            histogramCall =
-                rexBuilder.makeCast(type, histogramCall);
-          }
-        }
-
-        return histogramCall;
-      } else {
-        boolean needSum0 = aggOp == SqlStdOperatorTable.SUM
-            && type.isNullable();
-        SqlAggFunction aggOpToUse =
-            needSum0 ? SqlStdOperatorTable.SUM0
-                : aggOp;
-        return rexBuilder.makeOver(
-            type,
-            aggOpToUse,
-            exprs,
-            partitionKeys,
-            orderKeys,
-            lowerBound,
-            upperBound,
-            window.isRows(),
-            window.isAllowPartial(),
-            needSum0,
-            distinct);
-      }
-    }
-
-    /**
-     * Returns the histogram operator corresponding to a given aggregate
-     * function.
-     *
-     * <p>For example, <code>getHistogramOp
-     *({@link SqlStdOperatorTable#MIN})</code> returns
-     * {@link SqlStdOperatorTable#HISTOGRAM_MIN}.
-     *
-     * @param aggFunction An aggregate function
-     * @return Its histogram function, or null
-     */
-    SqlFunction getHistogramOp(SqlAggFunction aggFunction) {
-      if (aggFunction == SqlStdOperatorTable.MIN) {
-        return SqlStdOperatorTable.HISTOGRAM_MIN;
-      } else if (aggFunction == SqlStdOperatorTable.MAX) {
-        return SqlStdOperatorTable.HISTOGRAM_MAX;
-      } else if (aggFunction == SqlStdOperatorTable.FIRST_VALUE) {
-        return SqlStdOperatorTable.HISTOGRAM_FIRST_VALUE;
-      } else if (aggFunction == SqlStdOperatorTable.LAST_VALUE) {
-        return SqlStdOperatorTable.HISTOGRAM_LAST_VALUE;
-      } else {
-        return null;
-      }
-    }
-
-    /**
-     * Returns the type for a histogram function. It is either the actual
-     * type or an approximation to it.
-     */
-    private RelDataType computeHistogramType(RelDataType type) {
-      if (SqlTypeUtil.isExactNumeric(type)
-          && type.getSqlTypeName() != SqlTypeName.BIGINT) {
-        return typeFactory.createSqlType(SqlTypeName.BIGINT);
-      } else if (SqlTypeUtil.isApproximateNumeric(type)
-          && type.getSqlTypeName() != SqlTypeName.DOUBLE) {
-        return typeFactory.createSqlType(SqlTypeName.DOUBLE);
-      } else {
-        return type;
-      }
-    }
-  }
-
-  /** A sub-query, whether it needs to be translated using 2- or 3-valued
-   * logic. */
-  private static class SubQuery {
-    final SqlNode node;
-    final RelOptUtil.Logic logic;
-    RexNode expr;
-
-    private SubQuery(SqlNode node, RelOptUtil.Logic logic) {
-      this.node = node;
-      this.logic = logic;
-    }
-  }
-
-  /**
-   * Visitor that collects all aggregate functions in a {@link SqlNode} tree.
-   */
-  private static class AggregateFinder extends SqlBasicVisitor<Void> {
-    final SqlNodeList list = new SqlNodeList(SqlParserPos.ZERO);
-
-    @Override public Void visit(SqlCall call) {
-      // ignore window aggregates and ranking functions (associated with OVER operator)
-      if (call.getOperator().getKind() == SqlKind.OVER) {
-        return null;
-      }
-      if (call.getOperator().isAggregator()) {
-        list.add(call);
-        return null;
-      }
-
-      // Don't traverse into sub-queries, even if they contain aggregate
-      // functions.
-      if (call instanceof SqlSelect) {
-        return null;
-      }
-
-      return call.getOperator().acceptCall(this, call);
-    }
-  }
-
-  /** Use of a row as a correlating variable by a given relational
-   * expression. */
-  private static class CorrelationUse {
-    private final CorrelationId id;
-    private final ImmutableBitSet requiredColumns;
-    private final RelNode r;
-
-    CorrelationUse(CorrelationId id, ImmutableBitSet requiredColumns,
-                   RelNode r) {
-      this.id = id;
-      this.requiredColumns = requiredColumns;
-      this.r = r;
-    }
-  }
-
-  /** Creates a builder for a {@link Config}. */
-  public static ConfigBuilder configBuilder() {
-    return new ConfigBuilder();
-  }
-
-  /**
-   * Interface to define the configuration for a SqlToRelConverter.
-   * Provides methods to set each configuration option.
-   *
-   * @see ConfigBuilder
-   * @see SqlToRelConverter#configBuilder()
-   */
-  public interface Config {
-    /** Default configuration. */
-    Config DEFAULT = configBuilder().build();
-
-    /** Returns the {@code convertTableAccess} option. Controls whether table
-     * access references are converted to physical rels immediately. The
-     * optimizer doesn't like leaf rels to have {@link Convention#NONE}.
-     * However, if we are doing further conversion passes (e.g.
-     * {@link RelStructuredTypeFlattener}), then we may need to defer
-     * conversion. */
-    boolean isConvertTableAccess();
-
-    /** Returns the {@code decorrelationEnabled} option. Controls whether to
-     * disable sub-query decorrelation when needed. e.g. if outer joins are not
-     * supported. */
-    boolean isDecorrelationEnabled();
-
-    /** Returns the {@code trimUnusedFields} option. Controls whether to trim
-     * unused fields as part of the conversion process. */
-    boolean isTrimUnusedFields();
-
-    /** Returns the {@code createValuesRel} option. Controls whether instances
-     * of {@link org.apache.calcite.rel.logical.LogicalValues} are generated.
-     * These may not be supported by all physical implementations. */
-    boolean isCreateValuesRel();
-
-    /** Returns the {@code explain} option. Describes whether the current
-     * statement is part of an EXPLAIN PLAN statement. */
-    boolean isExplain();
-
-    /** Returns the {@code expand} option. Controls whether to expand
-     * sub-queries. If false, each sub-query becomes a
-     * {@link org.apache.calcite.rex.RexSubQuery}. */
-    boolean isExpand();
-
-    /** Returns the {@code inSubQueryThreshold} option,
-     * default {@link #DEFAULT_IN_SUB_QUERY_THRESHOLD}. Controls the list size
-     * threshold under which {@link #convertInToOr} is used. Lists of this size
-     * or greater will instead be converted to use a join against an inline
-     * table ({@link org.apache.calcite.rel.logical.LogicalValues}) rather than
-     * a predicate. A threshold of 0 forces usage of an inline table in all
-     * cases; a threshold of {@link Integer#MAX_VALUE} forces usage of OR in all
-     * cases. */
-    int getInSubQueryThreshold();
-  }
-
-  /** Builder for a {@link Config}. */
-  public static class ConfigBuilder {
-    private boolean convertTableAccess = true;
-    private boolean decorrelationEnabled = true;
-    private boolean trimUnusedFields = false;
-    private boolean createValuesRel = true;
-    private boolean explain;
-    private boolean expand = true;
-    private int inSubQueryThreshold = DEFAULT_IN_SUB_QUERY_THRESHOLD;
-
-    private ConfigBuilder() {}
-
-    /** Sets configuration identical to a given {@link Config}. */
-    public ConfigBuilder withConfig(Config config) {
-      this.convertTableAccess = config.isConvertTableAccess();
-      this.decorrelationEnabled = config.isDecorrelationEnabled();
-      this.trimUnusedFields = config.isTrimUnusedFields();
-      this.createValuesRel = config.isCreateValuesRel();
-      this.explain = config.isExplain();
-      this.expand = config.isExpand();
-      this.inSubQueryThreshold = config.getInSubQueryThreshold();
-      return this;
-    }
-
-    public ConfigBuilder withConvertTableAccess(boolean convertTableAccess) {
-      this.convertTableAccess = convertTableAccess;
-      return this;
-    }
-
-    public ConfigBuilder withDecorrelationEnabled(boolean enabled) {
-      this.decorrelationEnabled = enabled;
-      return this;
-    }
-
-    public ConfigBuilder withTrimUnusedFields(boolean trimUnusedFields) {
-      this.trimUnusedFields = trimUnusedFields;
-      return this;
-    }
-
-    public ConfigBuilder withCreateValuesRel(boolean createValuesRel) {
-      this.createValuesRel = createValuesRel;
-      return this;
-    }
-
-    public ConfigBuilder withExplain(boolean explain) {
-      this.explain = explain;
-      return this;
-    }
-
-    public ConfigBuilder withExpand(boolean expand) {
-      this.expand = expand;
-      return this;
-    }
-
-    @Deprecated // to be removed before 2.0
-    public ConfigBuilder withInSubqueryThreshold(int inSubQueryThreshold) {
-      return withInSubQueryThreshold(inSubQueryThreshold);
-    }
-
-    public ConfigBuilder withInSubQueryThreshold(int inSubQueryThreshold) {
-      this.inSubQueryThreshold = inSubQueryThreshold;
-      return this;
-    }
-
-    /** Builds a {@link Config}. */
-    public Config build() {
-      return new ConfigImpl(convertTableAccess, decorrelationEnabled,
-          trimUnusedFields, createValuesRel, explain, expand,
-          inSubQueryThreshold);
-    }
-  }
-
-  /** Implementation of {@link Config}.
-   * Called by builder; all values are in private final fields. */
-  private static class ConfigImpl implements Config {
-    private final boolean convertTableAccess;
-    private final boolean decorrelationEnabled;
-    private final boolean trimUnusedFields;
-    private final boolean createValuesRel;
-    private final boolean explain;
-    private final int inSubQueryThreshold;
-    private final boolean expand;
-
-    private ConfigImpl(boolean convertTableAccess, boolean decorrelationEnabled,
-        boolean trimUnusedFields, boolean createValuesRel, boolean explain,
-        boolean expand, int inSubQueryThreshold) {
-      this.convertTableAccess = convertTableAccess;
-      this.decorrelationEnabled = decorrelationEnabled;
-      this.trimUnusedFields = trimUnusedFields;
-      this.createValuesRel = createValuesRel;
-      this.explain = explain;
-      this.expand = expand;
-      this.inSubQueryThreshold = inSubQueryThreshold;
-    }
-
-    public boolean isConvertTableAccess() {
-      return convertTableAccess;
-    }
-
-    public boolean isDecorrelationEnabled() {
-      return decorrelationEnabled;
-    }
-
-    public boolean isTrimUnusedFields() {
-      return trimUnusedFields;
-    }
-
-    public boolean isCreateValuesRel() {
-      return createValuesRel;
-    }
-
-    public boolean isExplain() {
-      return explain;
-    }
-
-    public boolean isExpand() {
-      return expand;
-    }
-
-    public int getInSubQueryThreshold() {
-      return inSubQueryThreshold;
-    }
-  }
-}
-
-// End SqlToRelConverter.java
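
For reference, the Config/ConfigBuilder pair at the end of the removed
SqlToRelConverter is the part most callers touch. A minimal sketch of
building one, per the deleted API; the converter's other constructor
arguments (validator, catalog reader, cluster, convertlet table) are
assumed to exist in the caller:

    import org.apache.calcite.sql2rel.SqlToRelConverter;

    class ConverterConfigSketch {
        static SqlToRelConverter.Config buildConfig() {
            return SqlToRelConverter.configBuilder()
                .withConfig(SqlToRelConverter.Config.DEFAULT) // start from the defaults
                .withExpand(false)           // keep sub-queries as RexSubQuery nodes
                .withTrimUnusedFields(false) // trim in a later pass (see CALCITE-842)
                .withInSubQueryThreshold(20) // IN lists of 20+ values join a LogicalValues
                .build();
        }
    }
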
diff --git a/atopcalcite/src/main/java/org/apache/calcite/tools/Programs.java b/atopcalcite/src/main/java/org/apache/calcite/tools/Programs.java
deleted file mode 100644
index 663a82cdcd..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/tools/Programs.java
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
-package org.apache.calcite.tools;
-
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.calcite.adapter.enumerable.EnumerableRules;
-import org.apache.calcite.config.CalciteConnectionConfig;
-import org.apache.calcite.interpreter.NoneToBindableConverterRule;
-import org.apache.calcite.plan.RelOptCostImpl;
-import org.apache.calcite.plan.RelOptLattice;
-import org.apache.calcite.plan.RelOptMaterialization;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.plan.RelTraitSet;
-import org.apache.calcite.plan.hep.HepMatchOrder;
-import org.apache.calcite.plan.hep.HepPlanner;
-import org.apache.calcite.plan.hep.HepProgram;
-import org.apache.calcite.plan.hep.HepProgramBuilder;
-import org.apache.calcite.prepare.CalcitePrepareImpl;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Calc;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
-import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
-import org.apache.calcite.rel.metadata.RelMetadataProvider;
-import org.apache.calcite.rel.rules.AggregateExpandDistinctAggregatesRule;
-import org.apache.calcite.rel.rules.AggregateReduceFunctionsRule;
-import org.apache.calcite.rel.rules.AggregateStarTableRule;
-import org.apache.calcite.rel.rules.CalcMergeRule;
-import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
-import org.apache.calcite.rel.rules.FilterCalcMergeRule;
-import org.apache.calcite.rel.rules.FilterJoinRule;
-import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
-import org.apache.calcite.rel.rules.FilterTableScanRule;
-import org.apache.calcite.rel.rules.FilterToCalcRule;
-import org.apache.calcite.rel.rules.JoinAssociateRule;
-import org.apache.calcite.rel.rules.JoinCommuteRule;
-import org.apache.calcite.rel.rules.JoinPushThroughJoinRule;
-import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
-import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
-import org.apache.calcite.rel.rules.MultiJoinOptimizeBushyRule;
-import org.apache.calcite.rel.rules.OLAPJoinPushThroughJoinRule;
-import org.apache.calcite.rel.rules.OLAPJoinPushThroughJoinRule2;
-import org.apache.calcite.rel.rules.ProjectCalcMergeRule;
-import org.apache.calcite.rel.rules.ProjectMergeRule;
-import org.apache.calcite.rel.rules.ProjectToCalcRule;
-import org.apache.calcite.rel.rules.SemiJoinRule;
-import org.apache.calcite.rel.rules.SortProjectTransposeRule;
-import org.apache.calcite.rel.rules.SubQueryRemoveRule;
-import org.apache.calcite.rel.rules.TableScanRule;
-import org.apache.calcite.sql2rel.RelDecorrelator;
-import org.apache.calcite.sql2rel.RelFieldTrimmer;
-import org.apache.calcite.sql2rel.SqlToRelConverter;
-
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Lists;
-
-/*
- * The code has been synced with Calcite. Hopefully one day we can remove the hardcoded override points.
- * OVERRIDE POINT:
- * - add OLAPJoinPushThroughJoinRule OLAPJoinPushThroughJoinRule2 to org.apache.calcite.tools.Programs#subQuery
- */
-
-/**
- * Utilities for creating {@link Program}s.
- */
-public class Programs {
-  private static final Function<RuleSet, Program> RULE_SET_TO_PROGRAM =
-      new Function<RuleSet, Program>() {
-        public Program apply(RuleSet ruleSet) {
-          return of(ruleSet);
-        }
-      };
-
-  public static final ImmutableList<RelOptRule> CALC_RULES =
-      ImmutableList.of(
-          NoneToBindableConverterRule.INSTANCE,
-          EnumerableRules.ENUMERABLE_CALC_RULE,
-          EnumerableRules.ENUMERABLE_FILTER_TO_CALC_RULE,
-          EnumerableRules.ENUMERABLE_PROJECT_TO_CALC_RULE,
-          CalcMergeRule.INSTANCE,
-          FilterCalcMergeRule.INSTANCE,
-          ProjectCalcMergeRule.INSTANCE,
-          FilterToCalcRule.INSTANCE,
-          ProjectToCalcRule.INSTANCE,
-          CalcMergeRule.INSTANCE,
-
-          // REVIEW jvs 9-Apr-2006: Do we still need these two?  Doesn't the
-          // combination of CalcMergeRule, FilterToCalcRule, and
-          // ProjectToCalcRule have the same effect?
-          FilterCalcMergeRule.INSTANCE,
-          ProjectCalcMergeRule.INSTANCE);
-
-  /** Program that converts filters and projects to {@link Calc}s. */
-  public static final Program CALC_PROGRAM =
-      calc(DefaultRelMetadataProvider.INSTANCE);
-
-  /** Program that expands sub-queries. */
-  public static final Program SUB_QUERY_PROGRAM =
-      subQuery(DefaultRelMetadataProvider.INSTANCE);
-
-  public static final ImmutableSet<RelOptRule> RULE_SET =
-      ImmutableSet.of(
-          EnumerableRules.ENUMERABLE_JOIN_RULE,
-          EnumerableRules.ENUMERABLE_MERGE_JOIN_RULE,
-          EnumerableRules.ENUMERABLE_SEMI_JOIN_RULE,
-          EnumerableRules.ENUMERABLE_CORRELATE_RULE,
-          EnumerableRules.ENUMERABLE_PROJECT_RULE,
-          EnumerableRules.ENUMERABLE_FILTER_RULE,
-          EnumerableRules.ENUMERABLE_AGGREGATE_RULE,
-          EnumerableRules.ENUMERABLE_SORT_RULE,
-          EnumerableRules.ENUMERABLE_LIMIT_RULE,
-          EnumerableRules.ENUMERABLE_UNION_RULE,
-          EnumerableRules.ENUMERABLE_INTERSECT_RULE,
-          EnumerableRules.ENUMERABLE_MINUS_RULE,
-          EnumerableRules.ENUMERABLE_TABLE_MODIFICATION_RULE,
-          EnumerableRules.ENUMERABLE_VALUES_RULE,
-          EnumerableRules.ENUMERABLE_WINDOW_RULE,
-          SemiJoinRule.PROJECT,
-          SemiJoinRule.JOIN,
-          TableScanRule.INSTANCE,
-          CalcitePrepareImpl.COMMUTE
-              ? JoinAssociateRule.INSTANCE
-              : ProjectMergeRule.INSTANCE,
-          AggregateStarTableRule.INSTANCE,
-          AggregateStarTableRule.INSTANCE2,
-          FilterTableScanRule.INSTANCE,
-          FilterProjectTransposeRule.INSTANCE,
-          FilterJoinRule.FILTER_ON_JOIN,
-          AggregateExpandDistinctAggregatesRule.INSTANCE,
-          AggregateReduceFunctionsRule.INSTANCE,
-          FilterAggregateTransposeRule.INSTANCE,
-          JoinCommuteRule.INSTANCE,
-          JoinPushThroughJoinRule.RIGHT,
-          JoinPushThroughJoinRule.LEFT,
-          SortProjectTransposeRule.INSTANCE);
-
-  // private constructor for utility class
-  private Programs() {}
-
-  /** Creates a program that executes a rule set. */
-  public static Program of(RuleSet ruleSet) {
-    return new RuleSetProgram(ruleSet);
-  }
-
-  /** Creates a list of programs based on an array of rule sets. */
-  public static List<Program> listOf(RuleSet... ruleSets) {
-    return Lists.transform(Arrays.asList(ruleSets), RULE_SET_TO_PROGRAM);
-  }
-
-  /** Creates a list of programs based on a list of rule sets. */
-  public static List<Program> listOf(List<RuleSet> ruleSets) {
-    return Lists.transform(ruleSets, RULE_SET_TO_PROGRAM);
-  }
-
-  /** Creates a program from a list of rules. */
-  public static Program ofRules(RelOptRule... rules) {
-    return of(RuleSets.ofList(rules));
-  }
-
-  /** Creates a program from a list of rules. */
-  public static Program ofRules(Iterable<? extends RelOptRule> rules) {
-    return of(RuleSets.ofList(rules));
-  }
-
-  /** Creates a program that executes a sequence of programs. */
-  public static Program sequence(Program... programs) {
-    return new SequenceProgram(ImmutableList.copyOf(programs));
-  }
-
-  /** Creates a program that executes a list of rules in a HEP planner. */
-  public static Program hep(Iterable<? extends RelOptRule> rules,
-                            boolean noDag, RelMetadataProvider metadataProvider) {
-    final HepProgramBuilder builder = HepProgram.builder();
-    for (RelOptRule rule : rules) {
-      builder.addRuleInstance(rule);
-    }
-    return of(builder.build(), noDag, metadataProvider);
-  }
-
-  /** Creates a program that executes a {@link HepProgram}. */
-  public static Program of(final HepProgram hepProgram, final boolean noDag,
-                           final RelMetadataProvider metadataProvider) {
-    return new Program() {
-      public RelNode run(RelOptPlanner planner, RelNode rel,
-                         RelTraitSet requiredOutputTraits,
-                         List<RelOptMaterialization> materializations,
-                         List<RelOptLattice> lattices) {
-        final HepPlanner hepPlanner = new HepPlanner(hepProgram,
-            null, noDag, null, RelOptCostImpl.FACTORY);
-
-        List<RelMetadataProvider> list = Lists.newArrayList();
-        if (metadataProvider != null) {
-          list.add(metadataProvider);
-        }
-        hepPlanner.registerMetadataProviders(list);
-        RelMetadataProvider plannerChain =
-            ChainedRelMetadataProvider.of(list);
-        rel.getCluster().setMetadataProvider(plannerChain);
-
-        hepPlanner.setRoot(rel);
-        return hepPlanner.findBestExp();
-      }
-    };
-  }
-
-  /** Creates a program that invokes heuristic join-order optimization
-   * (via {@link org.apache.calcite.rel.rules.JoinToMultiJoinRule},
-   * {@link org.apache.calcite.rel.rules.MultiJoin} and
-   * {@link org.apache.calcite.rel.rules.LoptOptimizeJoinRule})
-   * if there are 6 or more joins (7 or more relations). */
-  public static Program heuristicJoinOrder(
-      final Iterable<? extends RelOptRule> rules,
-      final boolean bushy, final int minJoinCount) {
-    return new Program() {
-      public RelNode run(RelOptPlanner planner, RelNode rel,
-                         RelTraitSet requiredOutputTraits,
-                         List<RelOptMaterialization> materializations,
-                         List<RelOptLattice> lattices) {
-        final int joinCount = RelOptUtil.countJoins(rel);
-        final Program program;
-        if (joinCount < minJoinCount) {
-          program = ofRules(rules);
-        } else {
-          // Create a program that gathers together joins as a MultiJoin.
-          final HepProgram hep = new HepProgramBuilder()
-              .addRuleInstance(FilterJoinRule.FILTER_ON_JOIN)
-              .addMatchOrder(HepMatchOrder.BOTTOM_UP)
-              .addRuleInstance(JoinToMultiJoinRule.INSTANCE)
-              .build();
-          final Program program1 =
-              of(hep, false, DefaultRelMetadataProvider.INSTANCE);
-
-          // Create a program that contains a rule to expand a MultiJoin
-          // into heuristically ordered joins.
-          // We use the rule set passed in, but remove JoinCommuteRule and
-          // JoinPushThroughJoinRule, because they cause exhaustive search.
-          final List<RelOptRule> list = Lists.newArrayList(rules);
-          list.removeAll(
-              ImmutableList.of(JoinCommuteRule.INSTANCE,
-                  JoinAssociateRule.INSTANCE,
-                  JoinPushThroughJoinRule.LEFT,
-                  JoinPushThroughJoinRule.RIGHT));
-          list.add(bushy
-              ? MultiJoinOptimizeBushyRule.INSTANCE
-              : LoptOptimizeJoinRule.INSTANCE);
-          final Program program2 = ofRules(list);
-
-          program = sequence(program1, program2);
-        }
-        return program.run(
-            planner, rel, requiredOutputTraits, materializations, lattices);
-      }
-    };
-  }
-
-  public static Program calc(RelMetadataProvider metadataProvider) {
-    return hep(CALC_RULES, true, metadataProvider);
-  }
-
-  @Deprecated // to be removed before 2.0
-  public static Program subquery(RelMetadataProvider metadataProvider) {
-    return subQuery(metadataProvider);
-  }
-
-  public static Program subQuery(RelMetadataProvider metadataProvider) {
-    return hep(
-        ImmutableList.of((RelOptRule) SubQueryRemoveRule.FILTER,
-            SubQueryRemoveRule.PROJECT,
-            SubQueryRemoveRule.JOIN, OLAPJoinPushThroughJoinRule.INSTANCE,
-            OLAPJoinPushThroughJoinRule2.INSTANCE), true, metadataProvider);
-  }
-
-  public static Program getProgram() {
-    return new Program() {
-      public RelNode run(RelOptPlanner planner, RelNode rel,
-                         RelTraitSet requiredOutputTraits,
-                         List<RelOptMaterialization> materializations,
-                         List<RelOptLattice> lattices) {
-        return null;
-      }
-    };
-  }
-
-  /** Returns the standard program used by Prepare. */
-  public static Program standard() {
-    return standard(DefaultRelMetadataProvider.INSTANCE);
-  }
-
-  /** Returns the standard program with user metadata provider. */
-  public static Program standard(RelMetadataProvider metadataProvider) {
-
-    final Program program1 =
-        new Program() {
-          public RelNode run(RelOptPlanner planner, RelNode rel,
-                             RelTraitSet requiredOutputTraits,
-                             List<RelOptMaterialization> materializations,
-                             List<RelOptLattice> lattices) {
-            planner.setRoot(rel);
-
-            for (RelOptMaterialization materialization : materializations) {
-              planner.addMaterialization(materialization);
-            }
-            for (RelOptLattice lattice : lattices) {
-              planner.addLattice(lattice);
-            }
-
-            final RelNode rootRel2 =
-                rel.getTraitSet().equals(requiredOutputTraits)
-                ? rel
-                : planner.changeTraits(rel, requiredOutputTraits);
-            assert rootRel2 != null;
-
-            planner.setRoot(rootRel2);
-            final RelOptPlanner planner2 = planner.chooseDelegate();
-            final RelNode rootRel3 = planner2.findBestExp();
-            assert rootRel3 != null : "could not implement exp";
-            return rootRel3;
-          }
-        };
-
-    return sequence(subQuery(metadataProvider),
-        new DecorrelateProgram(),
-        new TrimFieldsProgram(),
-        program1,
-
-        // Second planner pass to do physical "tweaks". This is the first
-        // time that EnumerableCalcRel is introduced.
-        calc(metadataProvider));
-  }
-
-  /** Program backed by a {@link RuleSet}. */
-  static class RuleSetProgram implements Program {
-    final RuleSet ruleSet;
-
-    private RuleSetProgram(RuleSet ruleSet) {
-      this.ruleSet = ruleSet;
-    }
-
-    public RelNode run(RelOptPlanner planner, RelNode rel,
-                       RelTraitSet requiredOutputTraits,
-                       List<RelOptMaterialization> materializations,
-                       List<RelOptLattice> lattices) {
-      planner.clear();
-      for (RelOptRule rule : ruleSet) {
-        planner.addRule(rule);
-      }
-      for (RelOptMaterialization materialization : materializations) {
-        planner.addMaterialization(materialization);
-      }
-      for (RelOptLattice lattice : lattices) {
-        planner.addLattice(lattice);
-      }
-      if (!rel.getTraitSet().equals(requiredOutputTraits)) {
-        rel = planner.changeTraits(rel, requiredOutputTraits);
-      }
-      planner.setRoot(rel);
-      return planner.findBestExp();
-
-    }
-  }
-
-  /** Program that runs sub-programs, sending the output of the previous as
-   * input to the next. */
-  private static class SequenceProgram implements Program {
-    private final ImmutableList<Program> programs;
-
-    SequenceProgram(ImmutableList<Program> programs) {
-      this.programs = programs;
-    }
-
-    public RelNode run(RelOptPlanner planner, RelNode rel,
-                       RelTraitSet requiredOutputTraits,
-                       List<RelOptMaterialization> materializations,
-                       List<RelOptLattice> lattices) {
-      for (Program program : programs) {
-        rel = program.run(
-            planner, rel, requiredOutputTraits, materializations, lattices);
-      }
-      return rel;
-    }
-  }
-
-  /** Program that de-correlates a query.
-   *
-   * <p>To work around
-   * <a href="https://issues.apache.org/jira/browse/CALCITE-842">[CALCITE-842]
-   * Decorrelator gets field offsets confused if fields have been trimmed</a>,
-   * disable field-trimming in {@link SqlToRelConverter}, and run
-   * {@link TrimFieldsProgram} after this program. */
-  private static class DecorrelateProgram implements Program {
-    public RelNode run(RelOptPlanner planner, RelNode rel,
-                       RelTraitSet requiredOutputTraits,
-                       List<RelOptMaterialization> materializations,
-                       List<RelOptLattice> lattices) {
-      final CalciteConnectionConfig config =
-          planner.getContext().unwrap(CalciteConnectionConfig.class);
-      if (config != null && config.forceDecorrelate()) {
-        return RelDecorrelator.decorrelateQuery(rel);
-      }
-      return rel;
-    }
-  }
-
-  /** Program that trims fields. */
-  private static class TrimFieldsProgram implements Program {
-    public RelNode run(RelOptPlanner planner, RelNode rel,
-                       RelTraitSet requiredOutputTraits,
-                       List<RelOptMaterialization> materializations,
-                       List<RelOptLattice> lattices) {
-      final RelBuilder relBuilder =
-          RelFactories.LOGICAL_BUILDER.create(rel.getCluster(), null);
-      return new RelFieldTrimmer(null, relBuilder).trim(rel);
-    }
-  }
-}
-
-// End Programs.java
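
A minimal sketch of how the removed Programs utility composes, per the
deleted API (the planner, input RelNode, trait set, materializations and
lattices are assumed to come from the surrounding prepare pipeline):

    import java.util.List;

    import org.apache.calcite.plan.RelOptLattice;
    import org.apache.calcite.plan.RelOptMaterialization;
    import org.apache.calcite.plan.RelOptPlanner;
    import org.apache.calcite.plan.RelTraitSet;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
    import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
    import org.apache.calcite.rel.rules.ProjectMergeRule;
    import org.apache.calcite.tools.Program;
    import org.apache.calcite.tools.Programs;

    class ProgramSketch {
        // Runs the patched sub-query program (which injects the two OLAP
        // join rules above) followed by a small rule-based pass.
        static RelNode optimize(RelOptPlanner planner, RelNode rel,
                RelTraitSet traits,
                List<RelOptMaterialization> materializations,
                List<RelOptLattice> lattices) {
            Program program = Programs.sequence(
                Programs.subQuery(DefaultRelMetadataProvider.INSTANCE),
                Programs.ofRules(FilterProjectTransposeRule.INSTANCE,
                    ProjectMergeRule.INSTANCE));
            return program.run(planner, rel, traits, materializations, lattices);
        }
    }
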
diff --git a/atopcalcite/src/main/java/org/apache/calcite/tools/RelUtils.java b/atopcalcite/src/main/java/org/apache/calcite/tools/RelUtils.java
deleted file mode 100644
index 68ad5cd1a4..0000000000
--- a/atopcalcite/src/main/java/org/apache/calcite/tools/RelUtils.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.calcite.tools;
-
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexVisitor;
-import org.apache.calcite.rex.RexVisitorImpl;
-import org.apache.calcite.sql.SqlOperator;
-
-import com.google.common.collect.Lists;
-
-public class RelUtils {
-    public static boolean findOLAPRel(RelNode rel) {
-        Class aClass;
-        try {
-            aClass = Thread.currentThread().getContextClassLoader().loadClass("org.apache.kylin.query.relnode.OLAPRel");
-        } catch (ClassNotFoundException e) {
-            return false;
-        }
-        return findRel(rel, Lists.newArrayList(aClass)) != null;
-    }
-
-    private static RelNode findRel(RelNode rel, List<Class> candidate) {
-        for (Class clazz : candidate) {
-            if (clazz.isInstance(rel)) {
-                return rel;
-            }
-        }
-
-        if (rel.getInputs().size() < 1) {
-            return null;
-        }
-
-        return findRel(rel.getInput(0), candidate);
-    }
-
-    public static int countOperatorCall(final SqlOperator operator, RexNode node) {
-        final AtomicInteger atomicInteger = new AtomicInteger(0);
-        RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) {
-            public Void visitCall(RexCall call) {
-                if (call.getOperator().equals(operator)) {
-                    atomicInteger.incrementAndGet();
-                }
-                return super.visitCall(call);
-            }
-        };
-        node.accept(visitor);
-        return atomicInteger.get();
-    }
-}
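
The two helpers in the deleted RelUtils are used as below; a short
hypothetical sketch, noting that findOLAPRel only walks the input-0
chain, so it answers "is there an OLAPRel on the leftmost path", not
"anywhere in the tree":

    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.sql.fun.SqlStdOperatorTable;
    import org.apache.calcite.tools.RelUtils;

    class RelUtilsSketch {
        static void inspect(RelNode rel, RexNode expr) {
            // True if an OLAPRel sits on the leftmost input chain.
            boolean touchesOlap = RelUtils.findOLAPRel(rel);
            // Count CAST calls inside one row expression.
            int casts = RelUtils.countOperatorCall(SqlStdOperatorTable.CAST, expr);
            System.out.println(touchesOlap + " / " + casts);
        }
    }
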
diff --git a/build/bin/check-env.sh b/build/bin/check-env.sh
index cd745d4526..5b14e0af7d 100755
--- a/build/bin/check-env.sh
+++ b/build/bin/check-env.sh
@@ -80,4 +80,7 @@ then
     then
         quit "Failed to create $SPARK_HISTORYLOG_DIR. Please make sure the user has right to access $SPARK_HISTORYLOG_DIR"
     fi
-fi
\ No newline at end of file
+fi
+
+${KYLIN_HOME}/bin/check-port-availability.sh || exit 1
+
diff --git a/build/bin/check-port-availability.sh b/build/bin/check-port-availability.sh
new file mode 100644
index 0000000000..023fdef3b9
--- /dev/null
+++ b/build/bin/check-port-availability.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source $(cd -P -- "$(dirname -- "$0")" && pwd -P)/header.sh
+
+
+# get port from configuration
+kylin_port=`grep "<Connector port=" ${KYLIN_HOME}/tomcat/conf/server.xml |grep protocol=\"HTTP/1.1\" | cut -d '=' -f 2 | cut -d \" -f 2`
+
+# check the availability of the port
+kylin_port_in_use=`netstat -tlpn | grep "\b${kylin_port}\b"`
+
+# if not available, print an error message
+[[ -z ${kylin_port_in_use} ]] || quit "ERROR: Port ${kylin_port} is in use, please check the availability of the port and re-start Kylin"
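
The same availability test can be done without parsing netstat output by
trying to bind the port; a hypothetical Java equivalent of the check
above (the port number is assumed to have been read from
tomcat/conf/server.xml as the script does):

    import java.io.IOException;
    import java.net.ServerSocket;

    class PortCheckSketch {
        // Returns true when nothing is bound to the port: binding succeeds
        // only if the port is free.
        static boolean isPortFree(int port) {
            try (ServerSocket socket = new ServerSocket(port)) {
                return true;   // bind succeeded, the port is available
            } catch (IOException e) {
                return false;  // already bound by another process
            }
        }
    }
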
diff --git a/build/bin/find-hive-dependency.sh b/build/bin/find-hive-dependency.sh
index a8793ebb3c..558c9b583c 100755
--- a/build/bin/find-hive-dependency.sh
+++ b/build/bin/find-hive-dependency.sh
@@ -37,6 +37,17 @@ else
     hive_env=`hive ${hive_conf_properties} -e set 2>&1 | grep 'env:CLASSPATH'`
 fi
 
+if [ -z "$hive_env" ]
+then
+    hive_permission=`hive ${hive_conf_properties} -e set 2>&1 | grep 'No valid credentials provided'`
+    if [ -n "$hive_permission" ]
+    then
+        quit "No valid credentials provided for Hive CLI, please check permission of hive. (e.g. check if Kerberos is expired or not)"
+    else
+        quit "Something wrong with Hive CLI or Beeline, please execute Hive CLI or Beeline CLI in terminal to find the root cause."
+    fi
+fi
+
 hive_classpath=`echo $hive_env | grep 'env:CLASSPATH' | awk -F '=' '{print $2}'`
 arr=(`echo $hive_classpath | cut -d ":" -f 1- | sed 's/:/ /g'`)
 hive_conf_path=
@@ -73,6 +84,11 @@ then
     quit "Couldn't find hive configuration directory. Please set HIVE_CONF to the path which contains hive-site.xml."
 fi
 
+if [ -z "$hive_exec_path" ]
+then
+    quit "Couldn't find hive executable jar. Please check if hive executable jar exists in HIVE_LIB folder."
+fi
+
 # in some versions of hive hcatalog is not in hive's classpath, find it separately
 if [ -z "$HCAT_HOME" ]
 then
@@ -89,7 +105,7 @@ then
     elif [ -n is_aws ] && [ -d "/usr/lib/hive-hcatalog" ]; then
       # special handling for Amazon EMR
       hcatalog_home=/usr/lib/hive-hcatalog
-    else 
+    else
       quit "Couldn't locate hcatalog installation, please make sure it is installed and set HCAT_HOME to the path."
     fi
 else
@@ -106,7 +122,27 @@ fi
 
 function checkFileExist()
 {
-    files=(`echo $1 | cut -d ":" -f 1- | sed 's/:/ /g'`)
+    msg_hint=""
+    if [ "$1" == "hive_lib" ]
+    then
+        msg_hint=", please check jar files in current HIVE_LIB or export HIVE_LIB='YOUR_LOCAL_HIVE_LIB'"
+    elif [ "$1" == "hcatalog" ]
+    then
+        msg_hint=", please check jar files in current HCAT_HOME or export HCAT_HOME='YOUR_LOCAL_HCAT_HOME'"
+    fi
+
+    if [ -z "$2" ]
+    then
+        if [ "$1" == "hive_lib" ]
+        then
+            quit "Current HIVE_LIB is not valid, please export HIVE_LIB='YOUR_LOCAL_HIVE_LIB'"
+        elif [ "$1" == "hcatalog" ]
+        then
+            quit "Current HCAT_HOME is not valid, please export HCAT_HOME='YOUR_LOCAL_HCAT_HOME'"
+        fi
+    fi
+
+    files=(`echo $2 | cut -d ":" -f 1- | sed 's/:/ /g'`)
     misFiles=0
     outputMissFiles=
     for file in ${files}
@@ -119,7 +155,7 @@ function checkFileExist()
     done
     if [ 0 != ${misFiles} ]; then
         times=`expr ${allFiles} / ${misFiles}`
-        [[ ${times} -gt 10 ]] || quit "A couple of hive jars can't be found: ${outputMisFiles}, please export HIVE_LIB='YOUR_LOCAL_HIVE_LIB'"
+        [[ ${times} -gt 10 ]] || quit "Some hive jars can't be found: ${outputMissFiles}${msg_hint}"
     fi
 }
 
@@ -146,15 +182,20 @@ function validateDirectory()
 if [ -z "$HIVE_LIB" ]
 then
     verbose "HIVE_LIB is not set, try to retrieve hive lib from hive_exec_path"
-    hive_lib_dir="$(dirname $hive_exec_path)"
+    if [[ $hive_exec_path =~ ^\/.*hive.*\/lib\/hive-exec[a-z0-9A-Z\.-]*.jar ]]
+    then
+        hive_lib_dir="$(dirname $hive_exec_path)"
+    else
+        quit "HIVE_LIB not found, please check hive installation or export HIVE_LIB='YOUR_LOCAL_HIVE_LIB'."
+    fi
 else
     hive_lib_dir="$HIVE_LIB"
 fi
 hive_lib=`find -L ${hive_lib_dir} -name '*.jar' ! -name '*druid*' ! -name '*slf4j*' ! -name '*avatica*' ! -name '*calcite*' ! -name '*jackson-datatype-joda*' ! -name '*derby*' -printf '%p:' | sed 's/:$//'`
 
 validateDirectory ${hive_conf_path}
-checkFileExist ${hive_lib}
-checkFileExist ${hcatalog}
+checkFileExist hive_lib ${hive_lib}
+checkFileExist hcatalog ${hcatalog}
 
 hive_dependency=${hive_conf_path}:${hive_lib}:${hcatalog}
 verbose "hive dependency is $hive_dependency"
diff --git a/build/bin/kylin.sh b/build/bin/kylin.sh
index b65cb958a5..13eafe6492 100755
--- a/build/bin/kylin.sh
+++ b/build/bin/kylin.sh
@@ -27,10 +27,11 @@ if [ "$verbose" = true ]; then
     shift
 fi
 
-source ${dir}/check-env.sh
 mkdir -p ${KYLIN_HOME}/logs
 mkdir -p ${KYLIN_HOME}/ext
 
+source ${dir}/set-java-home.sh
+
 function retrieveDependency() {
     #retrive $hive_dependency and $hbase_dependency
     source ${dir}/find-hive-dependency.sh
@@ -66,7 +67,9 @@ then
           quit "Kylin is running, stop it first"
         fi
     fi
-    
+
+    source ${dir}/check-env.sh
+
     tomcat_root=${dir}/../tomcat
     export tomcat_root
 
diff --git a/build/bin/set-java-home.sh b/build/bin/set-java-home.sh
new file mode 100644
index 0000000000..6a50525e71
--- /dev/null
+++ b/build/bin/set-java-home.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+if [ -z "${JAVA_HOME}" ]
+then
+    JAVA_HOME=$(readlink -nf $(which java) | xargs dirname | xargs dirname | xargs dirname)
+    if [ ! -e "$JAVA_HOME" ]  # nonexistent home
+    then
+        JAVA_HOME=""
+    fi
+    export JAVA_HOME=$JAVA_HOME
+fi
+
+# Validate Kylin's JDK version (read from the JVM that runs hbase)
+JAVA_VERSION=$(hbase -version 2>&1  | awk -F '"' '/version/ {print $2}' | awk -F "." '{print $1$2}')
+if [ "$JAVA_VERSION" -lt 18 ]
+then
+    quit "Kylin requires JDK 1.8+, please install or upgrade your JDK"
+fi
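
The awk pipeline in set-java-home.sh turns a banner line such as
java version "1.8.0_161" into the number 18 before comparing against 18;
a sketch of the same parse in Java (the quoted version string is assumed
to have been extracted already):

    class JdkVersionSketch {
        // Concatenate the first two dotted fields of the version string,
        // exactly as the second awk stage does: "1.8.0_161" -> "1"+"8" -> 18.
        static int majorMinor(String quotedVersion) {
            String[] parts = quotedVersion.split("\\.");
            return Integer.parseInt(parts[0] + parts[1]);
        }

        public static void main(String[] args) {
            if (majorMinor("1.8.0_161") < 18) {
                throw new IllegalStateException("Kylin requires JDK 1.8+");
            }
        }
    }
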
diff --git a/build/script/download-tomcat.sh b/build/script/download-tomcat.sh
index 54617a0a6d..6c79ff910b 100755
--- a/build/script/download-tomcat.sh
+++ b/build/script/download-tomcat.sh
@@ -27,8 +27,8 @@ if [[ `uname -a` =~ "Darwin" ]]; then
     alias md5cmd="md5 -q"
 fi
 
-tomcat_pkg_version="7.0.90"
-tomcat_pkg_md5="cd4890e4e6a212dafd970da37d040877"
+tomcat_pkg_version="7.0.91"
+tomcat_pkg_md5="8bfbb358b51f90374067879f8db1e91c"
 
 if [ ! -f "build/apache-tomcat-${tomcat_pkg_version}.tar.gz" ]
 then
diff --git a/build/script/prepare-libs.sh b/build/script/prepare-libs.sh
index ae5c5e4c07..789a12047a 100644
--- a/build/script/prepare-libs.sh
+++ b/build/script/prepare-libs.sh
@@ -34,9 +34,11 @@ cp assembly/target/kylin-assembly-${version}-job.jar build/lib/kylin-job-${versi
 cp storage-hbase/target/kylin-storage-hbase-${version}-coprocessor.jar build/lib/kylin-coprocessor-${version}.jar
 cp jdbc/target/kylin-jdbc-${version}.jar build/lib/kylin-jdbc-${version}.jar
 cp tool-assembly/target/kylin-tool-assembly-${version}-assembly.jar build/tool/kylin-tool-${version}.jar
+cp datasource-sdk/target/kylin-datasource-sdk-${version}-lib.jar build/lib/kylin-datasource-sdk-${version}.jar
 
 # Copied file becomes 000 for some env (e.g. my Cygwin)
 chmod 644 build/lib/kylin-job-${version}.jar
 chmod 644 build/lib/kylin-coprocessor-${version}.jar
 chmod 644 build/lib/kylin-jdbc-${version}.jar
 chmod 644 build/tool/kylin-tool-${version}.jar
+chmod 644 build/lib/kylin-datasource-sdk-${version}.jar
diff --git a/build/smoke-test/testBuildCube.py b/build/smoke-test/testBuildCube.py
index 693d678d5a..9452cf2c51 100644
--- a/build/smoke-test/testBuildCube.py
+++ b/build/smoke-test/testBuildCube.py
@@ -72,7 +72,7 @@ def testBuild(self):
             job_info = json.loads(job_response.text)
             job_status = job_info['job_status']
             try_time = 1
-            while job_status in ('RUNNING', 'PENDING') and try_time <= 20:
+            while job_status in ('RUNNING', 'PENDING') and try_time <= 30:
                 print 'Wait for job complete, try_time = ' + str(try_time)
                 try:
                     job_response = requests.request("GET", job_url, headers=headers)
diff --git a/cache/pom.xml b/cache/pom.xml
new file mode 100644
index 0000000000..8e31435958
--- /dev/null
+++ b/cache/pom.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+     http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>kylin-cache</artifactId>
+    <packaging>jar</packaging>
+    <name>Apache Kylin - Cache</name>
+    <description>Apache Kylin - Cache</description>
+
+    <parent>
+        <groupId>org.apache.kylin</groupId>
+        <artifactId>kylin</artifactId>
+        <version>2.6.0-SNAPSHOT</version>
+    </parent>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.kylin</groupId>
+            <artifactId>kylin-core-common</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.kylin</groupId>
+            <artifactId>kylin-core-metrics</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.dropwizard.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>io.dropwizard.metrics</groupId>
+            <artifactId>metrics-jvm</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context-support</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-test</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>net.sf.ehcache</groupId>
+            <artifactId>ehcache</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>net.spy</groupId>
+            <artifactId>spymemcached</artifactId>
+        </dependency>
+
+        <!-- Test & Env -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.kylin</groupId>
+            <artifactId>kylin-core-common</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+            <scope>test</scope>
+            <!--MRUnit relies on older version of mockito, so cannot manage it globally-->
+            <version>${mockito.version}</version>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/cache/src/main/java/net/spy/memcached/RefinedKetamaNodeLocator.java b/cache/src/main/java/net/spy/memcached/RefinedKetamaNodeLocator.java
new file mode 100644
index 0000000000..43f31bccc4
--- /dev/null
+++ b/cache/src/main/java/net/spy/memcached/RefinedKetamaNodeLocator.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package net.spy.memcached;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import net.spy.memcached.compat.SpyObject;
+import net.spy.memcached.util.DefaultKetamaNodeLocatorConfiguration;
+import net.spy.memcached.util.KetamaNodeLocatorConfiguration;
+
+/**
+ * Copyright (C) 2006-2009 Dustin Sallings
+ * Copyright (C) 2009-2011 Couchbase, Inc.
+ *
+ * This is a modified version of the Ketama consistent hash strategy from
+ * last.fm. This implementation may not be compatible with libketama as hashing
+ * is considered separate from node location.
+ *
+ * The only modified method is getSequence().
+ * The previous value of 7 may be too small to avoid probing only down nodes.
+ *
+ * Note that this implementation does not currently support weighted nodes.
+ *
+ * @see <a href="http://www.last.fm/user/RJ/journal/2007/04/10/392555/">RJ's
+ *      blog post</a>
+ */
+public final class RefinedKetamaNodeLocator extends SpyObject implements NodeLocator {
+
+    private final HashAlgorithm hashAlg;
+    private final Map<InetSocketAddress, Integer> weights;
+    private final boolean isWeightedKetama;
+    private final KetamaNodeLocatorConfiguration config;
+    private volatile TreeMap<Long, MemcachedNode> ketamaNodes;
+    private volatile Collection<MemcachedNode> allNodes;
+
+    /**
+     * Create a new KetamaNodeLocator using the specified nodes and hash
+     * algorithm.
+     *
+     * @param nodes The List of nodes to use in the Ketama consistent hash
+     *          continuum
+     * @param alg The hash algorithm to use when choosing a node in the Ketama
+     *          consistent hash continuum
+     */
+    public RefinedKetamaNodeLocator(List<MemcachedNode> nodes, HashAlgorithm alg) {
+        this(nodes, alg, KetamaNodeKeyFormatter.Format.SPYMEMCACHED, new HashMap<InetSocketAddress, Integer>());
+    }
+
+    /**
+     * Create a new KetamaNodeLocator with specific nodes, hash, node key format,
+     * and weights
+     *
+     * @param nodes The List of nodes to use in the Ketama consistent hash
+     *          continuum
+     * @param alg The hash algorithm to use when choosing a node in the Ketama
+     *          consistent hash continuum
+     * @param nodeKeyFormat the format used to name the nodes in Ketama, either
+     *          SPYMEMCACHED or LIBMEMCACHED
+     * @param weights node weights for ketama, a map from InetSocketAddress to
+     *          weight as Integer
+     */
+    public RefinedKetamaNodeLocator(List<MemcachedNode> nodes, HashAlgorithm alg,
+            KetamaNodeKeyFormatter.Format nodeKeyFormat, Map<InetSocketAddress, Integer> weights) {
+        this(nodes, alg, weights, new DefaultKetamaNodeLocatorConfiguration(new KetamaNodeKeyFormatter(nodeKeyFormat)));
+    }
+
+    /**
+     * Create a new KetamaNodeLocator using the specified nodes, hash
+     * algorithm, and configuration.
+     *
+     * @param nodes The List of nodes to use in the Ketama consistent hash
+     *          continuum
+     * @param alg The hash algorithm to use when choosing a node in the Ketama
+     *          consistent hash continuum
+     * @param conf the node locator configuration
+     */
+    public RefinedKetamaNodeLocator(List<MemcachedNode> nodes, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) {
+        this(nodes, alg, new HashMap<InetSocketAddress, Integer>(), conf);
+    }
+
+    /**
+     * Create a new KetamaNodeLocator with specific nodes, hash, node key format,
+     * and weights
+     *
+     * @param nodes The List of nodes to use in the Ketama consistent hash
+     *          continuum
+     * @param alg The hash algorithm to use when choosing a node in the Ketama
+     *          consistent hash continuum
+     * @param nodeWeights node weights for ketama, a map from InetSocketAddress to
+     *          weight as Integer
+     * @param configuration node locator configuration
+     */
+    public RefinedKetamaNodeLocator(List<MemcachedNode> nodes, HashAlgorithm alg,
+            Map<InetSocketAddress, Integer> nodeWeights, KetamaNodeLocatorConfiguration configuration) {
+        super();
+        allNodes = nodes;
+        hashAlg = alg;
+        config = configuration;
+        weights = nodeWeights;
+        isWeightedKetama = !weights.isEmpty();
+        setKetamaNodes(nodes);
+    }
+
+    private RefinedKetamaNodeLocator(TreeMap<Long, MemcachedNode> smn, Collection<MemcachedNode> an, HashAlgorithm alg,
+            Map<InetSocketAddress, Integer> nodeWeights, KetamaNodeLocatorConfiguration conf) {
+        super();
+        ketamaNodes = smn;
+        allNodes = an;
+        hashAlg = alg;
+        config = conf;
+        weights = nodeWeights;
+        isWeightedKetama = !weights.isEmpty();
+    }
+
+    public Collection<MemcachedNode> getAll() {
+        return allNodes;
+    }
+
+    public MemcachedNode getPrimary(final String k) {
+        MemcachedNode rv = getNodeForKey(hashAlg.hash(k));
+        assert rv != null : "Found no node for key " + k;
+        return rv;
+    }
+
+    long getMaxKey() {
+        return getKetamaNodes().lastKey();
+    }
+
+    MemcachedNode getNodeForKey(long hash) {
+        final MemcachedNode rv;
+        if (!ketamaNodes.containsKey(hash)) {
+            // Java 1.6 adds a ceilingKey method, but I'm still stuck in 1.5
+            // in a lot of places, so I'm doing this myself.
+            SortedMap<Long, MemcachedNode> tailMap = getKetamaNodes().tailMap(hash);
+            if (tailMap.isEmpty()) {
+                hash = getKetamaNodes().firstKey();
+            } else {
+                hash = tailMap.firstKey();
+            }
+        }
+        rv = getKetamaNodes().get(hash);
+        return rv;
+    }
+
+    /**
+     * Returns an iterator over candidate nodes for a key. The previous
+     * value of 7 repetitions may be too small to avoid probing only down
+     * nodes.
+     * @param k the key to locate
+     * @return an iterator over candidate nodes
+     */
+    public Iterator<MemcachedNode> getSequence(String k) {
+        // Seven searches give us a 1 in 2^maxTry chance of hitting the
+        // same dead node all of the time.
+        int maxTry = config.getNodeRepetitions() + 1;
+        if (maxTry < 20) {
+            maxTry = 20;
+        }
+        return new KetamaIterator(k, maxTry, getKetamaNodes(), hashAlg);
+    }
+
+    public NodeLocator getReadonlyCopy() {
+        TreeMap<Long, MemcachedNode> smn = new TreeMap<Long, MemcachedNode>(getKetamaNodes());
+        Collection<MemcachedNode> an = new ArrayList<MemcachedNode>(allNodes.size());
+
+        // Rewrite the values in a copy of the map.
+        for (Map.Entry<Long, MemcachedNode> me : smn.entrySet()) {
+            smn.put(me.getKey(), new MemcachedNodeROImpl(me.getValue()));
+        }
+
+        // Copy the allNodes collection.
+        for (MemcachedNode n : allNodes) {
+            an.add(new MemcachedNodeROImpl(n));
+        }
+
+        return new RefinedKetamaNodeLocator(smn, an, hashAlg, weights, config);
+    }
+
+    @Override
+    public void updateLocator(List<MemcachedNode> nodes) {
+        allNodes = nodes;
+        setKetamaNodes(nodes);
+    }
+
+    /**
+     * @return the ketamaNodes
+     */
+    protected TreeMap<Long, MemcachedNode> getKetamaNodes() {
+        return ketamaNodes;
+    }
+
+    /**
+     * Setup the KetamaNodeLocator with the list of nodes it should use.
+     *
+     * @param nodes a List of MemcachedNodes for this KetamaNodeLocator to use in
+     *          its continuum
+     */
+    protected void setKetamaNodes(List<MemcachedNode> nodes) {
+        TreeMap<Long, MemcachedNode> newNodeMap = new TreeMap<Long, MemcachedNode>();
+        int numReps = config.getNodeRepetitions();
+        int nodeCount = nodes.size();
+        int totalWeight = 0;
+
+        if (isWeightedKetama) {
+            for (MemcachedNode node : nodes) {
+                totalWeight += weights.get(node.getSocketAddress());
+            }
+        }
+
+        for (MemcachedNode node : nodes) {
+            if (isWeightedKetama) {
+
+                int thisWeight = weights.get(node.getSocketAddress());
+                float percent = (totalWeight == 0 ? 0f : (float) thisWeight / (float) totalWeight);
+                int pointerPerServer = (int) ((Math.floor(
+                        (float) (percent * (float) config.getNodeRepetitions() / 4 * (float) nodeCount + 0.0000000001)))
+                        * 4);
+                for (int i = 0; i < pointerPerServer / 4; i++) {
+                    for (long position : ketamaNodePositionsAtIteration(node, i)) {
+                        newNodeMap.put(position, node);
+                        getLogger().debug("Adding node %s with weight %s in position %d", node, thisWeight, position);
+                    }
+                }
+            } else {
+                // Ketama does some special work with md5 where it reuses chunks.
+                // To stay backwards compatible, the hash algorithm does not
+                // matter for Ketama placement; the positions should always be
+                // computed with MD5.
+                if (hashAlg == DefaultHashAlgorithm.KETAMA_HASH) {
+                    for (int i = 0; i < numReps / 4; i++) {
+                        for (long position : ketamaNodePositionsAtIteration(node, i)) {
+                            newNodeMap.put(position, node);
+                            getLogger().debug("Adding node %s in position %d", node, position);
+                        }
+                    }
+                } else {
+                    for (int i = 0; i < numReps; i++) {
+                        newNodeMap.put(hashAlg.hash(config.getKeyForNode(node, i)), node);
+                    }
+                }
+            }
+        }
+        assert newNodeMap.size() == numReps * nodes.size();
+        ketamaNodes = newNodeMap;
+    }
+
+    private List<Long> ketamaNodePositionsAtIteration(MemcachedNode node, int iteration) {
+        List<Long> positions = new ArrayList<Long>();
+        byte[] digest = DefaultHashAlgorithm.computeMd5(config.getKeyForNode(node, iteration));
+        for (int h = 0; h < 4; h++) {
+            Long k = ((long) (digest[3 + h * 4] & 0xFF) << 24) | ((long) (digest[2 + h * 4] & 0xFF) << 16);
+            k |= ((long) (digest[1 + h * 4] & 0xFF) << 8) | (digest[h * 4] & 0xFF);
+            positions.add(k);
+        }
+        return positions;
+    }
+}
\ No newline at end of file
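
For reference, the node-placement math above can be exercised in isolation. Below is a minimal, self-contained sketch (not part of the patch) that mirrors ketamaNodePositionsAtIteration(): it derives the four 32-bit ring positions packed into one MD5 digest of a node key. The node-key format "host:port-iteration" is only illustrative.

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.ArrayList;
    import java.util.List;

    public class KetamaPositionsDemo {
        // Each 16-byte MD5 digest yields four little-endian 4-byte ring positions.
        static List<Long> positions(String nodeKey) throws Exception {
            byte[] digest = MessageDigest.getInstance("MD5")
                    .digest(nodeKey.getBytes(StandardCharsets.UTF_8));
            List<Long> positions = new ArrayList<>();
            for (int h = 0; h < 4; h++) {
                long k = ((long) (digest[3 + h * 4] & 0xFF) << 24)
                        | ((long) (digest[2 + h * 4] & 0xFF) << 16)
                        | ((long) (digest[1 + h * 4] & 0xFF) << 8)
                        | (digest[h * 4] & 0xFF);
                positions.add(k);
            }
            return positions;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(positions("host:11211-0")); // four positions for iteration 0
        }
    }
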
diff --git a/cache/src/main/java/org/apache/kylin/cache/cachemanager/CacheConstants.java b/cache/src/main/java/org/apache/kylin/cache/cachemanager/CacheConstants.java
new file mode 100644
index 0000000000..07b15a5b7a
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/cachemanager/CacheConstants.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.cachemanager;
+
+public class CacheConstants {
+    public static final String QUERY_CACHE = "StorageCache";
+}
\ No newline at end of file
diff --git a/cache/src/main/java/org/apache/kylin/cache/cachemanager/InstrumentedEhCacheCacheManager.java b/cache/src/main/java/org/apache/kylin/cache/cachemanager/InstrumentedEhCacheCacheManager.java
new file mode 100644
index 0000000000..4f0911f92a
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/cachemanager/InstrumentedEhCacheCacheManager.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.cachemanager;
+
+import java.util.Collection;
+import java.util.Map;
+
+import org.apache.kylin.cache.ehcache.InstrumentedEhCacheCache;
+import org.apache.kylin.common.KylinConfig;
+import org.springframework.cache.Cache;
+import org.springframework.cache.ehcache.EhCacheCache;
+import org.springframework.cache.support.AbstractCacheManager;
+import org.springframework.util.Assert;
+
+import com.google.common.collect.Sets;
+
+import net.sf.ehcache.Ehcache;
+import net.sf.ehcache.Status;
+
+/**
+ * CacheManager backed by an EhCache {@link net.sf.ehcache.CacheManager}.
+ *
+ */
+public class InstrumentedEhCacheCacheManager extends AbstractCacheManager {
+
+    private net.sf.ehcache.CacheManager cacheManager;
+    private Map<String, String> metricsConfig = KylinConfig.getInstanceFromEnv().getKylinMetricsConf();
+    private boolean enableMetrics = false;
+
+    /**
+     * Return the backing EhCache {@link net.sf.ehcache.CacheManager}.
+     */
+    public net.sf.ehcache.CacheManager getCacheManager() {
+        return this.cacheManager;
+    }
+
+    /**
+     * Set the backing EhCache {@link net.sf.ehcache.CacheManager}.
+     */
+    public void setCacheManager(net.sf.ehcache.CacheManager cacheManager) {
+        this.cacheManager = cacheManager;
+        if ("true".equalsIgnoreCase(metricsConfig.get("ehcache.enabled"))) {
+            enableMetrics = true;
+        }
+    }
+
+    @Override
+    protected Collection<Cache> loadCaches() {
+        Assert.notNull(this.cacheManager, "A backing EhCache CacheManager is required");
+        Status status = this.cacheManager.getStatus();
+        Assert.isTrue(Status.STATUS_ALIVE.equals(status),
+                "An 'alive' EhCache CacheManager is required - current cache is " + status.toString());
+
+        String[] names = this.cacheManager.getCacheNames();
+        Collection<Cache> caches = Sets.newLinkedHashSetWithExpectedSize(names.length);
+        for (String name : names) {
+            if (enableMetrics) {
+                caches.add(new InstrumentedEhCacheCache(this.cacheManager.getEhcache(name)));
+            } else {
+                caches.add(new EhCacheCache(this.cacheManager.getEhcache(name)));
+            }
+        }
+        return caches;
+    }
+
+    @Override
+    public Cache getCache(String name) {
+        Cache cache = super.getCache(name);
+        if (cache == null) {
+            // check the EhCache cache again
+            // (in case the cache was added at runtime)
+            Ehcache ehcache = this.cacheManager.getEhcache(name);
+            if (ehcache != null) {
+                if (enableMetrics) {
+                    cache = new InstrumentedEhCacheCache(ehcache);
+                } else {
+                    cache = new EhCacheCache(ehcache);
+                }
+                addCache(cache);
+            }
+        }
+        return cache;
+    }
+
+}
\ No newline at end of file
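
A minimal wiring sketch for the manager above (not part of the patch). It assumes a Kylin environment is configured, since the manager reads KylinConfig when constructed, and the cache name "StorageCache" matches CacheConstants.QUERY_CACHE:

    net.sf.ehcache.CacheManager ehcache = net.sf.ehcache.CacheManager.create();
    ehcache.addCacheIfAbsent("StorageCache");

    InstrumentedEhCacheCacheManager cacheManager = new InstrumentedEhCacheCacheManager();
    cacheManager.setCacheManager(ehcache);
    cacheManager.afterPropertiesSet(); // from AbstractCacheManager: triggers loadCaches()

    org.springframework.cache.Cache cache = cacheManager.getCache("StorageCache");
    cache.put("key", "value");
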
diff --git a/cache/src/main/java/org/apache/kylin/cache/cachemanager/MemcachedCacheManager.java b/cache/src/main/java/org/apache/kylin/cache/cachemanager/MemcachedCacheManager.java
new file mode 100644
index 0000000000..a4e1ffe189
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/cachemanager/MemcachedCacheManager.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.cachemanager;
+
+import java.net.SocketAddress;
+import java.util.Collection;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.apache.kylin.cache.memcached.MemcachedCache;
+import org.apache.kylin.cache.memcached.MemcachedCacheConfig;
+import org.apache.kylin.cache.memcached.MemcachedChunkingCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.cache.Cache;
+import org.springframework.cache.support.AbstractCacheManager;
+import org.springframework.cache.support.SimpleValueWrapper;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import net.spy.memcached.MemcachedClientIF;
+
+public class MemcachedCacheManager extends AbstractCacheManager {
+
+    private static final Logger logger = LoggerFactory.getLogger(MemcachedCacheManager.class);
+    private static final Long ONE_MINUTE = 60 * 1000L;
+
+    @Autowired
+    private MemcachedCacheConfig memcachedCacheConfig;
+
+    private ScheduledExecutorService timer = Executors.newScheduledThreadPool(1,
+            new ThreadFactoryBuilder().setNameFormat("Memcached-HealthChecker").build());
+    private AtomicBoolean clusterHealth = new AtomicBoolean(true);
+
+    @Override
+    protected Collection<? extends Cache> loadCaches() {
+        Cache successCache = new MemCachedCacheAdaptor(
+                new MemcachedChunkingCache(MemcachedCache.create(memcachedCacheConfig, CacheConstants.QUERY_CACHE)));
+
+        addCache(successCache);
+
+        Collection<String> names = getCacheNames();
+        Collection<Cache> caches = Lists.newArrayList();
+        for (String name : names) {
+            caches.add(getCache(name));
+        }
+
+        timer.scheduleWithFixedDelay(new MemcachedClusterHealthChecker(), ONE_MINUTE, ONE_MINUTE,
+                TimeUnit.MILLISECONDS);
+        return caches;
+    }
+
+    public boolean isClusterDown() {
+        return !clusterHealth.get();
+    }
+
+    @VisibleForTesting
+    void setClusterHealth(boolean ifHealth) {
+        clusterHealth.set(ifHealth);
+    }
+
+    public static class MemCachedCacheAdaptor implements Cache {
+        private MemcachedCache memcachedCache;
+
+        public MemCachedCacheAdaptor(MemcachedCache memcachedCache) {
+            this.memcachedCache = memcachedCache;
+        }
+
+        @Override
+        public String getName() {
+            return memcachedCache.getName();
+        }
+
+        @Override
+        public Object getNativeCache() {
+            return memcachedCache.getNativeCache();
+        }
+
+        @Override
+        public ValueWrapper get(Object key) {
+            byte[] value = memcachedCache.get(key);
+            if (value == null) {
+                return null;
+            }
+            return new SimpleValueWrapper(SerializationUtils.deserialize(value));
+        }
+
+        @Override
+        public void put(Object key, Object value) {
+            memcachedCache.put(key, value);
+        }
+
+        @Override
+        public void evict(Object key) {
+            memcachedCache.evict(key);
+        }
+
+        @Override
+        public void clear() {
+            memcachedCache.clear();
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public <T> T get(Object key, Class<T> type) {
+            byte[] value = memcachedCache.get(key);
+            if (value == null) {
+                return null;
+            }
+            Object obj = SerializationUtils.deserialize(value);
+            if (obj != null && type != null && !type.isInstance(obj)) {
+                throw new IllegalStateException(
+                        "Cached value is not of required type [" + type.getName() + "]: " + obj);
+            }
+            return (T) obj;
+        }
+
+        @Override
+        // TODO: loader-based get is not supported by this adaptor yet.
+        public <T> T get(Object key, Callable<T> valueLoader) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        // TODO: this implementation does not guarantee atomicity.
+        // Without atomicity, this method should not be invoked.
+        public ValueWrapper putIfAbsent(Object key, Object value) {
+            byte[] existing = memcachedCache.get(key);
+            if (existing == null) {
+                memcachedCache.put(key, value);
+                return null;
+            } else {
+                return new SimpleValueWrapper(SerializationUtils.deserialize(existing));
+            }
+        }
+
+    }
+
+    private class MemcachedClusterHealthChecker implements Runnable {
+        @Override
+        public void run() {
+            Cache cache = getCache(CacheConstants.QUERY_CACHE);
+            MemcachedClientIF cacheClient = (MemcachedClientIF) cache.getNativeCache();
+            Collection<SocketAddress> liveServers = cacheClient.getAvailableServers();
+            Collection<SocketAddress> deadServers = cacheClient.getUnavailableServers();
+            if (liveServers.isEmpty()) {
+                clusterHealth.set(false);
+                logger.error("All servers in the memcached cluster are down, UnavailableServers: " + deadServers);
+            } else {
+                clusterHealth.set(true);
+                if (deadServers.size() > liveServers.size()) {
+                    logger.warn("More than half of the servers in the memcached cluster are down, LiveServers: "
+                            + liveServers + ", UnavailableServers: " + deadServers);
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
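
To make the adaptor's contract concrete: values must be Serializable because they round-trip through SerializationUtils, and a miss surfaces as a null ValueWrapper. A sketch (not part of the patch), assuming config is a MemcachedCacheConfig pointing at reachable memcached hosts:

    MemcachedCacheManager.MemCachedCacheAdaptor adaptor =
            new MemcachedCacheManager.MemCachedCacheAdaptor(
                    MemcachedCache.create(config, CacheConstants.QUERY_CACHE));

    adaptor.put("sql-digest", "cached result");              // value must be Serializable
    org.springframework.cache.Cache.ValueWrapper wrapper = adaptor.get("sql-digest");
    if (wrapper != null) {
        String result = (String) wrapper.get();              // a deserialized copy
    }
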
diff --git a/cache/src/main/java/org/apache/kylin/cache/cachemanager/RemoteLocalFailOverCacheManager.java b/cache/src/main/java/org/apache/kylin/cache/cachemanager/RemoteLocalFailOverCacheManager.java
new file mode 100644
index 0000000000..f9b7ef6ec3
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/cachemanager/RemoteLocalFailOverCacheManager.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.cachemanager;
+
+import java.util.Collection;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.cache.Cache;
+import org.springframework.cache.CacheManager;
+import org.springframework.cache.support.AbstractCacheManager;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+public class RemoteLocalFailOverCacheManager extends AbstractCacheManager {
+    private static final Logger logger = LoggerFactory.getLogger(RemoteLocalFailOverCacheManager.class);
+
+    @Autowired
+    private MemcachedCacheManager remoteCacheManager;
+
+    @Autowired
+    private CacheManager localCacheManager;
+
+    @Override
+    public void afterPropertiesSet() {
+        Preconditions.checkNotNull(localCacheManager, "localCacheManager is not injected yet");
+    }
+
+    @Override
+    protected Collection<? extends Cache> loadCaches() {
+        return null;
+    }
+
+    @Override
+    public Cache getCache(String name) {
+        if (remoteCacheManager == null || remoteCacheManager.isClusterDown()) {
+            logger.info("use local cache, because remote cache is not configured or down");
+            return localCacheManager.getCache(name);
+        } else {
+            return remoteCacheManager.getCache(name);
+        }
+    }
+
+    @VisibleForTesting
+    void disableRemoteCacheManager() {
+        remoteCacheManager.setClusterHealth(false);
+    }
+
+    @VisibleForTesting
+    void enableRemoteCacheManager() {
+        remoteCacheManager.setClusterHealth(true);
+    }
+}
\ No newline at end of file
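
The failover rule is that every getCache() call is routed to the remote (memcached) manager unless it is missing or its health flag is down, in which case the local ehcache manager serves the request. A small illustrative helper (hypothetical, not part of the patch):

    static org.springframework.cache.Cache queryCache(RemoteLocalFailOverCacheManager manager) {
        // Served by memcached while the cluster is healthy; transparently
        // falls back to the local ehcache otherwise.
        return manager.getCache(CacheConstants.QUERY_CACHE);
    }
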
diff --git a/cache/src/main/java/org/apache/kylin/cache/ehcache/InstrumentedEhCacheCache.java b/cache/src/main/java/org/apache/kylin/cache/ehcache/InstrumentedEhCacheCache.java
new file mode 100644
index 0000000000..7a9b58544b
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/ehcache/InstrumentedEhCacheCache.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.ehcache;
+
+import static org.apache.kylin.metrics.lib.impl.MetricsSystem.Metrics;
+import static org.apache.kylin.metrics.lib.impl.MetricsSystem.name;
+
+import java.util.concurrent.Callable;
+
+import org.springframework.cache.Cache;
+import org.springframework.cache.ehcache.EhCacheCache;
+import org.springframework.cache.support.SimpleValueWrapper;
+import org.springframework.util.Assert;
+
+import com.codahale.metrics.Gauge;
+
+import net.sf.ehcache.Ehcache;
+import net.sf.ehcache.Element;
+import net.sf.ehcache.Status;
+
+/**
+ * {@link Cache} implementation on top of an {@link Ehcache} instance.
+ *
+ */
+public class InstrumentedEhCacheCache implements Cache {
+
+    private final Ehcache cache;
+
+    /**
+     * Create an {@link EhCacheCache} instance.
+     * @param ehcache backing Ehcache instance
+     */
+    public InstrumentedEhCacheCache(Ehcache ehcache) {
+        Assert.notNull(ehcache, "Ehcache must not be null");
+        Status status = ehcache.getStatus();
+        Assert.isTrue(Status.STATUS_ALIVE.equals(status),
+                "An 'alive' Ehcache is required - current cache is " + status.toString());
+        this.cache = ehcache;
+
+        final String prefix = name(cache.getClass(), cache.getName());
+        Metrics.register(name(prefix, "hits"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().cacheHitCount();
+            }
+        });
+
+        Metrics.register(name(prefix, "in-memory-hits"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().localHeapHitCount();
+            }
+        });
+
+        Metrics.register(name(prefix, "misses"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().cacheMissCount();
+            }
+        });
+
+        Metrics.register(name(prefix, "in-memory-misses"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().localHeapMissCount();
+            }
+        });
+
+        Metrics.register(name(prefix, "objects"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().getSize();
+            }
+        });
+
+        Metrics.register(name(prefix, "in-memory-objects"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().getLocalHeapSize();
+            }
+        });
+
+        Metrics.register(name(prefix, "mean-get-time"), new Gauge<Double>() {
+            @Override
+            public Double getValue() {
+                return cache.getStatistics().cacheGetOperation().latency().average().value();
+            }
+        });
+
+        Metrics.register(name(prefix, "mean-search-time"), new Gauge<Double>() {
+            @Override
+            public Double getValue() {
+                return cache.getStatistics().cacheSearchOperation().latency().average().value();
+            }
+        });
+
+        Metrics.register(name(prefix, "eviction-count"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().cacheEvictionOperation().count().value();
+            }
+        });
+
+        Metrics.register(name(prefix, "writer-queue-size"), new Gauge<Long>() {
+            @Override
+            public Long getValue() {
+                return cache.getStatistics().getWriterQueueLength();
+            }
+        });
+    }
+
+    public String getName() {
+        return this.cache.getName();
+    }
+
+    public Ehcache getNativeCache() {
+        return this.cache;
+    }
+
+    public ValueWrapper get(Object key) {
+        Element element = this.cache.get(key);
+        return (element != null ? new SimpleValueWrapper(element.getObjectValue()) : null);
+    }
+
+    public void put(Object key, Object value) {
+        this.cache.put(new Element(key, value));
+    }
+
+    public void evict(Object key) {
+        this.cache.remove(key);
+    }
+
+    public void clear() {
+        this.cache.removeAll();
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> T get(Object key, Class<T> type) {
+        Element element = lookup(key);
+        Object value = (element != null ? element.getObjectValue() : null);
+        if (value != null && type != null && !type.isInstance(value)) {
+            throw new IllegalStateException("Cached value is not of required type [" + type.getName() + "]: " + value);
+        }
+        return (T) value;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> T get(Object key, Callable<T> valueLoader) {
+        Element element = lookup(key);
+        if (element != null) {
+            return (T) element.getObjectValue();
+        } else {
+            this.cache.acquireWriteLockOnKey(key);
+            try {
+                element = lookup(key); // One more attempt with the write lock
+                if (element != null) {
+                    return (T) element.getObjectValue();
+                } else {
+                    return loadValue(key, valueLoader);
+                }
+            } finally {
+                this.cache.releaseWriteLockOnKey(key);
+            }
+        }
+    }
+
+    @Override
+    public ValueWrapper putIfAbsent(Object key, Object value) {
+        Element existingElement = this.cache.putIfAbsent(new Element(key, value));
+        return (existingElement != null ? new SimpleValueWrapper(existingElement.getObjectValue()) : null);
+    }
+
+    private Element lookup(Object key) {
+        return this.cache.get(key);
+    }
+
+    private <T> T loadValue(Object key, Callable<T> valueLoader) {
+        T value;
+        try {
+            value = valueLoader.call();
+        } catch (Throwable ex) {
+            throw new ValueRetrievalException(key, valueLoader, ex);
+        }
+        put(key, value);
+        return value;
+    }
+}
\ No newline at end of file
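
The Callable variant above performs a double-checked read under Ehcache's per-key write lock, so the loader runs at most once per key among concurrent readers. A usage sketch (not part of the patch; computeExpensiveValue is a hypothetical stand-in, and constructing the wrapper registers gauges with Kylin's MetricsSystem, so a Kylin environment is assumed):

    net.sf.ehcache.CacheManager mgr = net.sf.ehcache.CacheManager.create();
    mgr.addCacheIfAbsent("StorageCache");
    InstrumentedEhCacheCache cache = new InstrumentedEhCacheCache(mgr.getEhcache("StorageCache"));

    // Computed at most once; concurrent readers of the same key wait on the write lock.
    String v = cache.get("expensive-key", () -> computeExpensiveValue());
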
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/CacheStats.java b/cache/src/main/java/org/apache/kylin/cache/memcached/CacheStats.java
new file mode 100644
index 0000000000..c91ba45e11
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/CacheStats.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+public class CacheStats {
+    private final long numHits;
+    private final long numMisses;
+    private final long getBytes;
+    private final long getTime;
+    private final long numPut;
+    private final long putBytes;
+    private final long numEvictions;
+    private final long numTimeouts;
+    private final long numErrors;
+
+    public CacheStats(long getBytes, long getTime, long numPut, long putBytes, long numHits, long numMisses,
+            long numEvictions, long numTimeouts, long numErrors) {
+        this.getBytes = getBytes;
+        this.getTime = getTime;
+        this.numPut = numPut;
+        this.putBytes = putBytes;
+        this.numHits = numHits;
+        this.numMisses = numMisses;
+        this.numEvictions = numEvictions;
+        this.numTimeouts = numTimeouts;
+        this.numErrors = numErrors;
+    }
+
+    public long getNumHits() {
+        return numHits;
+    }
+
+    public long getNumMisses() {
+        return numMisses;
+    }
+
+    public long getNumGet() {
+        return numHits + numMisses;
+    }
+
+    public long getNumGetBytes() {
+        return getBytes;
+    }
+
+    public long getNumPutBytes() {
+        return putBytes;
+    }
+
+    public long getNumPut() {
+        return numPut;
+    }
+
+    public long getNumEvictions() {
+        return numEvictions;
+    }
+
+    public long getNumTimeouts() {
+        return numTimeouts;
+    }
+
+    public long getNumErrors() {
+        return numErrors;
+    }
+
+    public long numLookups() {
+        return numHits + numMisses;
+    }
+
+    public double hitRate() {
+        long lookups = numLookups();
+        return lookups == 0 ? 0 : numHits / (double) lookups;
+    }
+
+    public long avgGetBytes() {
+        long lookups = numLookups();
+        return lookups == 0 ? 0 : getBytes / lookups;
+    }
+
+    public long getAvgGetTime() {
+        long lookups = numLookups();
+        return lookups == 0 ? 0 : getTime / lookups;
+    }
+}
\ No newline at end of file
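
A worked example of the accessors above (constructor order: getBytes, getTime, numPut, putBytes, numHits, numMisses, numEvictions, numTimeouts, numErrors):

    CacheStats stats = new CacheStats(4096L, 500L, 20L, 8192L, 90L, 10L, 0L, 1L, 0L);
    stats.numLookups();    // 100 (90 hits + 10 misses)
    stats.hitRate();       // 0.9
    stats.avgGetBytes();   // 4096 / 100 = 40
    stats.getAvgGetTime(); // 500 / 100 = 5
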
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/KeyHookLookup.java b/cache/src/main/java/org/apache/kylin/cache/memcached/KeyHookLookup.java
new file mode 100644
index 0000000000..b9bdf5c54a
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/KeyHookLookup.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import java.io.Serializable;
+import java.util.Arrays;
+
+/**
+ * A class implementing this interface indicates that key information must first be
+ * looked up from the cache itself to obtain a hook.
+ *
+ */
+public interface KeyHookLookup {
+    KeyHook lookupKeyHook(String key);
+
+    public static class KeyHook implements Serializable {
+        private static final long serialVersionUID = 2400159460862757991L;
+
+        private String[] chunkskey;
+        private byte[] values;
+
+        /**
+         * For de-serialization
+         */
+        public KeyHook() {
+        }
+
+        public KeyHook(String[] chunkskey, byte[] values) {
+            super();
+            this.chunkskey = chunkskey;
+            this.values = values;
+        }
+
+        public String[] getChunkskey() {
+            return chunkskey;
+        }
+
+        public void setChunkskey(String[] chunkskey) {
+            this.chunkskey = chunkskey;
+        }
+
+        public byte[] getValues() {
+            return values;
+        }
+
+        public void setValues(byte[] values) {
+            this.values = values;
+        }
+
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + Arrays.hashCode(chunkskey);
+            result = prime * result + Arrays.hashCode(values);
+            return result;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj)
+                return true;
+            if (obj == null)
+                return false;
+            if (getClass() != obj.getClass())
+                return false;
+            KeyHook other = (KeyHook) obj;
+            if (!Arrays.equals(chunkskey, other.chunkskey))
+                return false;
+            if (!Arrays.equals(values, other.values))
+                return false;
+            return true;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder builder = new StringBuilder();
+            if (chunkskey != null) {
+                builder.append("chunkskey_length:" + chunkskey.length);
+            } else {
+                builder.append("chunkskey_is_null");
+            }
+            builder.append("|");
+            if (values != null) {
+                builder.append("value_length:" + values.length);
+            } else {
+                builder.append("value_is_null");
+            }
+            return builder.toString();
+        }
+    }
+}
\ No newline at end of file
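
A quick illustration of the two hook shapes (not part of the patch): a chunked entry records only the chunk keys, while a small entry keeps its bytes inline.

    // Large value: only the chunk keys are recorded in the hook
    KeyHookLookup.KeyHook chunked = new KeyHookLookup.KeyHook(new String[] { "key0", "key1" }, null);
    System.out.println(chunked); // chunkskey_length:2|value_is_null

    // Small value: the serialized bytes live inline in the hook itself
    KeyHookLookup.KeyHook inline = new KeyHookLookup.KeyHook(null, "payload".getBytes());
    System.out.println(inline);  // chunkskey_is_null|value_length:7
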
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedCache.java b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedCache.java
new file mode 100644
index 0000000000..43bdc8d108
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedCache.java
@@ -0,0 +1,374 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.zip.DataFormatException;
+
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.SerializationUtils;
+import org.apache.kylin.common.KylinConfig;
+import org.apache.kylin.common.util.CompressionUtils;
+import org.apache.kylin.common.util.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Charsets;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.base.Throwables;
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Shorts;
+
+import net.spy.memcached.AddrUtil;
+import net.spy.memcached.ConnectionFactory;
+import net.spy.memcached.ConnectionFactoryBuilder;
+import net.spy.memcached.DefaultHashAlgorithm;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.MemcachedClientIF;
+import net.spy.memcached.ops.ArrayOperationQueueFactory;
+import net.spy.memcached.ops.LinkedOperationQueueFactory;
+import net.spy.memcached.ops.OperationQueueFactory;
+import net.spy.memcached.transcoders.SerializingTranscoder;
+
+/**
+ * Cache backed by Memcached. The implementation leverages the spymemcached client to talk to the servers.
+ * Memcached itself limits the size of the key, so the real key used for cache lookup is hashed from the original key.
+ * The implementation provides a way to detect hash collisions. It can also compress/decompress the value bytes based on the preconfigured compression threshold to save network bandwidth and storage space.
+ *
+ * @author mingmwang
+ *
+ */
+public class MemcachedCache {
+    public static final int MAX_PREFIX_LENGTH = MemcachedClientIF.MAX_KEY_LENGTH - 40 // length of namespace hash
+            - 40 // length of key hash
+            - 2; // length of separators
+    private static final Logger logger = LoggerFactory.getLogger(MemcachedCache.class);
+    private static final int DEFAULT_TTL = 7 * 24 * 3600;
+
+    private static final String UNABLE_TO_QUEUE_CACHE_OPERATION = "Unable to queue cache operation.";
+
+    protected final MemcachedCacheConfig config;
+    protected final MemcachedClientIF client;
+    protected final String memcachedPrefix;
+    protected final int compressThreshold;
+    protected final AtomicLong hitCount = new AtomicLong(0);
+    protected final AtomicLong missCount = new AtomicLong(0);
+    protected final AtomicLong readBytes = new AtomicLong(0);
+    protected final AtomicLong timeoutCount = new AtomicLong(0);
+    protected final AtomicLong errorCount = new AtomicLong(0);
+    protected final AtomicLong putCount = new AtomicLong(0);
+    protected final AtomicLong putBytes = new AtomicLong(0);
+    private final int timeToLiveSeconds;
+    protected AtomicLong cacheGetTime = new AtomicLong(0);
+
+    public MemcachedCache(final MemcachedClientIF client, final MemcachedCacheConfig config,
+            final String memcachedPrefix, int timeToLiveSeconds) {
+        Preconditions.checkArgument(memcachedPrefix.length() <= MAX_PREFIX_LENGTH,
+                "memcachedPrefix length [%d] exceeds maximum length [%d]", memcachedPrefix.length(), MAX_PREFIX_LENGTH);
+        this.memcachedPrefix = memcachedPrefix;
+        this.client = client;
+        this.config = config;
+        this.compressThreshold = config.getMaxObjectSize() / 2;
+        this.timeToLiveSeconds = timeToLiveSeconds;
+    }
+
+    public MemcachedCache(MemcachedCache cache) {
+        this(cache.client, cache.config, cache.memcachedPrefix, cache.timeToLiveSeconds);
+    }
+
+    /**
+     * Create and return a MemcachedCache. Each call to this method creates a new instance.
+     * @param config            the MemcachedCache configuration controlling the cache behavior
+     * @return a new MemcachedCache instance
+     */
+    public static MemcachedCache create(final MemcachedCacheConfig config, String memcachedPrefix) {
+        return create(config, memcachedPrefix, DEFAULT_TTL);
+    }
+
+    public static MemcachedCache create(final MemcachedCacheConfig config, String memcachedPrefix, int timeToLive) {
+        try {
+            SerializingTranscoder transcoder = new SerializingTranscoder(config.getMaxObjectSize());
+            // always no compression inside, we compress/decompress outside
+            transcoder.setCompressionThreshold(Integer.MAX_VALUE);
+
+            OperationQueueFactory opQueueFactory;
+            int maxQueueSize = config.getMaxOperationQueueSize();
+            if (maxQueueSize > 0) {
+                opQueueFactory = new ArrayOperationQueueFactory(maxQueueSize);
+            } else {
+                opQueueFactory = new LinkedOperationQueueFactory();
+            }
+            String hostsStr = config.getHosts();
+            ConnectionFactory connectionFactory = new MemcachedConnectionFactoryBuilder()
+                    .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
+                    .setHashAlg(DefaultHashAlgorithm.FNV1A_64_HASH)
+                    .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT).setDaemon(true)
+                    .setFailureMode(FailureMode.Redistribute).setTranscoder(transcoder).setShouldOptimize(true)
+                    .setOpQueueMaxBlockTime(config.getTimeout()).setOpTimeout(config.getTimeout())
+                    .setReadBufferSize(config.getReadBufferSize()).setOpQueueFactory(opQueueFactory).build();
+            return new MemcachedCache(new MemcachedClient(new MemcachedConnectionFactory(connectionFactory),
+                    AddrUtil.getAddresses(hostsStr)), config, memcachedPrefix, timeToLive);
+        } catch (IOException e) {
+            logger.error("Unable to create MemcachedCache instance.", e);
+            throw Throwables.propagate(e);
+        }
+    }
+
+    public String getName() {
+        return memcachedPrefix;
+    }
+
+    public Object getNativeCache() {
+        return client;
+    }
+
+    protected String serializeKey(Object key) {
+        try {
+            return JsonUtil.writeValueAsString(key);
+        } catch (JsonProcessingException e) {
+            logger.warn("Can not convert key to String.", e);
+        }
+        return null;
+    }
+
+    protected byte[] serializeValue(Object value) {
+        return SerializationUtils.serialize((Serializable) value);
+    }
+
+    @VisibleForTesting
+    byte[] encodeValue(String keyS, Object value) {
+        if (keyS == null) {
+            return null;
+        }
+        return encodeValue(keyS.getBytes(Charsets.UTF_8), serializeValue(value));
+    }
+
+    /**
+     * Gets the value bytes for a key from the cache. The key is first converted to a JSON string,
+     * then getBinary() computes the hashed key from that string and uses it as the real key for the
+     * lookup in the internal cache, finally decoding the value bytes from the lookup result.
+     */
+    public byte[] get(Object key) {
+        return get(serializeKey(key));
+    }
+
+    /**
+     * @param keyS should be the serialized string
+     */
+    public byte[] get(String keyS) {
+        return getBinary(keyS);
+    }
+
+    /**
+     * Puts a key/value pair into the cache. The key is converted to a JSON string and the value object is serialized to bytes.
+     * putBinary() then computes the hashed key from the original key string and encodes the original key bytes into the value bytes for hash-collision detection.
+     */
+    public void put(Object key, Object value) {
+        put(serializeKey(key), value);
+    }
+
+    /**
+     * @param keyS should be the serialized string
+     */
+    public void put(String keyS, Object value) {
+        if (keyS != null) {
+            putBinary(keyS, serializeValue(value), timeToLiveSeconds);
+        }
+    }
+
+    public void evict(Object key) {
+        if (key == null)
+            return;
+        evict(serializeKey(key));
+    }
+
+    /**
+     * @param keyS should be the serialized string
+     */
+    public void evict(String keyS) {
+        if (keyS == null)
+            return;
+        client.delete(computeKeyHash(keyS));
+    }
+
+    public void clear() {
+        logger.warn("Clear Remote Cache!");
+        Future<Boolean> resultFuture = client.flush();
+        try {
+            boolean result = resultFuture.get();
+            logger.warn("Clear Remote Cache returned with result: " + result);
+        } catch (Exception e) {
+            logger.warn("Can't clear Remote Cache.", e);
+        }
+    }
+
+    public CacheStats getStats() {
+        return new CacheStats(readBytes.get(), cacheGetTime.get(), putCount.get(), putBytes.get(), hitCount.get(),
+                missCount.get(), 0, timeoutCount.get(), errorCount.get());
+    }
+
+    /**
+     * @param keyS should be the serialized string
+     * @return the serialized value
+     */
+    protected byte[] getBinary(String keyS) {
+        if (Strings.isNullOrEmpty(keyS)) {
+            return null;
+        }
+        byte[] bytes = internalGet(computeKeyHash(keyS));
+        return decodeValue(keyS.getBytes(Charsets.UTF_8), bytes);
+    }
+
+    /**
+     * @param keyS should be the serialized string
+     * @param valueB should be the serialized value
+     */
+    protected void putBinary(String keyS, byte[] valueB, int expiration) {
+        if (Strings.isNullOrEmpty(keyS)) {
+            return;
+        }
+        internalPut(computeKeyHash(keyS), encodeValue(keyS.getBytes(Charsets.UTF_8), valueB), expiration);
+    }
+
+    protected byte[] internalGet(String hashedKey) {
+        Future<Object> future;
+        long start = System.currentTimeMillis();
+        try {
+            future = client.asyncGet(hashedKey);
+        } catch (IllegalStateException e) {
+            // operation did not get queued in time (queue is full)
+            errorCount.incrementAndGet();
+            logger.error(UNABLE_TO_QUEUE_CACHE_OPERATION, e);
+            return null;
+        } catch (Throwable t) {
+            errorCount.incrementAndGet();
+            logger.error(UNABLE_TO_QUEUE_CACHE_OPERATION, t);
+            return null;
+        }
+
+        try {
+            byte[] result = (byte[]) future.get(config.getTimeout(), TimeUnit.MILLISECONDS);
+            cacheGetTime.addAndGet(System.currentTimeMillis() - start);
+            if (result != null) {
+                hitCount.incrementAndGet();
+                readBytes.addAndGet(result.length);
+            } else {
+                missCount.incrementAndGet();
+            }
+            return result;
+        } catch (TimeoutException e) {
+            timeoutCount.incrementAndGet();
+            future.cancel(false);
+            return null;
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw Throwables.propagate(e);
+        } catch (ExecutionException e) {
+            errorCount.incrementAndGet();
+            logger.error("ExecutionException when pulling key meta from cache.", e);
+            return null;
+        }
+    }
+
+    private void internalPut(String hashedKey, byte[] encodedValue, int expiration) {
+        try {
+            client.set(hashedKey, expiration, encodedValue);
+            putCount.incrementAndGet();
+            putBytes.addAndGet(encodedValue.length);
+        } catch (IllegalStateException e) {
+            // operation did not get queued in time (queue is full)
+            errorCount.incrementAndGet();
+            logger.error(UNABLE_TO_QUEUE_CACHE_OPERATION, e);
+        } catch (Throwable t) {
+            errorCount.incrementAndGet();
+            logger.error(UNABLE_TO_QUEUE_CACHE_OPERATION, t);
+        }
+    }
+
+    protected byte[] encodeValue(byte[] key, byte[] valueB) {
+        byte[] compressed = null;
+        if (config.isEnableCompression() && (valueB.length + Ints.BYTES + key.length > compressThreshold)) {
+            try {
+                compressed = CompressionUtils.compress(ByteBuffer.allocate(Ints.BYTES + key.length + valueB.length)
+                        .putInt(key.length).put(key).put(valueB).array());
+            } catch (IOException e) {
+                compressed = null;
+                logger.warn("Compressing value bytes error.", e);
+            }
+        }
+        if (compressed != null) {
+            return ByteBuffer.allocate(Shorts.BYTES + compressed.length).putShort((short) 1).put(compressed).array();
+        } else {
+            return ByteBuffer.allocate(Shorts.BYTES + Ints.BYTES + key.length + valueB.length).putShort((short) 0)
+                    .putInt(key.length).put(key).put(valueB).array();
+        }
+    }
+
+    protected byte[] decodeValue(byte[] key, byte[] valueE) {
+        if (valueE == null)
+            return null;
+        ByteBuffer buf = ByteBuffer.wrap(valueE);
+        short enableCompression = buf.getShort();
+        byte[] uncompressed = null;
+        if (enableCompression == 1) {
+            byte[] value = new byte[buf.remaining()];
+            buf.get(value);
+            try {
+                uncompressed = CompressionUtils.decompress(value);
+            } catch (IOException | DataFormatException e) {
+                logger.error("Decompressing value bytes error.", e);
+                return null;
+            }
+        }
+        if (uncompressed != null) {
+            buf = ByteBuffer.wrap(uncompressed);
+        }
+        final int keyLength = buf.getInt();
+        byte[] keyBytes = new byte[keyLength];
+        buf.get(keyBytes);
+        if (!Arrays.equals(keyBytes, key)) {
+            logger.error("Keys do not match, possible hash collision!");
+            return null;
+        }
+        byte[] value = new byte[buf.remaining()];
+        buf.get(value);
+        return value;
+    }
+
+    protected String computeKeyHash(String key) {
+        // hash keys to keep things under 250 characters for memcached
+        return Joiner.on(":").skipNulls().join(KylinConfig.getInstanceFromEnv().getDeployEnv(), this.memcachedPrefix,
+                DigestUtils.shaHex(key));
+
+    }
+
+}
\ No newline at end of file
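
To spell out the wire format produced by encodeValue(): an uncompressed entry is [short 0][int keyLen][key bytes][value bytes]; a compressed entry is [short 1][compressed payload], where the payload decompresses to the same [int keyLen][key][value] layout. A plain-ByteBuffer sketch of the uncompressed round trip, mirroring decodeValue() (not part of the patch):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class EncodedValueLayoutDemo {
        public static void main(String[] args) {
            byte[] key = "k".getBytes(StandardCharsets.UTF_8);
            byte[] value = { 42 };

            // encode, uncompressed branch: [short 0][int keyLen][key][value]
            byte[] encoded = ByteBuffer.allocate(2 + 4 + key.length + value.length)
                    .putShort((short) 0).putInt(key.length).put(key).put(value).array();

            // decode: read the flag, then check the embedded key to detect hash collisions
            ByteBuffer buf = ByteBuffer.wrap(encoded);
            short flag = buf.getShort();            // 0 = not compressed
            byte[] keyBytes = new byte[buf.getInt()];
            buf.get(keyBytes);                      // must equal the original key bytes
            byte[] out = new byte[buf.remaining()];
            buf.get(out);                           // out[0] == 42
        }
    }
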
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedCacheConfig.java b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedCacheConfig.java
new file mode 100644
index 0000000000..d71c279dc4
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedCacheConfig.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import net.spy.memcached.DefaultConnectionFactory;
+
+public class MemcachedCacheConfig {
+    private long timeout = 500L;
+
+    // comma delimited list of memcached servers, given as host:port combination
+    private String hosts;
+
+    private int maxChunkSize = 1024;
+
+    private int maxObjectSize = 1024 * 1024;
+
+    // memcached client read buffer size, -1 uses the spymemcached library default
+    private int readBufferSize = DefaultConnectionFactory.DEFAULT_READ_BUFFER_SIZE;
+
+    // maximum operation queue size. 0 means unbounded
+    private int maxOperationQueueSize = 0;
+
+    // whether enable compress the value data or not
+    private boolean enableCompression = true;
+
+    public long getTimeout() {
+        return timeout;
+    }
+
+    public void setTimeout(long timeout) {
+        this.timeout = timeout;
+    }
+
+    public String getHosts() {
+        return hosts;
+    }
+
+    public void setHosts(String hosts) {
+        this.hosts = hosts;
+    }
+
+    public int getMaxChunkSize() {
+        return maxChunkSize;
+    }
+
+    public void setMaxChunkSize(int maxChunkSize) {
+        this.maxChunkSize = maxChunkSize;
+    }
+
+    public int getMaxObjectSize() {
+        return maxObjectSize;
+    }
+
+    public void setMaxObjectSize(int maxObjectSize) {
+        this.maxObjectSize = maxObjectSize;
+    }
+
+    public int getMaxOperationQueueSize() {
+        return maxOperationQueueSize;
+    }
+
+    public void setMaxOperationQueueSize(int maxOperationQueueSize) {
+        this.maxOperationQueueSize = maxOperationQueueSize;
+    }
+
+    public int getReadBufferSize() {
+        return readBufferSize;
+    }
+
+    public void setReadBufferSize(int readBufferSize) {
+        this.readBufferSize = readBufferSize;
+    }
+
+    public boolean isEnableCompression() {
+        return enableCompression;
+    }
+
+    public void setEnableCompression(boolean enableCompression) {
+        this.enableCompression = enableCompression;
+    }
+}
\ No newline at end of file
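
Putting the config and the cache together, a minimal sketch (not part of the patch; host/port is illustrative, and a Kylin environment is assumed because key hashing reads the deploy env from KylinConfig):

    MemcachedCacheConfig config = new MemcachedCacheConfig();
    config.setHosts("localhost:11211");  // comma-delimited host:port list
    config.setTimeout(500L);             // operation timeout in ms
    config.setEnableCompression(true);

    MemcachedCache cache = MemcachedCache.create(config, CacheConstants.QUERY_CACHE);
    cache.put("key", "value");           // key is JSON-serialized, value must be Serializable
    byte[] raw = cache.get("key");       // serialized value bytes, or null on a miss
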
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedChunkingCache.java b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedChunkingCache.java
new file mode 100644
index 0000000000..e79e7172a6
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedChunkingCache.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.apache.kylin.common.util.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Maps;
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Shorts;
+
+import net.spy.memcached.internal.BulkFuture;
+
+/**
+ * Subclass of MemcachedCache that supports storing large objects. Memcached itself limits the value size, by default to 1M.
+ * This implementation extends the limit to 1G by splitting large byte arrays into multiple chunks, and it preserves data integrity if some chunks are lost (due to server restarts or other reasons).
+ *
+ * @author mingmwang
+ */
+public class MemcachedChunkingCache extends MemcachedCache implements KeyHookLookup {
+    private static final Logger logger = LoggerFactory.getLogger(MemcachedChunkingCache.class);
+
+    public MemcachedChunkingCache(MemcachedCache cache) {
+        super(cache);
+        Preconditions.checkArgument(config.getMaxChunkSize() > 1, "maxChunkSize [%d] must be greater than 1",
+                config.getMaxChunkSize());
+        Preconditions.checkArgument(config.getMaxObjectSize() > 261, "maxObjectSize [%d] must be greater than 261",
+                config.getMaxObjectSize());
+    }
+
+    protected static byte[][] splitBytes(final byte[] data, final int nSplit) {
+        byte[][] dest = new byte[nSplit][];
+
+        final int splitSize = (data.length - 1) / nSplit + 1;
+        for (int i = 0; i < nSplit - 1; i++) {
+            dest[i] = Arrays.copyOfRange(data, i * splitSize, (i + 1) * splitSize);
+        }
+        dest[nSplit - 1] = Arrays.copyOfRange(data, (nSplit - 1) * splitSize, data.length);
+
+        return dest;
+    }
+
+    protected static int getValueSplit(MemcachedCacheConfig config, String keyS, int valueBLen) {
+        // reserve 6 bytes: the chunk index suffix appended to the key never exceeds 6 bytes
+        final int valueSize = config.getMaxObjectSize() - Shorts.BYTES - Ints.BYTES
+                - keyS.getBytes(Charsets.UTF_8).length - 6;
+        final int maxValueSize = config.getMaxChunkSize() * valueSize;
+        Preconditions.checkArgument(valueBLen <= maxValueSize,
+                "the value bytes length [%d] exceeds maximum value size [%d]", valueBLen, maxValueSize);
+        return (valueBLen - 1) / valueSize + 1;
+    }
+
+    protected static Pair<KeyHook, byte[][]> getKeyValuePair(int nSplit, String keyS, byte[] valueB) {
+        KeyHook keyHook;
+        byte[][] splitValueB = null;
+        if (nSplit > 1) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("Enable chunking for putting large cached object values, chunk size = " + nSplit
+                        + ", original value bytes size = " + valueB.length);
+            }
+            String[] chunkKeySs = new String[nSplit];
+            for (int i = 0; i < nSplit; i++) {
+                chunkKeySs[i] = keyS + i;
+            }
+            keyHook = new KeyHook(chunkKeySs, null);
+            splitValueB = splitBytes(valueB, nSplit);
+        } else {
+            if (logger.isDebugEnabled()) {
+                logger.debug(
+                        "Chunking not enabled, put the original value bytes to keyhook directly, original value bytes size = "
+                                + valueB.length);
+            }
+            keyHook = new KeyHook(null, valueB);
+        }
+
+        return new Pair<>(keyHook, splitValueB);
+    }
+
+    /**
+     * Overrides the parent getBinary(). It first fetches the KeyHook from the cache, then checks the hook to see whether chunking is enabled for the entry.
+     */
+    @Override
+    public byte[] getBinary(String keyS) {
+        if (Strings.isNullOrEmpty(keyS)) {
+            return null;
+        }
+        KeyHook keyHook = lookupKeyHook(keyS);
+        if (keyHook == null) {
+            return null;
+        }
+
+        if (keyHook.getChunkskey() == null || keyHook.getChunkskey().length == 0) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("Chunking not enabled, return the value bytes in the keyhook directly, value bytes size = "
+                        + keyHook.getValues().length);
+            }
+            return keyHook.getValues();
+        }
+
+        BulkFuture<Map<String, Object>> bulkFuture;
+        long start = System.currentTimeMillis();
+
+        if (logger.isDebugEnabled()) {
+            logger.debug("Chunking enabled, chunk count = " + keyHook.getChunkskey().length);
+        }
+
+        Map<String, String> keyLookup = computeKeyHash(Arrays.asList(keyHook.getChunkskey()));
+        try {
+            bulkFuture = client.asyncGetBulk(keyLookup.keySet());
+        } catch (IllegalStateException e) {
+            // operation did not get queued in time (queue is full)
+            errorCount.incrementAndGet();
+            logger.error("Unable to queue cache operation.", e);
+            return null;
+        } catch (Throwable t) {
+            errorCount.incrementAndGet();
+            logger.error("Unable to queue cache operation.", t);
+            return null;
+        }
+
+        try {
+            Map<String, Object> bulkResult = bulkFuture.get(config.getTimeout(), TimeUnit.MILLISECONDS);
+            cacheGetTime.addAndGet(System.currentTimeMillis() - start);
+            if (bulkResult.size() != keyHook.getChunkskey().length) {
+                missCount.incrementAndGet();
+                logger.warn("Some partial chunks missing for query key: " + keyS);
+                // remove all the partial chunks here
+                for (String partialKey : bulkResult.keySet()) {
+                    client.delete(partialKey);
+                }
+                deleteKeyHook(keyS);
+                return null;
+            }
+            hitCount.getAndAdd(keyHook.getChunkskey().length);
+            byte[][] bytesArray = new byte[keyHook.getChunkskey().length][];
+            for (Map.Entry<String, Object> entry : bulkResult.entrySet()) {
+                byte[] bytes = (byte[]) entry.getValue();
+                readBytes.addAndGet(bytes.length);
+                String originalKeyS = keyLookup.get(entry.getKey());
+                int idx = Integer.parseInt(originalKeyS.substring(keyS.length()));
+                bytesArray[idx] = decodeValue(originalKeyS.getBytes(Charsets.UTF_8), bytes);
+            }
+            return concatBytes(bytesArray);
+        } catch (TimeoutException e) {
+            timeoutCount.incrementAndGet();
+            bulkFuture.cancel(false);
+            return null;
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw Throwables.propagate(e);
+        } catch (ExecutionException e) {
+            errorCount.incrementAndGet();
+            logger.error("ExecutionException when pulling item from cache.", e);
+            return null;
+        }
+    }
+
+    /**
+     * This method overrides the parent putBinary() method. It splits large value bytes into multiple chunks that fit into the internal cache,
+     * and generates a KeyHook that records the keys of the split chunks.
+     */
+    @Override
+    public void putBinary(String keyS, byte[] valueB, int expiration) {
+        if (Strings.isNullOrEmpty(keyS)) {
+            return;
+        }
+        int nSplit = getValueSplit(config, keyS, valueB.length);
+        Pair<KeyHook, byte[][]> keyValuePair = getKeyValuePair(nSplit, keyS, valueB);
+        KeyHook keyHook = keyValuePair.getFirst();
+        byte[][] splitValueB = keyValuePair.getSecond();
+
+        if (logger.isDebugEnabled()) {
+            logger.debug("Put KeyHook {} into cache for the hashed key", keyHook);
+        }
+        super.putBinary(keyS, serializeValue(keyHook), expiration);
+        if (nSplit > 1) {
+            for (int i = 0; i < nSplit; i++) {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("Chunk[" + i + "] bytes size before encoding = " + splitValueB[i].length);
+                }
+                super.putBinary(keyHook.getChunkskey()[i], splitValueB[i], expiration);
+            }
+        }
+    }
+
+    public void evict(String keyS) {
+        if (Strings.isNullOrEmpty(keyS)) {
+            return;
+        }
+        KeyHook keyHook = lookupKeyHook(keyS);
+        if (keyHook == null) {
+            return;
+        }
+
+        if (keyHook.getChunkskey() != null && keyHook.getChunkskey().length > 0) {
+            String[] chunkKeys = keyHook.getChunkskey();
+            for (String chunkKey : chunkKeys) {
+                super.evict(chunkKey);
+            }
+        }
+        super.evict(keyS);
+    }
+
+    protected Map<String, String> computeKeyHash(List<String> keySList) {
+        return Maps.uniqueIndex(keySList, new Function<String, String>() {
+            @Override
+            public String apply(String keyS) {
+                return computeKeyHash(keyS);
+            }
+        });
+    }
+
+    private void deleteKeyHook(String keyS) {
+        try {
+            super.evict(keyS);
+        } catch (IllegalStateException e) {
+            // operation did not get queued in time (queue is full)
+            errorCount.incrementAndGet();
+            logger.error("Unable to queue cache operation: ", e);
+        }
+    }
+
+    private byte[] concatBytes(byte[]... bytesArray) {
+        int length = 0;
+        for (byte[] bytes : bytesArray) {
+            length += bytes.length;
+        }
+        byte[] result = new byte[length];
+        int destPos = 0;
+        for (byte[] bytes : bytesArray) {
+            System.arraycopy(bytes, 0, result, destPos, bytes.length);
+            destPos += bytes.length;
+        }
+        if (logger.isDebugEnabled()) {
+            logger.debug("Original value bytes size for all chunks = " + result.length);
+        }
+
+        return result;
+    }
+
+    @Override
+    public KeyHook lookupKeyHook(String keyS) {
+        byte[] bytes = super.getBinary(keyS);
+        if (bytes == null) {
+            return null;
+        }
+        return (KeyHook) SerializationUtils.deserialize(bytes);
+    }
+}
\ No newline at end of file
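
To make the chunking arithmetic above concrete: getValueSplit() computes ceil(valueLen / perChunkCapacity) with integer math, and splitBytes() cuts the payload into equally sized chunks with the remainder in the last one. Below is a minimal standalone sketch of that math, not part of the patch; the per-chunk capacity of 4 bytes is an illustrative stand-in for the value derived from maxObjectSize.

    // Standalone sketch of MemcachedChunkingCache's chunking arithmetic (illustrative only).
    public class ChunkMathSketch {
        // ceil(valueLen / perChunkCapacity) without floating point, as in getValueSplit()
        static int chunkCount(int valueLen, int perChunkCapacity) {
            return (valueLen - 1) / perChunkCapacity + 1;
        }

        // Same slicing as splitBytes(): equal-size chunks, remainder in the last one
        static byte[][] split(byte[] data, int nSplit) {
            byte[][] dest = new byte[nSplit][];
            int splitSize = (data.length - 1) / nSplit + 1;
            for (int i = 0; i < nSplit - 1; i++) {
                dest[i] = java.util.Arrays.copyOfRange(data, i * splitSize, (i + 1) * splitSize);
            }
            dest[nSplit - 1] = java.util.Arrays.copyOfRange(data, (nSplit - 1) * splitSize, data.length);
            return dest;
        }

        public static void main(String[] args) {
            byte[] value = new byte[10];
            int n = chunkCount(value.length, 4); // 3 chunks for a 10-byte value
            byte[][] chunks = split(value, n);
            System.out.println(n + " chunks, last chunk length = " + chunks[n - 1].length); // 3 chunks, last chunk length = 2
        }
    }
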
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedConnectionFactory.java b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedConnectionFactory.java
new file mode 100644
index 0000000000..fe48d3e207
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedConnectionFactory.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.nio.channels.SocketChannel;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.kylin.common.KylinConfig;
+
+import net.spy.memcached.ConnectionFactory;
+import net.spy.memcached.ConnectionObserver;
+import net.spy.memcached.DefaultConnectionFactory;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.HashAlgorithm;
+import net.spy.memcached.MemcachedConnection;
+import net.spy.memcached.MemcachedNode;
+import net.spy.memcached.NodeLocator;
+import net.spy.memcached.OperationFactory;
+import net.spy.memcached.auth.AuthDescriptor;
+import net.spy.memcached.compat.SpyObject;
+import net.spy.memcached.metrics.MetricCollector;
+import net.spy.memcached.metrics.MetricType;
+import net.spy.memcached.metrics.NoopMetricCollector;
+import net.spy.memcached.ops.Operation;
+import net.spy.memcached.transcoders.Transcoder;
+
+public class MemcachedConnectionFactory extends SpyObject implements ConnectionFactory {
+    private ConnectionFactory underlying;
+    private Map<String, String> metricsConfig = KylinConfig.getInstanceFromEnv().getKylinMetricsConf();
+
+    public MemcachedConnectionFactory(ConnectionFactory underlying) {
+        this.underlying = underlying;
+    }
+
+    @Override
+    public MetricType enableMetrics() {
+        String metricType = metricsConfig.get("memcached.metricstype");
+        return metricType == null ? DefaultConnectionFactory.DEFAULT_METRIC_TYPE
+                : MetricType.valueOf(metricType.toUpperCase(Locale.ROOT));
+    }
+
+    @Override
+    public MetricCollector getMetricCollector() {
+        String enableMetrics = metricsConfig.get("memcached.enabled");
+        if (enableMetrics().equals(MetricType.OFF) || enableMetrics == null
+                || "false".equalsIgnoreCase(enableMetrics)) {
+            getLogger().debug("Memcached metrics collection disabled.");
+            return new NoopMetricCollector();
+        } else {
+            getLogger().info("Memcached metrics collection enabled (Profile " + enableMetrics() + ").");
+            return new MemcachedMetrics();
+        }
+    }
+
+    @Override
+    public MemcachedConnection createConnection(List<InetSocketAddress> addrs) throws IOException {
+        return underlying.createConnection(addrs);
+    }
+
+    @Override
+    public MemcachedNode createMemcachedNode(SocketAddress sa, SocketChannel c, int bufSize) {
+        return underlying.createMemcachedNode(sa, c, bufSize);
+    }
+
+    @Override
+    public BlockingQueue<Operation> createOperationQueue() {
+        return underlying.createOperationQueue();
+    }
+
+    @Override
+    public BlockingQueue<Operation> createReadOperationQueue() {
+        return underlying.createReadOperationQueue();
+    }
+
+    @Override
+    public BlockingQueue<Operation> createWriteOperationQueue() {
+        return underlying.createWriteOperationQueue();
+    }
+
+    @Override
+    public long getOpQueueMaxBlockTime() {
+        return underlying.getOpQueueMaxBlockTime();
+    }
+
+    @Override
+    public ExecutorService getListenerExecutorService() {
+        return underlying.getListenerExecutorService();
+    }
+
+    @Override
+    public boolean isDefaultExecutorService() {
+        return underlying.isDefaultExecutorService();
+    }
+
+    @Override
+    public NodeLocator createLocator(List<MemcachedNode> nodes) {
+        return underlying.createLocator(nodes);
+    }
+
+    @Override
+    public OperationFactory getOperationFactory() {
+        return underlying.getOperationFactory();
+    }
+
+    @Override
+    public long getOperationTimeout() {
+        return underlying.getOperationTimeout();
+    }
+
+    @Override
+    public boolean isDaemon() {
+        return underlying.isDaemon();
+    }
+
+    @Override
+    public boolean useNagleAlgorithm() {
+        return underlying.useNagleAlgorithm();
+    }
+
+    @Override
+    public Collection<ConnectionObserver> getInitialObservers() {
+        return underlying.getInitialObservers();
+    }
+
+    @Override
+    public FailureMode getFailureMode() {
+        return underlying.getFailureMode();
+    }
+
+    @Override
+    public Transcoder<Object> getDefaultTranscoder() {
+        return underlying.getDefaultTranscoder();
+    }
+
+    @Override
+    public boolean shouldOptimize() {
+        return underlying.shouldOptimize();
+    }
+
+    @Override
+    public int getReadBufSize() {
+        return underlying.getReadBufSize();
+    }
+
+    @Override
+    public HashAlgorithm getHashAlg() {
+        return underlying.getHashAlg();
+    }
+
+    @Override
+    public long getMaxReconnectDelay() {
+        return underlying.getMaxReconnectDelay();
+    }
+
+    @Override
+    public AuthDescriptor getAuthDescriptor() {
+        return underlying.getAuthDescriptor();
+    }
+
+    @Override
+    public int getTimeoutExceptionThreshold() {
+        return underlying.getTimeoutExceptionThreshold();
+    }
+
+    @Override
+    public long getAuthWaitTime() {
+        return underlying.getAuthWaitTime();
+    }
+}
\ No newline at end of file
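
The factory above is a pure delegating decorator: every ConnectionFactory method forwards to the wrapped instance, and only the metrics hooks are overridden. A hedged usage sketch follows; the host:port and timeout are made-up values, and the plain spymemcached builder stands in for the Kylin-specific wrapping, which needs a live KylinConfig environment.

    import java.io.IOException;

    import net.spy.memcached.AddrUtil;
    import net.spy.memcached.ConnectionFactory;
    import net.spy.memcached.ConnectionFactoryBuilder;
    import net.spy.memcached.MemcachedClient;

    public class ConnectionFactorySketch {
        public static void main(String[] args) throws IOException {
            // Build a base factory; in Kylin this would be wrapped in
            // MemcachedConnectionFactory so metrics collection is overridden
            // while all other behavior delegates to the base factory.
            ConnectionFactory base = new ConnectionFactoryBuilder()
                    .setOpTimeout(500L) // illustrative timeout in milliseconds
                    .build();
            MemcachedClient client = new MemcachedClient(base,
                    AddrUtil.getAddresses("localhost:11211")); // illustrative host:port
            client.shutdown();
        }
    }
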
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedConnectionFactoryBuilder.java b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedConnectionFactoryBuilder.java
new file mode 100644
index 0000000000..97af4a6103
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedConnectionFactoryBuilder.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ExecutorService;
+
+import net.spy.memcached.ArrayModNodeLocator;
+import net.spy.memcached.ConnectionFactory;
+import net.spy.memcached.ConnectionFactoryBuilder;
+import net.spy.memcached.ConnectionObserver;
+import net.spy.memcached.DefaultConnectionFactory;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.HashAlgorithm;
+import net.spy.memcached.MemcachedNode;
+import net.spy.memcached.NodeLocator;
+import net.spy.memcached.OperationFactory;
+import net.spy.memcached.RefinedKetamaNodeLocator;
+import net.spy.memcached.auth.AuthDescriptor;
+import net.spy.memcached.metrics.MetricCollector;
+import net.spy.memcached.metrics.MetricType;
+import net.spy.memcached.ops.Operation;
+import net.spy.memcached.transcoders.Transcoder;
+
+public class MemcachedConnectionFactoryBuilder extends ConnectionFactoryBuilder {
+    /**
+     * Get the ConnectionFactory set up with the provided parameters.
+     */
+    public ConnectionFactory build() {
+        return new DefaultConnectionFactory() {
+
+            @Override
+            public BlockingQueue<Operation> createOperationQueue() {
+                return opQueueFactory == null ? super.createOperationQueue() : opQueueFactory.create();
+            }
+
+            @Override
+            public BlockingQueue<Operation> createReadOperationQueue() {
+                return readQueueFactory == null ? super.createReadOperationQueue() : readQueueFactory.create();
+            }
+
+            @Override
+            public BlockingQueue<Operation> createWriteOperationQueue() {
+                return writeQueueFactory == null ? super.createWriteOperationQueue() : writeQueueFactory.create();
+            }
+
+            @Override
+            public NodeLocator createLocator(List<MemcachedNode> nodes) {
+                switch (locator) {
+                case ARRAY_MOD:
+                    return new ArrayModNodeLocator(nodes, getHashAlg());
+                case CONSISTENT:
+                    return new RefinedKetamaNodeLocator(nodes, getHashAlg());
+                default:
+                    throw new IllegalStateException("Unhandled locator type: " + locator);
+                }
+            }
+
+            @Override
+            public Transcoder<Object> getDefaultTranscoder() {
+                return transcoder == null ? super.getDefaultTranscoder() : transcoder;
+            }
+
+            @Override
+            public FailureMode getFailureMode() {
+                return failureMode == null ? super.getFailureMode() : failureMode;
+            }
+
+            @Override
+            public HashAlgorithm getHashAlg() {
+                return hashAlg == null ? super.getHashAlg() : hashAlg;
+            }
+
+            public Collection<ConnectionObserver> getInitialObservers() {
+                return initialObservers;
+            }
+
+            @Override
+            public OperationFactory getOperationFactory() {
+                return opFact == null ? super.getOperationFactory() : opFact;
+            }
+
+            @Override
+            public long getOperationTimeout() {
+                return opTimeout == -1 ? super.getOperationTimeout() : opTimeout;
+            }
+
+            @Override
+            public int getReadBufSize() {
+                return readBufSize == -1 ? super.getReadBufSize() : readBufSize;
+            }
+
+            @Override
+            public boolean isDaemon() {
+                return isDaemon;
+            }
+
+            @Override
+            public boolean shouldOptimize() {
+                return shouldOptimize;
+            }
+
+            @Override
+            public boolean useNagleAlgorithm() {
+                return useNagle;
+            }
+
+            @Override
+            public long getMaxReconnectDelay() {
+                return maxReconnectDelay;
+            }
+
+            @Override
+            public AuthDescriptor getAuthDescriptor() {
+                return authDescriptor;
+            }
+
+            @Override
+            public long getOpQueueMaxBlockTime() {
+                return opQueueMaxBlockTime > -1 ? opQueueMaxBlockTime : super.getOpQueueMaxBlockTime();
+            }
+
+            @Override
+            public int getTimeoutExceptionThreshold() {
+                return timeoutExceptionThreshold;
+            }
+
+            @Override
+            public MetricType enableMetrics() {
+                return metricType == null ? super.enableMetrics() : metricType;
+            }
+
+            @Override
+            public MetricCollector getMetricCollector() {
+                return collector == null ? super.getMetricCollector() : collector;
+            }
+
+            @Override
+            public ExecutorService getListenerExecutorService() {
+                return executorService == null ? super.getListenerExecutorService() : executorService;
+            }
+
+            @Override
+            public boolean isDefaultExecutorService() {
+                return executorService == null;
+            }
+
+            @Override
+            public long getAuthWaitTime() {
+                return authWaitTime;
+            }
+        };
+
+    }
+}
\ No newline at end of file
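
The builder above mirrors spymemcached's ConnectionFactoryBuilder and differs only in the CONSISTENT branch of createLocator(), where it substitutes the RefinedKetamaNodeLocator. A minimal configuration sketch with illustrative values (it assumes the same package as the builder above):

    import net.spy.memcached.ConnectionFactory;
    import net.spy.memcached.ConnectionFactoryBuilder;
    import net.spy.memcached.FailureMode;

    public class BuilderSketch {
        public static void main(String[] args) {
            // The inherited setters populate the protected fields that build() reads.
            ConnectionFactory factory = new MemcachedConnectionFactoryBuilder()
                    .setOpTimeout(500L)                       // illustrative
                    .setFailureMode(FailureMode.Redistribute) // illustrative
                    .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT) // RefinedKetamaNodeLocator
                    .build();
            System.out.println("factory ready: " + factory);
        }
    }
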
diff --git a/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedMetrics.java b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedMetrics.java
new file mode 100644
index 0000000000..ada9144465
--- /dev/null
+++ b/cache/src/main/java/org/apache/kylin/cache/memcached/MemcachedMetrics.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import static org.apache.kylin.metrics.lib.impl.MetricsSystem.Metrics;
+
+import java.util.Map;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.Meter;
+import com.google.common.collect.Maps;
+
+import net.spy.memcached.metrics.AbstractMetricCollector;
+import net.spy.memcached.metrics.DefaultMetricCollector;
+import net.spy.memcached.metrics.MetricCollector;
+
+/**
+ * A {@link MetricCollector} that uses the Codahale Metrics library.
+ *
+ * System properties can be used to customize the behavior
+ * of the collector at runtime.
+ */
+public final class MemcachedMetrics extends AbstractMetricCollector {
+
+    /**
+     * Contains all registered {@link Counter}s.
+     */
+    private Map<String, Counter> counters;
+
+    /**
+     * Contains all registered {@link Meter}s.
+     */
+    private Map<String, Meter> meters;
+
+    /**
+     * Contains all registered {@link Histogram}s.
+     */
+    private Map<String, Histogram> histograms;
+
+    /**
+     * Create a new {@link MemcachedMetrics} collector.
+     *
+     * The constructor initializes the concurrent maps that back the
+     * counter, meter, and histogram registries.
+     */
+    public MemcachedMetrics() {
+        counters = Maps.newConcurrentMap();
+        meters = Maps.newConcurrentMap();
+        histograms = Maps.newConcurrentMap();
+    }
+
+    @Override
+    public void addCounter(String name) {
+        if (!counters.containsKey(name)) {
+            counters.put(name, Metrics.counter(name));
+        }
+    }
+
+    @Override
+    public void removeCounter(String name) {
+        if (counters.containsKey(name)) {
+            Metrics.remove(name);
+            counters.remove(name);
+        }
+    }
+
+    @Override
+    public void incrementCounter(String name, int amount) {
+        if (counters.containsKey(name)) {
+            counters.get(name).inc(amount);
+        }
+    }
+
+    @Override
+    public void decrementCounter(String name, int amount) {
+        if (counters.containsKey(name)) {
+            counters.get(name).dec(amount);
+        }
+    }
+
+    @Override
+    public void addMeter(String name) {
+        if (!meters.containsKey(name)) {
+            meters.put(name, Metrics.meter(name));
+        }
+    }
+
+    @Override
+    public void removeMeter(String name) {
+        if (meters.containsKey(name)) {
+            meters.remove(name);
+        }
+    }
+
+    @Override
+    public void markMeter(String name) {
+        if (meters.containsKey(name)) {
+            meters.get(name).mark();
+        }
+    }
+
+    @Override
+    public void addHistogram(String name) {
+        if (!histograms.containsKey(name)) {
+            histograms.put(name, Metrics.histogram(name));
+        }
+    }
+
+    @Override
+    public void removeHistogram(String name) {
+        if (histograms.containsKey(name)) {
+            histograms.remove(name);
+        }
+    }
+
+    @Override
+    public void updateHistogram(String name, int amount) {
+        if (histograms.containsKey(name)) {
+            histograms.get(name).update(amount);
+        }
+    }
+}
\ No newline at end of file
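
The collector above bridges spymemcached's metric callbacks into the Codahale registry behind Kylin's MetricsSystem. A tiny standalone sketch of the same counter flow, using a plain MetricRegistry in place of the MetricsSystem singleton:

    import com.codahale.metrics.Counter;
    import com.codahale.metrics.MetricRegistry;

    public class MetricsSketch {
        public static void main(String[] args) {
            MetricRegistry registry = new MetricRegistry(); // stand-in for MetricsSystem.Metrics
            Counter counter = registry.counter("memcached.ops"); // mirrors addCounter("memcached.ops")
            counter.inc(3); // mirrors incrementCounter(name, 3)
            counter.dec(1); // mirrors decrementCounter(name, 1)
            System.out.println("memcached.ops = " + counter.getCount()); // prints 2
        }
    }
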
diff --git a/cache/src/test/java/org/apache/kylin/cache/cachemanager/RemoteLocalFailOverCacheManagerTest.java b/cache/src/test/java/org/apache/kylin/cache/cachemanager/RemoteLocalFailOverCacheManagerTest.java
new file mode 100644
index 0000000000..243e3861e0
--- /dev/null
+++ b/cache/src/test/java/org/apache/kylin/cache/cachemanager/RemoteLocalFailOverCacheManagerTest.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.cachemanager;
+
+import static org.apache.kylin.cache.cachemanager.CacheConstants.QUERY_CACHE;
+
+import org.apache.kylin.common.util.LocalFileMetadataTestCase;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Qualifier;
+import org.springframework.cache.ehcache.EhCacheCache;
+import org.springframework.test.context.ActiveProfiles;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = { "classpath:cacheContext.xml" })
+@ActiveProfiles("testing-memcached")
+public class RemoteLocalFailOverCacheManagerTest {
+
+    @Autowired
+    @Qualifier("cacheManager")
+    RemoteLocalFailOverCacheManager cacheManager;
+
+    @BeforeClass
+    public static void setupResource() throws Exception {
+        LocalFileMetadataTestCase.staticCreateTestMetadata();
+    }
+
+    @AfterClass
+    public static void tearDownResource() {
+    }
+
+    @Test
+    public void testCacheManager() {
+        cacheManager.disableRemoteCacheManager();
+        Assert.assertTrue("Memcached failover to ehcache", cacheManager.getCache(QUERY_CACHE) instanceof EhCacheCache);
+        cacheManager.enableRemoteCacheManager();
+        Assert.assertTrue("Memcached enabled",
+                cacheManager.getCache(QUERY_CACHE) instanceof MemcachedCacheManager.MemCachedCacheAdaptor);
+    }
+}
\ No newline at end of file
diff --git a/cache/src/test/java/org/apache/kylin/cache/memcached/MemcachedCacheTest.java b/cache/src/test/java/org/apache/kylin/cache/memcached/MemcachedCacheTest.java
new file mode 100644
index 0000000000..40571a723f
--- /dev/null
+++ b/cache/src/test/java/org/apache/kylin/cache/memcached/MemcachedCacheTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kylin.cache.cachemanager.CacheConstants;
+import org.apache.kylin.cache.cachemanager.MemcachedCacheManager.MemCachedCacheAdaptor;
+import org.apache.kylin.common.util.LocalFileMetadataTestCase;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.internal.GetFuture;
+
+public class MemcachedCacheTest extends LocalFileMetadataTestCase {
+
+    private Map<String, String> keyValueMap;
+    private MemCachedCacheAdaptor memCachedAdaptor;
+
+    @Before
+    public void setUp() throws Exception {
+        this.createTestMetadata();
+
+        keyValueMap = Maps.newHashMap();
+        keyValueMap.put("sql1", "value1");
+        keyValueMap.put("sql11", "value11");
+
+        MemcachedCacheConfig cacheConfig = new MemcachedCacheConfig();
+        MemcachedClient memcachedClient = mock(MemcachedClient.class);
+        MemcachedCache memcachedCache = new MemcachedCache(memcachedClient, cacheConfig, CacheConstants.QUERY_CACHE,
+                7 * 24 * 3600);
+        memCachedAdaptor = new MemCachedCacheAdaptor(memcachedCache);
+
+        //Mock put to cache
+        for (String key : keyValueMap.keySet()) {
+            String keyS = memcachedCache.serializeKey(key);
+            String hashedKey = memcachedCache.computeKeyHash(keyS);
+
+            String value = keyValueMap.get(key);
+            byte[] valueE = memcachedCache.encodeValue(keyS, value);
+
+            GetFuture<Object> future = mock(GetFuture.class);
+            when(future.get(cacheConfig.getTimeout(), TimeUnit.MILLISECONDS)).thenReturn(valueE);
+            when(memcachedClient.asyncGet(hashedKey)).thenReturn(future);
+        }
+    }
+
+    @After
+    public void after() throws Exception {
+        this.cleanupTestMetadata();
+    }
+
+    @Test
+    public void testGet() {
+        for (String key : keyValueMap.keySet()) {
+            Assert.assertEquals("The value should not change", keyValueMap.get(key), memCachedAdaptor.get(key).get());
+        }
+    }
+}
\ No newline at end of file
diff --git a/cache/src/test/java/org/apache/kylin/cache/memcached/MemcachedChunkingCacheTest.java b/cache/src/test/java/org/apache/kylin/cache/memcached/MemcachedChunkingCacheTest.java
new file mode 100644
index 0000000000..295b20cd6f
--- /dev/null
+++ b/cache/src/test/java/org/apache/kylin/cache/memcached/MemcachedChunkingCacheTest.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.cache.memcached;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.kylin.cache.cachemanager.CacheConstants;
+import org.apache.kylin.cache.cachemanager.MemcachedCacheManager.MemCachedCacheAdaptor;
+import org.apache.kylin.common.util.LocalFileMetadataTestCase;
+import org.apache.kylin.common.util.Pair;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Strings;
+import com.google.common.collect.Maps;
+
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.internal.BulkFuture;
+import net.spy.memcached.internal.GetFuture;
+
+public class MemcachedChunkingCacheTest extends LocalFileMetadataTestCase {
+
+    private Map<String, String> smallValueMap;
+    private Map<String, String> largeValueMap;
+    private MemCachedCacheAdaptor memCachedAdaptor;
+
+    @Before
+    public void setUp() throws Exception {
+        this.createTestMetadata();
+        final int maxObjectSize = 300;
+
+        smallValueMap = Maps.newHashMap();
+        smallValueMap.put("sql1", "value1");
+
+        largeValueMap = Maps.newHashMap();
+        largeValueMap.put("sql2", Strings.repeat("value2", maxObjectSize));
+
+        MemcachedCacheConfig cacheConfig = new MemcachedCacheConfig();
+        cacheConfig.setMaxObjectSize(maxObjectSize);
+        MemcachedClient memcachedClient = mock(MemcachedClient.class);
+        MemcachedCache memcachedCache = new MemcachedCache(memcachedClient, cacheConfig, CacheConstants.QUERY_CACHE,
+                7 * 24 * 3600);
+        MemcachedChunkingCache memcachedChunkingCache = new MemcachedChunkingCache(memcachedCache);
+        memCachedAdaptor = new MemCachedCacheAdaptor(memcachedChunkingCache);
+
+        //Mock put to cache
+        for (String key : smallValueMap.keySet()) {
+            String keyS = memcachedCache.serializeKey(key);
+            String hashedKey = memcachedCache.computeKeyHash(keyS);
+
+            String value = smallValueMap.get(key);
+            byte[] valueB = memcachedCache.serializeValue(value);
+            KeyHookLookup.KeyHook keyHook = new KeyHookLookup.KeyHook(null, valueB);
+            byte[] valueE = memcachedCache.encodeValue(keyS, keyHook);
+
+            GetFuture<Object> future = mock(GetFuture.class);
+            when(memcachedClient.asyncGet(hashedKey)).thenReturn(future);
+
+            when(future.get(cacheConfig.getTimeout(), TimeUnit.MILLISECONDS)).thenReturn(valueE);
+        }
+
+        //Mock put large value to cache
+        for (String key : largeValueMap.keySet()) {
+            String keyS = memcachedCache.serializeKey(key);
+            String hashedKey = memcachedCache.computeKeyHash(keyS);
+
+            String value = largeValueMap.get(key);
+            byte[] valueB = memcachedCache.serializeValue(value);
+            int nSplit = MemcachedChunkingCache.getValueSplit(cacheConfig, keyS, valueB.length);
+            Pair<KeyHookLookup.KeyHook, byte[][]> keyValuePair = MemcachedChunkingCache.getKeyValuePair(nSplit, keyS,
+                    valueB);
+            KeyHookLookup.KeyHook keyHook = keyValuePair.getFirst();
+            byte[][] splitValueB = keyValuePair.getSecond();
+
+            //For key
+            byte[] valueE = memcachedCache.encodeValue(keyS, keyHook);
+            GetFuture<Object> future = mock(GetFuture.class);
+            when(memcachedClient.asyncGet(hashedKey)).thenReturn(future);
+            when(future.get(cacheConfig.getTimeout(), TimeUnit.MILLISECONDS)).thenReturn(valueE);
+
+            //For splits
+            Map<String, String> keyLookup = memcachedChunkingCache
+                    .computeKeyHash(Arrays.asList(keyHook.getChunkskey()));
+            Map<String, Object> bulkResult = Maps.newHashMap();
+            for (int i = 0; i < nSplit; i++) {
+                String splitKeyS = keyHook.getChunkskey()[i];
+                bulkResult.put(memcachedCache.computeKeyHash(splitKeyS),
+                        memcachedCache.encodeValue(splitKeyS.getBytes(Charsets.UTF_8), splitValueB[i]));
+            }
+
+            BulkFuture<Map<String, Object>> bulkFuture = mock(BulkFuture.class);
+            when(memcachedClient.asyncGetBulk(keyLookup.keySet())).thenReturn(bulkFuture);
+            when(bulkFuture.get(cacheConfig.getTimeout(), TimeUnit.MILLISECONDS)).thenReturn(bulkResult);
+        }
+    }
+
+    @After
+    public void after() throws Exception {
+        this.cleanupTestMetadata();
+    }
+
+    @Test
+    public void testGet() {
+        for (String key : smallValueMap.keySet()) {
+            Assert.assertEquals("The value should not change", smallValueMap.get(key), memCachedAdaptor.get(key).get());
+        }
+        for (String key : largeValueMap.keySet()) {
+            Assert.assertEquals("The value should not change", largeValueMap.get(key), memCachedAdaptor.get(key).get());
+        }
+    }
+
+    @Test
+    public void testSplitBytes() {
+        byte[] data = new byte[8];
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) i;
+        }
+
+        int nSplit;
+        byte[][] dataSplits;
+
+        nSplit = 2;
+        dataSplits = MemcachedChunkingCache.splitBytes(data, nSplit);
+        Assert.assertEquals(nSplit, dataSplits.length);
+        Assert.assertArrayEquals(dataSplits[0], new byte[] { 0, 1, 2, 3 });
+        Assert.assertArrayEquals(dataSplits[1], new byte[] { 4, 5, 6, 7 });
+
+        nSplit = 3;
+        dataSplits = MemcachedChunkingCache.splitBytes(data, nSplit);
+        Assert.assertEquals(nSplit, dataSplits.length);
+        Assert.assertArrayEquals(dataSplits[0], new byte[] { 0, 1, 2 });
+        Assert.assertArrayEquals(dataSplits[1], new byte[] { 3, 4, 5 });
+        Assert.assertArrayEquals(dataSplits[2], new byte[] { 6, 7 });
+    }
+}
\ No newline at end of file
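
The test setup above stubs spymemcached's async APIs so the chunking path can be exercised without a live memcached server. A condensed sketch of the same Mockito stubbing pattern; the key, value, and timeout are illustrative:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.concurrent.TimeUnit;

    import net.spy.memcached.MemcachedClient;
    import net.spy.memcached.internal.GetFuture;

    public class MockAsyncGetSketch {
        @SuppressWarnings("unchecked")
        public static void main(String[] args) throws Exception {
            MemcachedClient client = mock(MemcachedClient.class);
            GetFuture<Object> future = mock(GetFuture.class);
            byte[] encoded = new byte[] { 1, 2, 3 }; // illustrative encoded value
            when(future.get(500, TimeUnit.MILLISECONDS)).thenReturn(encoded);
            when(client.asyncGet("hashed-key")).thenReturn(future);
            Object value = client.asyncGet("hashed-key").get(500, TimeUnit.MILLISECONDS);
            System.out.println("decoded length = " + ((byte[]) value).length); // prints 3
        }
    }
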
diff --git a/cache/src/test/resources/cacheContext.xml b/cache/src/test/resources/cacheContext.xml
new file mode 100644
index 0000000000..a2fb9e95ee
--- /dev/null
+++ b/cache/src/test/resources/cacheContext.xml
@@ -0,0 +1,47 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:cache="http://www.springframework.org/schema/cache"
+       xmlns:p="http://www.springframework.org/schema/p"
+       xmlns="http://www.springframework.org/schema/beans"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+    http://www.springframework.org/schema/beans/spring-beans-4.3.xsd
+    http://www.springframework.org/schema/context
+    http://www.springframework.org/schema/context/spring-context-4.3.xsd
+    http://www.springframework.org/schema/cache
+    http://www.springframework.org/schema/cache/spring-cache.xsd">
+
+    <description>Kylin Cache Test Context</description>
+    <context:annotation-config/>
+
+    <!-- Cache Config -->
+    <cache:annotation-driven/>
+
+    <beans profile="testing-memcached">
+        <bean id="ehcache"
+              class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache-test.xml" p:shared="true"/>
+        <bean id="localCacheManager" class="org.apache.kylin.cache.cachemanager.InstrumentedEhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+
+        <bean id="remoteCacheManager" class="org.apache.kylin.cache.cachemanager.MemcachedCacheManager"/>
+        <bean id="memcachedCacheConfig" class="org.apache.kylin.cache.memcached.MemcachedCacheConfig">
+            <property name="timeout" value="500"/>
+            <property name="hosts" value="localhost:11211"/>
+        </bean>
+
+        <bean id="cacheManager" class="org.apache.kylin.cache.cachemanager.RemoteLocalFailOverCacheManager"/>
+    </beans>
+
+</beans>
\ No newline at end of file
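
The test context above wires the failover manager from a local EhCache manager and a remote Memcached manager under the testing-memcached profile. A hedged sketch of loading such a context outside the JUnit runner, assuming the bean implements Spring's CacheManager (profile and bean names taken from the XML above):

    import org.springframework.cache.CacheManager;
    import org.springframework.context.support.ClassPathXmlApplicationContext;

    public class CacheContextSketch {
        public static void main(String[] args) {
            ClassPathXmlApplicationContext ctx =
                    new ClassPathXmlApplicationContext(new String[] { "cacheContext.xml" }, false);
            ctx.getEnvironment().setActiveProfiles("testing-memcached"); // matches the profile above
            ctx.refresh();
            CacheManager cacheManager = (CacheManager) ctx.getBean("cacheManager");
            System.out.println("cache manager: " + cacheManager.getClass().getSimpleName());
            ctx.close();
        }
    }
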
diff --git a/cache/src/test/resources/ehcache-test.xml b/cache/src/test/resources/ehcache-test.xml
new file mode 100644
index 0000000000..90299ec02a
--- /dev/null
+++ b/cache/src/test/resources/ehcache-test.xml
@@ -0,0 +1,21 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<ehcache maxBytesLocalHeap="256M">
+    <cache name="StorageCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+    >
+        <persistence strategy="none"/>
+    </cache>
+</ehcache>
\ No newline at end of file
diff --git a/core-common/src/main/java/org/apache/kylin/common/BackwardCompatibilityConfig.java b/core-common/src/main/java/org/apache/kylin/common/BackwardCompatibilityConfig.java
index cbe2e4c0c5..751d2f7da6 100644
--- a/core-common/src/main/java/org/apache/kylin/common/BackwardCompatibilityConfig.java
+++ b/core-common/src/main/java/org/apache/kylin/common/BackwardCompatibilityConfig.java
@@ -40,15 +40,16 @@
     private static final Logger logger = LoggerFactory.getLogger(BackwardCompatibilityConfig.class);
 
     private static final String KYLIN_BACKWARD_COMPATIBILITY = "kylin-backward-compatibility";
+    private static final String PROPERTIES_SUFFIX = ".properties";
 
     private final Map<String, String> old2new = Maps.newConcurrentMap();
     private final Map<String, String> old2newPrefix = Maps.newConcurrentMap();
 
     public BackwardCompatibilityConfig() {
         ClassLoader loader = Thread.currentThread().getContextClassLoader();
-        init(loader.getResourceAsStream(KYLIN_BACKWARD_COMPATIBILITY + ".properties"));
+        init(loader.getResourceAsStream(KYLIN_BACKWARD_COMPATIBILITY + PROPERTIES_SUFFIX));
         for (int i = 0; i < 10; i++) {
-            init(loader.getResourceAsStream(KYLIN_BACKWARD_COMPATIBILITY + (i) + ".properties"));
+            init(loader.getResourceAsStream(KYLIN_BACKWARD_COMPATIBILITY + (i) + PROPERTIES_SUFFIX));
         }
     }
 
@@ -143,26 +144,21 @@ private static void generateFindAndReplaceScript(String kylinRepoPath, String ou
         BackwardCompatibilityConfig bcc = new BackwardCompatibilityConfig();
         File repoDir = new File(kylinRepoPath).getCanonicalFile();
         File outputDir = new File(outputPath).getCanonicalFile();
-        PrintWriter out = null;
 
         // generate sed file
         File sedFile = new File(outputDir, "upgrade-old-config.sed");
-        try {
-            out = new PrintWriter(sedFile, "UTF-8");
+        try (PrintWriter out = new PrintWriter(sedFile, "UTF-8")) {
             for (Entry<String, String> e : bcc.old2new.entrySet()) {
                 out.println("s/" + quote(e.getKey()) + "/" + e.getValue() + "/g");
             }
             for (Entry<String, String> e : bcc.old2newPrefix.entrySet()) {
                 out.println("s/" + quote(e.getKey()) + "/" + e.getValue() + "/g");
             }
-        } finally {
-            IOUtils.closeQuietly(out);
         }
 
         // generate sh file
         File shFile = new File(outputDir, "upgrade-old-config.sh");
-        try {
-            out = new PrintWriter(shFile, "UTF-8");
+        try (PrintWriter out = new PrintWriter(shFile, "UTF-8")) {
             out.println("#!/bin/bash");
             Stack<File> stack = new Stack<>();
             stack.push(repoDir);
@@ -178,8 +174,6 @@ private static void generateFindAndReplaceScript(String kylinRepoPath, String ou
                         out.println("sed -i -f upgrade-old-config.sed " + f.getAbsolutePath());
                 }
             }
-        } finally {
-            IOUtils.closeQuietly(out);
         }
 
         System.out.println("Files generated:");
@@ -213,6 +207,6 @@ else if (name.endsWith("-site.xml"))
             return false;
         else
             return name.endsWith(".java") || name.endsWith(".js") || name.endsWith(".sh")
-                    || name.endsWith(".properties") || name.endsWith(".xml");
+                    || name.endsWith(PROPERTIES_SUFFIX) || name.endsWith(".xml");
     }
 }
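
The hunk above swaps manual PrintWriter cleanup (a finally block with IOUtils.closeQuietly) for try-with-resources. For reference, a minimal sketch of the resulting pattern; the file name and sed expression are illustrative:

    import java.io.File;
    import java.io.IOException;
    import java.io.PrintWriter;

    public class TryWithResourcesSketch {
        public static void main(String[] args) throws IOException {
            File out = new File("example.sed"); // illustrative file name
            // The writer is closed automatically, even if println throws.
            try (PrintWriter writer = new PrintWriter(out, "UTF-8")) {
                writer.println("s/old.key/new.key/g");
            }
        }
    }
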
diff --git a/core-common/src/main/java/org/apache/kylin/common/KylinConfig.java b/core-common/src/main/java/org/apache/kylin/common/KylinConfig.java
index e09ce26149..4db1748910 100644
--- a/core-common/src/main/java/org/apache/kylin/common/KylinConfig.java
+++ b/core-common/src/main/java/org/apache/kylin/common/KylinConfig.java
@@ -6,15 +6,15 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *     http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
-*/
+ */
 
 package org.apache.kylin.common;
 
@@ -50,6 +50,7 @@
 public class KylinConfig extends KylinConfigBase {
     private static final long serialVersionUID = 1L;
     private static final Logger logger = LoggerFactory.getLogger(KylinConfig.class);
+    private static final String METADATA_URI_PREFIX = "Metadata uri : ";
 
     /**
      * Kylin properties file name
@@ -61,20 +62,23 @@
     // static cached instances
     private static KylinConfig SYS_ENV_INSTANCE = null;
 
+    // static default Ordered Properties, only need load from classpath once
+    private static OrderedProperties defaultOrderedProperties = new OrderedProperties();
+
     // thread-local instances, will override SYS_ENV_INSTANCE
     private static transient ThreadLocal<KylinConfig> THREAD_ENV_INSTANCE = new ThreadLocal<>();
 
     static {
         /*
          * Make Calcite to work with Unicode.
-         * 
+         *
          * Sets default char set for string literals in SQL and row types of
          * RelNode. This is more a label used to compare row type equality. For
          * both SQL string and row record, they are passed to Calcite in String
          * object and does not require additional codec.
-         * 
+         *
          * Ref SaffronProperties.defaultCharset
-         * Ref SqlUtil.translateCharacterSetName() 
+         * Ref SqlUtil.translateCharacterSetName()
          * Ref NlsString constructor()
          */
         // copied from org.apache.calcite.util.ConversionUtil.NATIVE_UTF16_CHARSET_NAME
@@ -82,6 +86,35 @@
         System.setProperty("saffron.default.charset", NATIVE_UTF16_CHARSET_NAME);
         System.setProperty("saffron.default.nationalcharset", NATIVE_UTF16_CHARSET_NAME);
         System.setProperty("saffron.default.collation.name", NATIVE_UTF16_CHARSET_NAME + "$en_US");
+
+    }
+
+    /**
+     * Build the default ordered properties from the classpath. Since these files ship inside core-common.jar, they only need to be loaded once.
+     */
+    private static void buildDefaultOrderedProperties() {
+        // 1. load default configurations from classpath.
+        // we have a kylin-defaults.properties in kylin/core-common/src/main/resources
+        try {
+            URL resource = Thread.currentThread().getContextClassLoader().getResource(KYLIN_DEFAULT_CONF_PROPERTIES_FILE);
+            Preconditions.checkNotNull(resource);
+            logger.info("Loading kylin-defaults.properties from {}", resource.getPath());
+            loadPropertiesFromInputStream(resource.openStream(), defaultOrderedProperties);
+
+            // 2. load additional default configurations from classpath.
+            // This preserves the old behavior of loading kylin-defaults(0~9).properties from kylin/core-common/src/main/resources.
+            // Consider removing this logic if it is no longer needed.
+            for (int i = 0; i < 10; i++) {
+                String fileName = "kylin-defaults" + (i) + ".properties";
+                URL additionalResource = Thread.currentThread().getContextClassLoader().getResource(fileName);
+                if (additionalResource != null) {
+                    logger.info("Loading {} from {} ", fileName, additionalResource.getPath());
+                    loadPropertiesFromInputStream(additionalResource.openStream(), defaultOrderedProperties);
+                }
+            }
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
     }
 
     public static KylinConfig getInstanceFromEnv() {
@@ -93,6 +126,10 @@ public static KylinConfig getInstanceFromEnv() {
 
             if (SYS_ENV_INSTANCE == null) {
                 try {
+                    // buildDefaultOrderedProperties() is only invoked once, here.
+                    // The HBase coprocessor never reaches this code path because it does not call getInstanceFromEnv().
+                    buildDefaultOrderedProperties();
+
                     config = new KylinConfig();
                     config.reloadKylinConfig(buildSiteProperties());
 
@@ -156,20 +193,20 @@ private static UriType decideUriType(String metaUri) {
                         return UriType.PROPERTIES_FILE;
                     } else {
                         throw new IllegalStateException(
-                                "Metadata uri : " + metaUri + " is a local file but not kylin.properties");
+                                METADATA_URI_PREFIX + metaUri + " is a local file but not kylin.properties");
                     }
                 } else {
                     throw new IllegalStateException(
-                            "Metadata uri : " + metaUri + " looks like a file but it's neither a file nor a directory");
+                            METADATA_URI_PREFIX + metaUri + " looks like a file but it's neither a file nor a directory");
                 }
             } else {
                 if (RestClient.matchFullRestPattern(metaUri))
                     return UriType.REST_ADDR;
                 else
-                    throw new IllegalStateException("Metadata uri : " + metaUri + " is not a valid REST URI address");
+                    throw new IllegalStateException(METADATA_URI_PREFIX + metaUri + " is not a valid REST URI address");
             }
         } catch (Exception e) {
-            throw new IllegalStateException("Metadata uri : " + metaUri + " is not recognized", e);
+            throw new IllegalStateException(METADATA_URI_PREFIX + metaUri + " is not recognized", e);
         }
     }
 
@@ -243,7 +280,7 @@ public static void setKylinConfigInEnvIfMissing(String propsInStr) throws IOExce
     public static SetAndUnsetThreadLocalConfig setAndUnsetThreadLocalConfig(KylinConfig config) {
         return new SetAndUnsetThreadLocalConfig(config);
     }
-    
+
     public static class SetAndUnsetThreadLocalConfig implements AutoCloseable {
 
         public SetAndUnsetThreadLocalConfig(KylinConfig config) {
@@ -325,22 +362,11 @@ private static Properties buildSiteProperties() {
     private static OrderedProperties buildSiteOrderedProps() {
 
         try {
-            // 1. load default configurations from classpath. 
-            // we have a kylin-defaults.properties in kylin/core-common/src/main/resources 
-            URL resource = Thread.currentThread().getContextClassLoader().getResource("kylin-defaults.properties");
-            Preconditions.checkNotNull(resource);
-            logger.info("Loading kylin-defaults.properties from {}", resource.getPath());
+            // 1. load default configurations from classpath.
+            // we have kylin-defaults.properties in kylin/core-common/src/main/resources
+            // Loading them on every call could block threads when Kylin serves concurrent query requests
             OrderedProperties orderedProperties = new OrderedProperties();
-            loadPropertiesFromInputStream(resource.openStream(), orderedProperties);
-
-            for (int i = 0; i < 10; i++) {
-                String fileName = "kylin-defaults" + (i) + ".properties";
-                URL additionalResource = Thread.currentThread().getContextClassLoader().getResource(fileName);
-                if (additionalResource != null) {
-                    logger.info("Loading {} from {} ", fileName, additionalResource.getPath());
-                    loadPropertiesFromInputStream(additionalResource.openStream(), orderedProperties);
-                }
-            }
+            orderedProperties.putAll(defaultOrderedProperties);
 
             // 2. load site conf, to keep backward compatibility it's still named kylin.properties
             // actually it's better to be named kylin-site.properties
@@ -368,9 +394,8 @@ private static OrderedProperties buildSiteOrderedProps() {
      */
     private static void loadPropertiesFromInputStream(InputStream inputStream, OrderedProperties properties) {
         Preconditions.checkNotNull(properties);
-        BufferedReader confReader = null;
-        try {
-            confReader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"));
+
+        try (BufferedReader confReader = new BufferedReader(new InputStreamReader(inputStream, "UTF-8"))) {
             OrderedProperties temp = new OrderedProperties();
             temp.load(confReader);
             temp = BCC.check(temp);
@@ -378,8 +403,6 @@ private static void loadPropertiesFromInputStream(InputStream inputStream, Order
             properties.putAll(temp);
         } catch (Exception e) {
             throw new RuntimeException(e);
-        } finally {
-            IOUtils.closeQuietly(confReader);
         }
     }
 
@@ -399,7 +422,7 @@ public static void setSandboxEnvIfPossible() {
     }
 
     // ============================================================================
-    
+
     transient Map<Class, Object> managersCache = new ConcurrentHashMap<>();
 
     private KylinConfig() {
@@ -418,19 +441,19 @@ protected KylinConfig(Properties props, boolean force) {
         if (managersCache == null) {
             managersCache = new ConcurrentHashMap<>();
         }
-        
+
         Object mgr = managersCache.get(clz);
         if (mgr != null)
             return (T) mgr;
-        
+
         synchronized (clz) {
             mgr = managersCache.get(clz);
             if (mgr != null)
                 return (T) mgr;
-            
+
             try {
                 logger.info("Creating new manager instance of " + clz);
-                
+
                 // new manager via static Manager.newInstance()
                 Method method = clz.getDeclaredMethod("newInstance", KylinConfig.class);
                 method.setAccessible(true); // override accessibility
@@ -442,14 +465,14 @@ protected KylinConfig(Properties props, boolean force) {
         }
         return (T) mgr;
     }
-    
+
     public void clearManagers() {
         KylinConfig base = base();
         if (base != this) {
             base.clearManagers();
             return;
         }
-        
+
         managersCache.clear();
     }
 
@@ -467,11 +490,7 @@ public String exportAllToString() throws IOException {
         for (Map.Entry<Object, Object> entry : allProps.entrySet()) {
             String key = entry.getKey().toString();
             String value = entry.getValue().toString();
-            if (!orderedProperties.containsProperty(key)) {
-                orderedProperties.setProperty(key, value);
-            } else if (!orderedProperties.getProperty(key).equalsIgnoreCase(value)) {
-                orderedProperties.setProperty(key, value);
-            }
+            orderedProperties.setProperty(key, value);
         }
 
         final StringBuilder sb = new StringBuilder();
@@ -500,19 +519,15 @@ public String exportToString(Collection<String> propertyKeys) throws IOException
     }
 
     public void exportToFile(File file) throws IOException {
-        FileOutputStream fos = null;
-        try {
-            fos = new FileOutputStream(file);
+        try (FileOutputStream fos = new FileOutputStream(file)) {
             getAllProperties().store(fos, file.getAbsolutePath());
-        } finally {
-            IOUtils.closeQuietly(fos);
         }
     }
 
     public synchronized void reloadFromSiteProperties() {
         reloadKylinConfig(buildSiteProperties());
     }
-    
+
     public KylinConfig base() {
         return this;
     }
diff --git a/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java b/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
index 5577307ac3..f67f6b3479 100644
--- a/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
+++ b/core-common/src/main/java/org/apache/kylin/common/KylinConfigBase.java
@@ -62,6 +62,8 @@
     private static final String DEFAULT = "default";
     private static final String KYLIN_ENGINE_MR_JOB_JAR = "kylin.engine.mr.job-jar";
     private static final String KYLIN_STORAGE_HBASE_COPROCESSOR_LOCAL_JAR = "kylin.storage.hbase.coprocessor-local-jar";
+    private static final String FILE_SCHEME = "file:";
+    private static final String MAPRFS_SCHEME = "maprfs:";
 
     /*
      * DON'T DEFINE CONSTANTS FOR PROPERTY KEYS!
@@ -238,6 +240,7 @@ public String getDeployEnv() {
     }
 
     private String cachedHdfsWorkingDirectory;
+    private String cachedBigCellDirectory;
 
     public String getHdfsWorkingDirectory() {
         if (cachedHdfsWorkingDirectory != null)
@@ -249,7 +252,6 @@ public String getHdfsWorkingDirectory() {
         if (!path.isAbsolute())
             throw new IllegalArgumentException("kylin.env.hdfs-working-dir must be absolute, but got " + root);
 
-        // make sure path is qualified
         try {
             FileSystem fs = path.getFileSystem(HadoopUtil.getCurrentConfiguration());
             path = fs.makeQualified(path);
@@ -264,14 +266,53 @@ public String getHdfsWorkingDirectory() {
             root += "/";
 
         cachedHdfsWorkingDirectory = root;
-        if (cachedHdfsWorkingDirectory.startsWith("file:")) {
-            cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace("file:", "file://");
-        } else if (cachedHdfsWorkingDirectory.startsWith("maprfs:")) {
-            cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace("maprfs:", "maprfs://");
+        if (cachedHdfsWorkingDirectory.startsWith(FILE_SCHEME)) {
+            cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace(FILE_SCHEME, "file://");
+        } else if (cachedHdfsWorkingDirectory.startsWith(MAPRFS_SCHEME)) {
+            cachedHdfsWorkingDirectory = cachedHdfsWorkingDirectory.replace(MAPRFS_SCHEME, "maprfs://");
         }
         return cachedHdfsWorkingDirectory;
     }
 
+    public String getMetastoreBigCellHdfsDirectory() {
+
+        if (cachedBigCellDirectory != null)
+            return cachedBigCellDirectory;
+
+        String root = getOptional("kylin.env.hdfs-metastore-bigcell-dir");
+
+        if (root == null) {
+            return getJdbcHdfsWorkingDirectory();
+        }
+
+        Path path = new Path(root);
+        if (!path.isAbsolute())
+            throw new IllegalArgumentException(
+                    "kylin.env.hdfs-metastore-bigcell-dir must be absolute, but got " + root);
+
+        // make sure path is qualified
+        try {
+            FileSystem fs = HadoopUtil.getReadFileSystem();
+            path = fs.makeQualified(path);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+
+        root = new Path(path, StringUtils.replaceChars(getMetadataUrlPrefix(), ':', '-')).toString();
+
+        if (!root.endsWith("/"))
+            root += "/";
+
+        cachedBigCellDirectory = root;
+        if (cachedBigCellDirectory.startsWith(FILE_SCHEME)) {
+            cachedBigCellDirectory = cachedBigCellDirectory.replace(FILE_SCHEME, "file://");
+        } else if (cachedBigCellDirectory.startsWith(MAPRFS_SCHEME)) {
+            cachedBigCellDirectory = cachedBigCellDirectory.replace(MAPRFS_SCHEME, "maprfs://");
+        }
+
+        return cachedBigCellDirectory;
+    }
+
     public String getReadHdfsWorkingDirectory() {
         if (StringUtils.isNotEmpty(getHBaseClusterFs())) {
             Path workingDir = new Path(getHdfsWorkingDirectory());
@@ -282,6 +323,19 @@ public String getReadHdfsWorkingDirectory() {
         return getHdfsWorkingDirectory();
     }
 
+    private String getJdbcHdfsWorkingDirectory() {
+        if (StringUtils.isNotEmpty(getJdbcFileSystem())) {
+            Path workingDir = new Path(getReadHdfsWorkingDirectory());
+            return new Path(getJdbcFileSystem(), Path.getPathWithoutSchemeAndAuthority(workingDir)).toString() + "/";
+        }
+
+        return getReadHdfsWorkingDirectory();
+    }
+
+    private String getJdbcFileSystem() {
+        return getOptional("kylin.storage.columnar.jdbc.file-system", "");
+    }
+
     public String getHdfsWorkingDirectory(String project) {
         if (isProjectIsolationEnabled() && project != null) {
             return new Path(getHdfsWorkingDirectory(), project).toString() + "/";
@@ -358,6 +412,26 @@ public String getMetadataUrlPrefix() {
         return r;
     }
 
+    public boolean isResourceStoreReconnectEnabled() {
+        return Boolean.parseBoolean(getOptional("kylin.resourcestore.reconnect-enabled", FALSE));
+    }
+
+    public int getResourceStoreReconnectBaseMs() {
+        return Integer.parseInt(getOptional("kylin.resourcestore.reconnect-base-ms", "1000"));
+    }
+
+    public int getResourceStoreReconnectMaxMs() {
+        return Integer.parseInt(getOptional("kylin.resourcestore.reconnect-max-ms", "60000"));
+    }
+
+    public int getResourceStoreReconnectTimeoutMs() {
+        return Integer.parseInt(getOptional("kylin.resourcestore.reconnect-timeout-ms", "3600000"));
+    }
+
+    public String getResourceStoreConnectionExceptions() {
+        return getOptional("kylin.resourcestore.connection-exceptions", "");
+    }
+
     public String getDataModelImpl() {
         return getOptional("kylin.metadata.data-model-impl", null);
     }
@@ -461,6 +535,10 @@ public double getExtTableSnapshotLocalCacheMaxSizeGB() {
         return Double.parseDouble(getOptional("kylin.snapshot.ext.local.cache.max-size-gb", "200"));
     }
 
+    public long getExtTableSnapshotLocalCacheCheckVolatileRange() {
+        return Long.parseLong(getOptional("kylin.snapshot.ext.local.cache.check.volatile", "3600000"));
+    }
+
     public boolean isShrunkenDictFromGlobalEnabled() {
         return Boolean.parseBoolean(this.getOptional("kylin.dictionary.shrunken-from-global-enabled", FALSE));
     }
@@ -732,6 +810,10 @@ public boolean isJobAutoReadyCubeEnabled() {
         return Boolean.parseBoolean(getOptional("kylin.job.cube-auto-ready-enabled", TRUE));
     }
 
+    public String getCubeInMemBuilderClass() {
+        return getOptional("kylin.job.cube-inmem-builder-class", "org.apache.kylin.cube.inmemcubing.DoggedCubeBuilder");
+    }
+
     // ============================================================================
     // SOURCE.HIVE
     // ============================================================================
@@ -746,6 +828,7 @@ public int getDefaultSource() {
         r.put(0, "org.apache.kylin.source.hive.HiveSource");
         r.put(1, "org.apache.kylin.source.kafka.KafkaSource");
         r.put(8, "org.apache.kylin.source.jdbc.JdbcSource");
+        r.put(16, "org.apache.kylin.source.jdbc.extensible.JdbcSource");
         r.putAll(convertKeyToInteger(getPropertiesByPrefix("kylin.source.provider.")));
         return r;
     }
@@ -1363,6 +1446,14 @@ public int getScanThreshold() {
         return Integer.parseInt(getOptional("kylin.query.scan-threshold", "10000000"));
     }
 
+    public boolean isLazyQueryEnabled() {
+        return Boolean.parseBoolean(getOptional("kylin.query.lazy-query-enabled", FALSE));
+    }
+
+    public long getLazyQueryWaitingTimeoutMilliSeconds() {
+        return Long.parseLong(getOptional("kylin.query.lazy-query-waiting-timeout-milliseconds", "60000"));
+    }
+
     public int getQueryConcurrentRunningThresholdForProject() {
         // by default there's no limitation
         return Integer.parseInt(getOptional("kylin.query.project-concurrent-running-threshold", "0"));
@@ -1449,6 +1540,23 @@ public boolean isQueryIgnoreUnknownFunction() {
         return Boolean.parseBoolean(this.getOptional("kylin.query.ignore-unknown-function", FALSE));
     }
 
+    public String getMemCachedHosts() {
+        return getRequired("kylin.cache.memcached.hosts");
+    }
+
+    public boolean isQuerySegmentCacheEnabled() {
+        return Boolean.parseBoolean(getOptional("kylin.query.segment-cache-enabled", FALSE));
+    }
+
+    public int getQuerySegmentCacheTimeout() {
+        return Integer.parseInt(getOptional("kylin.query.segment-cache-timeout", "2000"));
+    }
+
+    // define the maximum size for each segment in one query that can be cached, in megabytes
+    public int getQuerySegmentCacheMaxSize() {
+        return Integer.parseInt(getOptional("kylin.query.segment-cache-max-size", "200"));
+    }
+
     public String getQueryAccessController() {
         return getOptional("kylin.query.access-controller", null);
     }
@@ -1553,6 +1661,15 @@ public String getQueryRealizationFilter() {
         return getOptional("kylin.query.realization-filter", null);
     }
 
+    public String getSQLResponseSignatureClass() {
+        return this.getOptional("kylin.query.signature-class",
+                "org.apache.kylin.rest.signature.FactTableRealizationSetCalculator");
+    }
+
+    public boolean isQueryCacheSignatureEnabled() {
+        return Boolean.parseBoolean(this.getOptional("kylin.query.cache-signature-enabled", FALSE));
+    }
+
     // ============================================================================
     // SERVER
     // ============================================================================
@@ -1753,6 +1870,10 @@ public String getKylinMetricsSubjectQueryRpcCall() {
                 + getKylinMetricsSubjectSuffix();
     }
 
+    public Map<String, String> getKylinMetricsConf() {
+        return getPropertiesByPrefix("kylin.metrics.");
+    }
+
     // ============================================================================
     // tool
     // ============================================================================
@@ -1800,4 +1921,8 @@ public int getSmallCellMetadataErrorThreshold() {
     public int getJdbcResourceStoreMaxCellSize() {
         return Integer.parseInt(getOptional("kylin.metadata.jdbc.max-cell-size", "1048576")); // 1mb
     }
+
+    public String getJdbcSourceAdaptor() {
+        return getOptional("kylin.source.jdbc.adaptor");
+    }
 }
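
The new KylinConfigBase getters above (reconnect, segment cache, lazy query) all follow one idiom: read an optional property, fall back to a string default, parse into the target type. A hedged sketch of that pattern with invented property names:

    import java.util.Properties;

    public class ConfigExample {
        private final Properties props = new Properties();

        String getOptional(String key, String dflt) {
            String v = props.getProperty(key);
            return v != null ? v : dflt;
        }

        // mirrors e.g. getResourceStoreReconnectBaseMs() above
        int getReconnectBaseMs() {
            return Integer.parseInt(getOptional("example.reconnect-base-ms", "1000"));
        }

        // mirrors e.g. isResourceStoreReconnectEnabled() above
        boolean isReconnectEnabled() {
            return Boolean.parseBoolean(getOptional("example.reconnect-enabled", "false"));
        }
    }
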
diff --git a/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java b/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java
index ae18d0b57b..bbdb3a8645 100644
--- a/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java
+++ b/core-common/src/main/java/org/apache/kylin/common/KylinVersion.java
@@ -34,8 +34,8 @@
 import com.google.common.collect.Iterables;
 
 public class KylinVersion implements Comparable {
-    private static final String COMMIT_SHA1_v15 = "commit_SHA1";
-    private static final String COMMIT_SHA1_v13 = "commit.sha1";
+    private static final String COMMIT_SHA1_V15 = "commit_SHA1";
+    private static final String COMMIT_SHA1_V13 = "commit.sha1";
 
     public final int major;
     public final int minor;
@@ -194,9 +194,9 @@ public static String getKylinClientInformation() {
 
     public static String getGitCommitInfo() {
         try {
-            File commitFile = new File(KylinConfig.getKylinHome(), COMMIT_SHA1_v15);
+            File commitFile = new File(KylinConfig.getKylinHome(), COMMIT_SHA1_V15);
             if (!commitFile.exists()) {
-                commitFile = new File(KylinConfig.getKylinHome(), COMMIT_SHA1_v13);
+                commitFile = new File(KylinConfig.getKylinHome(), COMMIT_SHA1_V13);
             }
             List<String> lines = FileUtils.readLines(commitFile, Charset.defaultCharset());
             StringBuilder sb = new StringBuilder();
diff --git a/core-common/src/main/java/org/apache/kylin/common/QueryContext.java b/core-common/src/main/java/org/apache/kylin/common/QueryContext.java
index ef288c7d6d..000f7bf28a 100644
--- a/core-common/src/main/java/org/apache/kylin/common/QueryContext.java
+++ b/core-common/src/main/java/org/apache/kylin/common/QueryContext.java
@@ -22,6 +22,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -39,6 +40,9 @@
 public class QueryContext {
 
     private static final Logger logger = LoggerFactory.getLogger(QueryContext.class);
+    private static final String CSSR_SHOULD_BE_INIT_FOR_CONTEXT = "CubeSegmentStatisticsResult should be initialized for context {}";
+    private static final String CSSM_SHOULD_BE_INIT_FOR_CSSR = "cubeSegmentStatisticsMap should be initialized for CubeSegmentStatisticsResult with query type {}";
+    private static final String INPUT = " input ";
 
     public interface QueryStopListener {
         void stop(QueryContext query);
@@ -179,7 +183,7 @@ public Throwable getThrowable() {
     }
 
     public void addContext(int ctxId, String type, boolean ifCube) {
-        Map<String, Map<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap = null;
+        ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap = null;
         if (ifCube) {
             cubeSegmentStatisticsMap = Maps.newConcurrentMap();
         }
@@ -204,6 +208,50 @@ public void setContextRealization(int ctxId, String realizationName, int realiza
         return Lists.newArrayList(cubeSegmentStatisticsResultMap.values());
     }
 
+    public CubeSegmentStatistics getCubeSegmentStatistics(int ctxId, String cubeName, String segmentName) {
+        CubeSegmentStatisticsResult cubeSegmentStatisticsResult = cubeSegmentStatisticsResultMap.get(ctxId);
+        if (cubeSegmentStatisticsResult == null) {
+            logger.warn(CSSR_SHOULD_BE_INIT_FOR_CONTEXT, ctxId);
+            return null;
+        }
+        ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap = cubeSegmentStatisticsResult.cubeSegmentStatisticsMap;
+        if (cubeSegmentStatisticsMap == null) {
+            logger.warn(CSSM_SHOULD_BE_INIT_FOR_CSSR, cubeSegmentStatisticsResult.queryType);
+            return null;
+        }
+        ConcurrentMap<String, CubeSegmentStatistics> segmentStatisticsMap = cubeSegmentStatisticsMap.get(cubeName);
+        if (segmentStatisticsMap == null) {
+            logger.warn(
+                    "cubeSegmentStatistic should be initialized for cube {}", cubeName);
+            return null;
+        }
+        CubeSegmentStatistics segmentStatistics = segmentStatisticsMap.get(segmentName);
+        if (segmentStatistics == null) {
+            logger.warn(
+                    "segmentStatistics should be initialized for cube {} with segment{}", cubeName, segmentName);
+            return null;
+        }
+        return segmentStatistics;
+    }
+
+    public void addCubeSegmentStatistics(int ctxId, CubeSegmentStatistics cubeSegmentStatistics) {
+        CubeSegmentStatisticsResult cubeSegmentStatisticsResult = cubeSegmentStatisticsResultMap.get(ctxId);
+        if (cubeSegmentStatisticsResult == null) {
+            logger.warn(CSSR_SHOULD_BE_INIT_FOR_CONTEXT, ctxId);
+            return;
+        }
+        ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap = cubeSegmentStatisticsResult.cubeSegmentStatisticsMap;
+        if (cubeSegmentStatisticsMap == null) {
+            logger.warn(CSSM_SHOULD_BE_INIT_FOR_CSSR, cubeSegmentStatisticsResult.queryType);
+            return;
+        }
+        String cubeName = cubeSegmentStatistics.cubeName;
+        cubeSegmentStatisticsMap.putIfAbsent(cubeName, Maps.<String, CubeSegmentStatistics> newConcurrentMap());
+        ConcurrentMap<String, CubeSegmentStatistics> segmentStatisticsMap = cubeSegmentStatisticsMap.get(cubeName);
+
+        segmentStatisticsMap.put(cubeSegmentStatistics.getSegmentName(), cubeSegmentStatistics);
+    }
+
     public void addRPCStatistics(int ctxId, String rpcServer, String cubeName, String segmentName, long sourceCuboidId,
             long targetCuboidId, long filterMask, Exception e, long rpcCallTimeMs, long skippedRows, long scannedRows,
             long returnedRows, long aggregatedRows, long scannedBytes) {
@@ -215,40 +263,35 @@ public void addRPCStatistics(int ctxId, String rpcServer, String cubeName, Strin
 
         CubeSegmentStatisticsResult cubeSegmentStatisticsResult = cubeSegmentStatisticsResultMap.get(ctxId);
         if (cubeSegmentStatisticsResult == null) {
-            logger.warn("CubeSegmentStatisticsResult should be initialized for context " + ctxId);
+            logger.warn(CSSR_SHOULD_BE_INIT_FOR_CONTEXT, ctxId);
             return;
         }
-        Map<String, Map<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap = cubeSegmentStatisticsResult.cubeSegmentStatisticsMap;
+        ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap = cubeSegmentStatisticsResult.cubeSegmentStatisticsMap;
         if (cubeSegmentStatisticsMap == null) {
-            logger.warn(
-                    "cubeSegmentStatisticsMap should be initialized for CubeSegmentStatisticsResult with query type "
-                            + cubeSegmentStatisticsResult.queryType);
+            logger.warn(CSSM_SHOULD_BE_INIT_FOR_CSSR, cubeSegmentStatisticsResult.queryType);
             return;
         }
-        Map<String, CubeSegmentStatistics> segmentStatisticsMap = cubeSegmentStatisticsMap.get(cubeName);
-        if (segmentStatisticsMap == null) {
-            segmentStatisticsMap = Maps.newConcurrentMap();
-            cubeSegmentStatisticsMap.put(cubeName, segmentStatisticsMap);
-        }
+        cubeSegmentStatisticsMap.putIfAbsent(cubeName, Maps.<String, CubeSegmentStatistics> newConcurrentMap());
+        ConcurrentMap<String, CubeSegmentStatistics> segmentStatisticsMap = cubeSegmentStatisticsMap.get(cubeName);
+
+        CubeSegmentStatistics old = segmentStatisticsMap.putIfAbsent(segmentName, new CubeSegmentStatistics());
         CubeSegmentStatistics segmentStatistics = segmentStatisticsMap.get(segmentName);
-        if (segmentStatistics == null) {
-            segmentStatistics = new CubeSegmentStatistics();
-            segmentStatisticsMap.put(segmentName, segmentStatistics);
+        if (old == null) {
             segmentStatistics.setWrapper(cubeName, segmentName, sourceCuboidId, targetCuboidId, filterMask);
-        }
-        if (segmentStatistics.sourceCuboidId != sourceCuboidId || segmentStatistics.targetCuboidId != targetCuboidId
+        } else if (segmentStatistics.sourceCuboidId != sourceCuboidId
+                || segmentStatistics.targetCuboidId != targetCuboidId
                 || segmentStatistics.filterMask != filterMask) {
             StringBuilder inconsistency = new StringBuilder();
             if (segmentStatistics.sourceCuboidId != sourceCuboidId) {
                 inconsistency.append(
-                        "sourceCuboidId exist " + segmentStatistics.sourceCuboidId + " input " + sourceCuboidId);
+                        "sourceCuboidId exist " + segmentStatistics.sourceCuboidId + INPUT + sourceCuboidId);
             }
             if (segmentStatistics.targetCuboidId != targetCuboidId) {
                 inconsistency.append(
-                        "targetCuboidId exist " + segmentStatistics.targetCuboidId + " input " + targetCuboidId);
+                        "targetCuboidId exist " + segmentStatistics.targetCuboidId + INPUT + targetCuboidId);
             }
             if (segmentStatistics.filterMask != filterMask) {
-                inconsistency.append("filterMask exist " + segmentStatistics.filterMask + " input " + filterMask);
+                inconsistency.append("filterMask exist " + segmentStatistics.filterMask + INPUT + filterMask);
             }
             logger.error("cube segment statistics wrapper is not consistent due to " + inconsistency.toString());
             return;
@@ -397,8 +440,8 @@ public void setWrapper(String cubeName, String segmentName, long sourceCuboidId,
             this.filterMask = filterMask;
         }
 
-        public void addRPCStats(long callTimeMs, long skipCount, long scanCount, long returnCount, long aggrCount,
-                long scanBytes, boolean ifSuccess) {
+        public synchronized void addRPCStats(long callTimeMs, long skipCount, long scanCount, long returnCount,
+                long aggrCount, long scanBytes, boolean ifSuccess) {
             this.callCount++;
             this.callTimeSum += callTimeMs;
             if (this.callTimeMax < callTimeMs) {
@@ -536,7 +579,7 @@ public String toString() {
         protected static final long serialVersionUID = 1L;
 
         private String queryType;
-        private Map<String, Map<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap;
+        private ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap;
         private String realization;
         private int realizationType;
 
@@ -544,7 +587,7 @@ public CubeSegmentStatisticsResult() {
         }
 
         public CubeSegmentStatisticsResult(String queryType,
-                Map<String, Map<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap) {
+                ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap) {
             this.queryType = queryType;
             this.cubeSegmentStatisticsMap = cubeSegmentStatisticsMap;
         }
@@ -570,7 +613,7 @@ public void setQueryType(String queryType) {
         }
 
         public void setCubeSegmentStatisticsMap(
-                Map<String, Map<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap) {
+                ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> cubeSegmentStatisticsMap) {
             this.cubeSegmentStatisticsMap = cubeSegmentStatisticsMap;
         }
 
@@ -579,7 +622,7 @@ public String getQueryType() {
 
         }
 
-        public Map<String, Map<String, CubeSegmentStatistics>> getCubeSegmentStatisticsMap() {
+        public ConcurrentMap<String, ConcurrentMap<String, CubeSegmentStatistics>> getCubeSegmentStatisticsMap() {
             return cubeSegmentStatisticsMap;
         }
 
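
The QueryContext changes replace Map with ConcurrentMap and get-then-put with putIfAbsent because several threads may record statistics for the same query at once. A standalone sketch of the race-free create-if-missing idiom (names are illustrative):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class NestedMapExample {
        private final ConcurrentMap<String, ConcurrentMap<String, Long>> stats = new ConcurrentHashMap<>();

        public void add(String cube, String segment, long rows) {
            // install the inner map atomically; a plain get-then-put could
            // drop an entry when two threads race on the same cube name
            stats.putIfAbsent(cube, new ConcurrentHashMap<String, Long>());
            ConcurrentMap<String, Long> inner = stats.get(cube);
            inner.merge(segment, rows, Long::sum);
        }
    }

Whichever thread loses the putIfAbsent race simply observes the winner's map on the following get(), so both writers end up in the same inner map.
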
diff --git a/core-common/src/main/java/org/apache/kylin/common/debug/BackdoorToggles.java b/core-common/src/main/java/org/apache/kylin/common/debug/BackdoorToggles.java
index 47fbbcd9fb..be0f7a6a86 100644
--- a/core-common/src/main/java/org/apache/kylin/common/debug/BackdoorToggles.java
+++ b/core-common/src/main/java/org/apache/kylin/common/debug/BackdoorToggles.java
@@ -86,6 +86,10 @@ public static boolean getDisableCache() {
         return getBoolean(DEBUG_TOGGLE_DISABLE_QUERY_CACHE);
     }
 
+    public static boolean getDisableSegmentCache() {
+        return getBoolean(DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE);
+    }
+
     public static boolean getDisableFuzzyKey() {
         return getBoolean(DEBUG_TOGGLE_DISABLE_FUZZY_KEY);
     }
@@ -214,6 +218,18 @@ public static Properties getJdbcDriverClientCalciteProps() {
      */
     public final static String DEBUG_TOGGLE_DISABLE_QUERY_CACHE = "DEBUG_TOGGLE_DISABLE_QUERY_CACHE";
 
+    /**
+     * set DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE=true to prevent using
+     * segment cache for the current query
+     *
+     * example (put it into the request body):
+     *
+     * "backdoorToggles": {
+     *     "DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE": "true"
+     * }
+     */
+    public final static String DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE = "DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE";
+
     /**
      * set DEBUG_TOGGLE_HBASE_CUBE_QUERY_VERSION=v1/v2 to control which version CubeStorageQuery to use
      *
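
BackdoorToggles reads per-request string flags such as the new DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE. A rough sketch of a similar toggle holder, assuming thread-local storage (Kylin's actual storage mechanism may differ):

    import java.util.HashMap;
    import java.util.Map;

    public class TogglesExample {
        // one toggle map per request-handling thread
        private static final ThreadLocal<Map<String, String>> TOGGLES =
                ThreadLocal.withInitial(HashMap::new);

        public static void set(String key, String value) {
            TOGGLES.get().put(key, value);
        }

        public static boolean getBoolean(String key) {
            return "true".equals(TOGGLES.get().get(key));
        }

        public static void main(String[] args) {
            set("DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE", "true");
            System.out.println(getBoolean("DEBUG_TOGGLE_DISABLE_QUERY_SEGMENT_CACHE")); // true
        }
    }
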
diff --git a/core-common/src/main/java/org/apache/kylin/common/metrics/metrics2/JsonFileMetricsReporter.java b/core-common/src/main/java/org/apache/kylin/common/metrics/metrics2/JsonFileMetricsReporter.java
index 95c51162c6..40cb0a67f9 100644
--- a/core-common/src/main/java/org/apache/kylin/common/metrics/metrics2/JsonFileMetricsReporter.java
+++ b/core-common/src/main/java/org/apache/kylin/common/metrics/metrics2/JsonFileMetricsReporter.java
@@ -98,20 +98,14 @@ public void run() {
                         return;
                     }
 
-                    BufferedWriter bw = null;
-                    try {
+                    try (BufferedWriter bw = new BufferedWriter(
+                            new OutputStreamWriter(fs.create(tmpPath, true), StandardCharsets.UTF_8))) {
                         fs.delete(tmpPath, true);
-                        bw = new BufferedWriter(
-                                new OutputStreamWriter(fs.create(tmpPath, true), StandardCharsets.UTF_8));
                         bw.write(json);
                         fs.setPermission(tmpPath, FsPermission.createImmutable((short) 0644));
                     } catch (IOException e) {
                         LOGGER.error("Unable to write to temp file " + tmpPath, e);
                         return;
-                    } finally {
-                        if (bw != null) {
-                            bw.close();
-                        }
                     }
 
                     try {
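
This hunk, like the KylinConfig ones earlier, swaps manual try/finally plus IOUtils.closeQuietly for try-with-resources. A generic sketch of the idiom:

    import java.io.BufferedWriter;
    import java.io.FileWriter;
    import java.io.IOException;

    public class WriteExample {
        public static void write(String path, String text) throws IOException {
            // bw is closed whether write() returns normally or throws
            try (BufferedWriter bw = new BufferedWriter(new FileWriter(path))) {
                bw.write(text);
            }
        }
    }

A close() failure is attached to the primary exception as a suppressed exception rather than silently discarded, which is exactly what closeQuietly could not offer.
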
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/BrokenInputStream.java b/core-common/src/main/java/org/apache/kylin/common/persistence/BrokenInputStream.java
deleted file mode 100644
index 9eddba8d43..0000000000
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/BrokenInputStream.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.kylin.common.persistence;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.kylin.common.util.JsonUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-public class BrokenInputStream extends InputStream {
-    private static Logger logger = LoggerFactory.getLogger(BrokenInputStream.class);
-    private final ByteArrayInputStream in;
-
-    public BrokenInputStream(BrokenEntity brokenEntity) {
-        final ByteArrayOutputStream out = new ByteArrayOutputStream();
-        try {
-            IOUtils.write(BrokenEntity.MAGIC, out);
-            IOUtils.write(JsonUtil.writeValueAsBytes(brokenEntity), out);
-        } catch (IOException e) {
-            logger.error("There is something error when we serialize BrokenEntity: ", e);
-            throw new RuntimeException("There is something error when we serialize BrokenEntity.");
-        }
-
-        in = new ByteArrayInputStream(out.toByteArray());
-    }
-
-    @Override
-    public int read() {
-        return in.read();
-    }
-
-    @Override
-    public void close() throws IOException {
-        in.close();
-        super.close();
-    }
-}
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/BrokenEntity.java b/core-common/src/main/java/org/apache/kylin/common/persistence/ContentReader.java
similarity index 53%
rename from core-common/src/main/java/org/apache/kylin/common/persistence/BrokenEntity.java
rename to core-common/src/main/java/org/apache/kylin/common/persistence/ContentReader.java
index b86b0d984d..d404171282 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/BrokenEntity.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/ContentReader.java
@@ -18,39 +18,33 @@
 
 package org.apache.kylin.common.persistence;
 
-import com.fasterxml.jackson.annotation.JsonProperty;
+import java.io.DataInputStream;
+import java.io.IOException;
 
-public class BrokenEntity extends RootPersistentEntity {
+import org.apache.commons.io.IOUtils;
 
-    protected static final byte[] MAGIC = new byte[]{'B', 'R', 'O', 'K', 'E', 'N'};
+public class ContentReader<T extends RootPersistentEntity> {
 
-    @JsonProperty("resPath")
-    private String resPath;
+    private final Serializer<T> serializer;
 
-    @JsonProperty("errorMsg")
-    private String errorMsg;
-
-    public BrokenEntity() {
-    }
-
-    public BrokenEntity(String resPath, String errorMsg) {
-        this.resPath = resPath;
-        this.errorMsg = errorMsg;
-    }
-
-    public String getResPath() {
-        return resPath;
-    }
-
-    public void setResPath(String resPath) {
-        this.resPath = resPath;
-    }
-
-    public String getErrorMsg() {
-        return errorMsg;
+    public ContentReader(Serializer<T> serializer) {
+        this.serializer = serializer;
     }
 
-    public void setErrorMsg(String errorMsg) {
-        this.errorMsg = errorMsg;
+    public T readContent(RawResource res) throws IOException {
+        if (res == null)
+            return null;
+
+        DataInputStream din = new DataInputStream(res.content());
+        try {
+            T r = serializer.deserialize(din);
+            if (r != null) {
+                r.setLastModified(res.lastModified());
+            }
+            return r;
+        } finally {
+            IOUtils.closeQuietly(din);
+            IOUtils.closeQuietly(res.content());
+        }
     }
 }
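
ContentReader pairs a RawResource stream with a Serializer so callers get a typed entity back and the underlying stream is always closed. A self-contained sketch of the same shape, with a hypothetical MiniSerializer standing in for Kylin's Serializer<T>:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    // a minimal stand-in for the Serializer<T> contract used above
    interface MiniSerializer<T> {
        T deserialize(DataInputStream in) throws IOException;
    }

    public class ReaderExample {
        static <T> T read(byte[] bytes, MiniSerializer<T> ser) throws IOException {
            // the stream is closed even if deserialize() throws
            try (DataInputStream din = new DataInputStream(new ByteArrayInputStream(bytes))) {
                return ser.deserialize(din);
            }
        }

        public static void main(String[] args) throws IOException {
            String s = read("hello".getBytes("UTF-8"), in -> {
                byte[] buf = new byte[in.available()];
                in.readFully(buf);
                return new String(buf, "UTF-8");
            });
            System.out.println(s); // hello
        }
    }
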
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/ContentWriter.java b/core-common/src/main/java/org/apache/kylin/common/persistence/ContentWriter.java
new file mode 100644
index 0000000000..c7d963d40c
--- /dev/null
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/ContentWriter.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.common.persistence;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.commons.io.IOUtils;
+
+public abstract class ContentWriter {
+
+    public static ContentWriter create(byte[] data) {
+        return create(new ByteArrayInputStream(data));
+    }
+
+    public static ContentWriter create(final InputStream is) {
+        return new ContentWriter() {
+            @Override
+            void write(DataOutputStream out) throws IOException {
+                int n = IOUtils.copy(is, out);
+                bytesWritten += (n < 0 ? Integer.MAX_VALUE : n);
+            }
+        };
+    }
+
+    public static <T extends RootPersistentEntity> ContentWriter create(final T obj, final Serializer<T> serializer) {
+        return new ContentWriter() {
+            @Override
+            void write(DataOutputStream out) throws IOException {
+                int pos = out.size();
+                serializer.serialize(obj, out);
+                bytesWritten += (out.size() - pos);
+            }
+        };
+    }
+
+    private boolean isBigContent = false;
+    protected long bytesWritten = 0;
+
+    abstract void write(DataOutputStream out) throws IOException;
+
+    public void markBigContent() {
+        isBigContent = true;
+    }
+
+    public boolean isBigContent() {
+        return isBigContent;
+    }
+
+    public long bytesWritten() {
+        return bytesWritten;
+    }
+
+    public byte[] extractAllBytes() throws IOException {
+        try (ByteArrayOutputStream bout = new ByteArrayOutputStream();
+                DataOutputStream dout = new DataOutputStream(bout)) {
+            write(dout);
+            dout.flush();
+            return bout.toByteArray();
+        }
+    }
+}
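
A short usage sketch for the ContentWriter factories above (the demo class itself is hypothetical and assumes the ContentWriter defined above is on the classpath): wrap a byte[], let extractAllBytes() drive write() into an in-memory buffer, and read back the size via bytesWritten().

    import org.apache.kylin.common.persistence.ContentWriter;

    public class ContentWriterDemo {
        public static void main(String[] args) throws Exception {
            byte[] payload = "some metadata".getBytes("UTF-8");

            // wrap an in-memory byte[]; write() streams it to the target output
            ContentWriter w = ContentWriter.create(payload);

            // extractAllBytes() runs write() against an internal buffer
            byte[] copy = w.extractAllBytes();

            System.out.println(copy.length);      // 13
            System.out.println(w.bytesWritten()); // 13
        }
    }
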
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/ExponentialBackoffRetry.java b/core-common/src/main/java/org/apache/kylin/common/persistence/ExponentialBackoffRetry.java
new file mode 100644
index 0000000000..315c51efcb
--- /dev/null
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/ExponentialBackoffRetry.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.common.persistence;
+
+import java.io.IOException;
+import java.util.concurrent.Callable;
+
+import org.apache.kylin.common.KylinConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ExponentialBackoffRetry {
+    private static final Logger logger = LoggerFactory.getLogger(ExponentialBackoffRetry.class);
+
+    private final ResourceStore store;
+    private final KylinConfig config;
+    private final int baseSleepTimeMs;
+    private final int maxSleepTimeMs;
+    private long firstSleepTime;
+    private int retryCount;
+
+    public ExponentialBackoffRetry(ResourceStore store) {
+        this.store = store;
+        this.config = store.getConfig();
+        this.baseSleepTimeMs = config.getResourceStoreReconnectBaseMs();
+        this.maxSleepTimeMs = config.getResourceStoreReconnectMaxMs();
+        this.retryCount = 0;
+    }
+
+    public <V> V doWithRetry(Callable<V> callable) throws IOException {
+        V result = null;
+        boolean done = false;
+
+        while (!done) {
+            try {
+                result = callable.call();
+                done = true;
+            } catch (Throwable ex) {
+                boolean shouldRetry = checkIfAllowRetry(ex);
+                if (!shouldRetry) {
+                    throwIOException(ex);
+                }
+            }
+        }
+
+        return result;
+    }
+
+    private void throwIOException(Throwable ex) throws IOException {
+        if (ex instanceof IOException)
+            throw (IOException) ex;
+
+        if (ex instanceof RuntimeException)
+            throw (RuntimeException) ex;
+
+        if (ex instanceof Error)
+            throw (Error) ex;
+
+        throw new IOException(ex);
+    }
+
+    private boolean checkIfAllowRetry(Throwable ex) {
+        if (config.isResourceStoreReconnectEnabled() && store.isUnreachableException(ex)) {
+            if (isTimeOut(config.getResourceStoreReconnectTimeoutMs())) {
+                logger.error("Reconnect to resource store timeout, abandoning...", ex);
+                return false;
+            }
+
+            long waitMs = getSleepTimeMs();
+            long seconds = waitMs / 1000;
+            logger.info("Will try to re-connect after {} seconds.", seconds);
+            try {
+                Thread.sleep(waitMs);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt(); // restore the interrupt flag for callers
+                throw new RuntimeException("Current thread for resource store's CRUD is interrupted, abandoning...", e);
+            }
+            increaseRetryCount();
+            return true;
+        }
+
+        return false;
+    }
+
+    private long getSleepTimeMs() {
+        if (retryCount == 0)
+            firstSleepTime = System.currentTimeMillis();
+
+        long ms = baseSleepTimeMs * (1 << retryCount);
+
+        if (ms > maxSleepTimeMs)
+            ms = maxSleepTimeMs;
+        return ms;
+    }
+
+    private void increaseRetryCount() {
+        retryCount++;
+    }
+
+    private boolean isTimeOut(long timeoutMs) {
+        return retryCount != 0 && (System.currentTimeMillis() - firstSleepTime >= timeoutMs);
+    }
+}
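
ExponentialBackoffRetry doubles the sleep on every retry (base * 2^retryCount), caps it at the configured maximum, and gives up once the elapsed time since the first sleep exceeds the timeout. A tiny sketch of the resulting schedule for the default 1000 ms base and 60000 ms cap; shifting a long also sidesteps the int overflow that 1 << retryCount would hit at very high retry counts:

    public class BackoffSchedule {
        public static void main(String[] args) {
            int baseMs = 1000, maxMs = 60000;
            // same growth rule as getSleepTimeMs() above: base * 2^retry, capped
            for (int retry = 0; retry < 10; retry++) {
                long ms = Math.min((long) baseMs << retry, maxMs);
                System.out.println("retry " + retry + " -> sleep " + ms + " ms");
            }
            // prints 1000, 2000, 4000, ... and stays at 60000 from retry 6 on
        }
    }
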
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/FileResourceStore.java b/core-common/src/main/java/org/apache/kylin/common/persistence/FileResourceStore.java
index 99e85dc315..bccb7a369e 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/FileResourceStore.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/FileResourceStore.java
@@ -6,45 +6,40 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *     http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
-*/
+ */
 
 package org.apache.kylin.common.persistence;
 
-import java.io.ByteArrayInputStream;
+import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.NavigableSet;
-import java.util.TreeSet;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
 import org.apache.kylin.common.KylinConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
 public class FileResourceStore extends ResourceStore {
 
     private static final Logger logger = LoggerFactory.getLogger(FileResourceStore.class);
 
     File root;
 
+    int failPutResourceCountDown = Integer.MAX_VALUE;
+    int failVisitFolderCountDown = Integer.MAX_VALUE;
+
     public FileResourceStore(KylinConfig kylinConfig) {
         super(kylinConfig);
         root = new File(getPath(kylinConfig)).getAbsoluteFile();
@@ -57,148 +52,146 @@ protected String getPath(KylinConfig kylinConfig) {
         return kylinConfig.getMetadataUrl().getIdentifier();
     }
 
-    @Override
-    protected NavigableSet<String> listResourcesImpl(String folderPath, boolean recursive) throws IOException {
-        synchronized (FileResourceStore.class) {
-            TreeSet<String> r = new TreeSet<>();
-            File file = file(folderPath);
-            String[] names = file.list();
-            // not a directory
-            if (names == null)
-                return null;
-            String prefix = folderPath.endsWith("/") ? folderPath : folderPath + "/";
-            if (recursive) {
-                Collection<File> files = FileUtils.listFiles(file, null, true);
-                for (File f : files) {
-                    String path = f.getAbsolutePath();
-                    String[] split = path.split(prefix);
-                    Preconditions.checkArgument(split.length == 2);
-                    r.add(prefix + split[1]);
-                }
-            } else {
-                for (String n : names) {
-                    r.add(prefix + n);
-                }
-            }
-            return r;
-        }
-    }
-
     @Override
     protected boolean existsImpl(String resPath) throws IOException {
-        synchronized (FileResourceStore.class) {
-            File f = file(resPath);
-            return f.exists() && f.isFile(); // directory is not considered a resource
-        }
+        File f = file(resPath);
+        return f.exists() && f.isFile(); // directory is not considered a resource
     }
 
     @Override
-    protected List<RawResource> getAllResourcesImpl(String folderPath, long timeStart, long timeEndExclusive)
-            throws IOException {
-        synchronized (FileResourceStore.class) {
-
-            NavigableSet<String> resources = listResources(folderPath);
-            if (resources == null)
-                return Collections.emptyList();
-
-            List<RawResource> result = Lists.newArrayListWithCapacity(resources.size());
-            try {
-                for (String res : resources) {
-                    long ts = getResourceTimestampImpl(res);
-                    if (timeStart <= ts && ts < timeEndExclusive) {
-                        RawResource resource = getResourceImpl(res);
-                        if (resource != null) // can be null if is a sub-folder
-                            result.add(resource);
-                    }
+    protected void visitFolderImpl(String folderPath, boolean recursive, VisitFilter filter, boolean loadContent,
+                                   Visitor visitor) throws IOException {
+        if (--failVisitFolderCountDown == 0)
+            throw new IOException("for test");
+
+        File file = file(folderPath);
+        if (!file.exists() || !file.isDirectory())
+            return;
+
+        String prefix = fixWinPath(file);
+        Collection<File> files = FileUtils.listFiles(file, null, recursive);
+
+        for (File f : files) {
+
+            String path = fixWinPath(f);
+            if (!path.startsWith(prefix))
+                throw new IllegalStateException("File path " + path + " is supposed to start with " + prefix);
+
+            String resPath = folderPath.equals("/") ? path.substring(prefix.length())
+                    : folderPath + path.substring(prefix.length());
+
+            if (filter.matches(resPath, f.lastModified())) {
+                RawResource raw = loadContent ? new RawResource(resPath, f.lastModified(), new FileInputStream(f))
+                        : new RawResource(resPath, f.lastModified());
+                try {
+                    visitor.visit(raw);
+                } finally {
+                    raw.close();
                 }
-            } catch (IOException ex) {
-                for (RawResource rawResource : result) {
-                    IOUtils.closeQuietly(rawResource.inputStream);
-                }
-                throw ex;
             }
-            return result;
         }
     }
 
+    private String fixWinPath(File file) {
+        String path = file.getAbsolutePath();
+        if (path.length() > 2 && path.charAt(1) == ':' && path.charAt(2) == '\\')
+            path = path.replace('\\', '/');
+        return path;
+    }
+
     @Override
     protected RawResource getResourceImpl(String resPath) throws IOException {
-        synchronized (FileResourceStore.class) {
 
-            File f = file(resPath);
-            if (f.exists() && f.isFile()) {
-                if (f.length() == 0) {
-                    logger.warn("Zero length file: " + f.getAbsolutePath());
-                }
-                FileInputStream resource = new FileInputStream(f);
-                return new RawResource(resource, f.lastModified());
-            } else {
-                return null;
+        File f = file(resPath);
+        if (f.exists() && f.isFile()) {
+            if (f.length() == 0) {
+                logger.warn("Zero length file: {}. ", f.getAbsolutePath());
             }
+
+            return new RawResource(resPath, f.lastModified(), new FileInputStream(f));
+        } else {
+            return null;
         }
     }
 
     @Override
     protected long getResourceTimestampImpl(String resPath) throws IOException {
-        synchronized (FileResourceStore.class) {
 
-            File f = file(resPath);
-            if (f.exists() && f.isFile())
-                return f.lastModified();
-            else
-                return 0;
-        }
+        File f = file(resPath);
+        if (f.exists() && f.isFile())
+            return f.lastModified();
+        else
+            return 0;
     }
 
     @Override
-    protected void putResourceImpl(String resPath, InputStream content, long ts) throws IOException {
-        synchronized (FileResourceStore.class) {
+    protected void putResourceImpl(String resPath, ContentWriter content, long ts) throws IOException {
+
+        if (--failPutResourceCountDown == 0)
+            throw new IOException("for test");
+
+        File tmp = File.createTempFile("kylin-fileresource-", ".tmp");
+        try {
+
+            try (FileOutputStream out = new FileOutputStream(tmp); DataOutputStream dout = new DataOutputStream(out)) {
+                content.write(dout);
+                dout.flush();
+            }
 
             File f = file(resPath);
             f.getParentFile().mkdirs();
-            FileOutputStream out = new FileOutputStream(f);
-            try {
-                IOUtils.copy(content, out);
-            } finally {
-                IOUtils.closeQuietly(out);
-                IOUtils.closeQuietly(content);
+
+            if (!tmp.renameTo(f)) {
+                f.delete();
+                for (int i = 0; f.exists() && i < 3; i++) {
+                    try {
+                        Thread.sleep(10);
+                    } catch (InterruptedException e) {
+                        throw new RuntimeException(e);
+                    }
+                    f.delete();
+                }
+
+                FileUtils.moveFile(tmp, f);
             }
 
             f.setLastModified(ts);
+
+        } finally {
+            if (tmp.exists())
+                FileUtils.forceDelete(tmp);
         }
     }
 
     @Override
     protected long checkAndPutResourceImpl(String resPath, byte[] content, long oldTS, long newTS)
             throws IOException, WriteConflictException {
-        synchronized (FileResourceStore.class) {
 
-            File f = file(resPath);
-            if ((f.exists() && f.lastModified() != oldTS) || (f.exists() == false && oldTS != 0))
-                throw new WriteConflictException("Overwriting conflict " + resPath + ", expect old TS " + oldTS
-                        + ", but found " + f.lastModified());
+        File f = file(resPath);
+        if ((f.exists() && f.lastModified() != oldTS) || (f.exists() == false && oldTS != 0))
+            throw new WriteConflictException(
+                    "Overwriting conflict " + resPath + ", expect old TS " + oldTS + ", but found " + f.lastModified());
 
-            putResourceImpl(resPath, new ByteArrayInputStream(content), newTS);
+        putResourceImpl(resPath, ContentWriter.create(content), newTS);
 
-            // some FS lose precision on given time stamp
-            return f.lastModified();
-        }
+        return f.lastModified();
     }
 
     @Override
     protected void deleteResourceImpl(String resPath) throws IOException {
-        synchronized (FileResourceStore.class) {
 
-            File f = file(resPath);
-            f.delete();
+        File f = file(resPath);
+        try {
+            if (f.exists())
+                FileUtils.forceDelete(f);
+        } catch (FileNotFoundException e) {
+            // FileNotFoundException is not a problem in case of delete
         }
     }
 
     @Override
     protected String getReadableResourcePathImpl(String resPath) {
-        synchronized (FileResourceStore.class) {
-            return file(resPath).toString();
-        }
+        return file(resPath).toString();
     }
 
     private File file(String resPath) {
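
The rewritten putResourceImpl writes the content to a temp file first and only then moves it into place, so a crash mid-write cannot leave a truncated resource behind. A minimal sketch of the same write-then-rename idea, using java.nio Files.move in place of the manual rename/retry loop above:

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    public class AtomicWriteExample {
        public static void write(File target, byte[] content) throws IOException {
            // write to a sibling temp file so the move stays on one filesystem
            File tmp = File.createTempFile("atomic-", ".tmp", target.getParentFile());
            try (FileOutputStream out = new FileOutputStream(tmp)) {
                out.write(content);
            }
            // readers see either the old file or the complete new one, never a partial write
            Files.move(tmp.toPath(), target.toPath(), StandardCopyOption.REPLACE_EXISTING);
        }
    }
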
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/HDFSResourceStore.java b/core-common/src/main/java/org/apache/kylin/common/persistence/HDFSResourceStore.java
index 60a647716c..c38a182f0e 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/HDFSResourceStore.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/HDFSResourceStore.java
@@ -14,15 +14,11 @@
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
-*/
+ */
 
 package org.apache.kylin.common.persistence;
 
-import java.io.ByteArrayInputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collections;
-import java.util.List;
 import java.util.NavigableSet;
 import java.util.TreeSet;
 
@@ -41,12 +37,7 @@
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 
-/**
- * A ResourceStore implementation with HDFS as the storage.
- * The typical scenario is as read-only or single thread temporary storage for metadata.
- */
 public class HDFSResourceStore extends ResourceStore {
 
     private static final Logger logger = LoggerFactory.getLogger(HDFSResourceStore.class);
@@ -57,25 +48,25 @@
 
     private static final String HDFS_SCHEME = "hdfs";
 
-    public HDFSResourceStore(KylinConfig kylinConfig) throws IOException {
+    public HDFSResourceStore(KylinConfig kylinConfig) throws Exception {
         this(kylinConfig, kylinConfig.getMetadataUrl());
     }
 
-    public HDFSResourceStore(KylinConfig kylinConfig, StorageURL metadataUrl) throws IOException {
+    public HDFSResourceStore(KylinConfig kylinConfig, StorageURL metadataUrl) throws Exception {
         super(kylinConfig);
         Preconditions.checkState(HDFS_SCHEME.equals(metadataUrl.getScheme()));
 
         String path = metadataUrl.getParameter("path");
         if (path == null) {
             // missing path is not expected, but don't fail it
-            path = kylinConfig.getHdfsWorkingDirectory() + "tmp_metadata";
-            logger.warn("Missing path, fall back to {}", path);
+            path = kylinConfig.getHdfsWorkingDirectory(null) + "tmp_metadata";
+            logger.warn("Missing path, fall back to {}. ", path);
         }
 
         fs = HadoopUtil.getFileSystem(path);
         Path metadataPath = new Path(path);
-        if (!fs.exists(metadataPath)) {
-            logger.warn("Path not exist in HDFS, create it: {}", path);
+        if (fs.exists(metadataPath) == false) {
+            logger.warn("Path not exist in HDFS, create it: {}. ", path);
             createMetaFolder(metadataPath);
         }
 
@@ -84,7 +75,7 @@ public HDFSResourceStore(KylinConfig kylinConfig, StorageURL metadataUrl) throws
 
     }
 
-    private void createMetaFolder(Path metaDirName) throws IOException {
+    private void createMetaFolder(Path metaDirName) throws Exception {
         //create hdfs meta path
         if (!fs.exists(metaDirName)) {
             fs.mkdirs(metaDirName);
@@ -94,7 +85,16 @@ private void createMetaFolder(Path metaDirName) throws IOException {
     }
 
     @Override
-    protected NavigableSet<String> listResourcesImpl(String folderPath, boolean recursive) throws IOException {
+    protected NavigableSet<String> listResourcesImpl(String folderPath) throws IOException {
+        return listResourcesImpl(folderPath, false);
+    }
+
+    @Override
+    protected NavigableSet<String> listResourcesRecursivelyImpl(String folderPath) throws IOException {
+        return listResourcesImpl(folderPath, true);
+    }
+
+    private NavigableSet<String> listResourcesImpl(String folderPath, boolean recursive) throws IOException {
         Path p = getRealHDFSPath(folderPath);
         String prefix = folderPath.endsWith("/") ? folderPath : folderPath + "/";
         if (!fs.exists(p) || !fs.isDirectory(p)) {
@@ -110,100 +110,125 @@ private void createMetaFolder(Path metaDirName) throws IOException {
         return r.isEmpty() ? null : r;
     }
 
-    private TreeSet<String> getFilePath(Path p, String prefix) throws IOException {
+    private TreeSet<String> getFilePath(Path p, String resPathPrefix) throws IOException {
         TreeSet<String> fileList = new TreeSet<>();
         for (FileStatus fileStat : fs.listStatus(p)) {
-            fileList.add(prefix + fileStat.getPath().getName());
+            fileList.add(resPathPrefix + fileStat.getPath().getName());
         }
         return fileList;
     }
 
-    TreeSet<String> getAllFilePath(Path filePath, String prefix) throws IOException {
+    TreeSet<String> getAllFilePath(Path filePath, String resPathPrefix) throws IOException {
+        String fsPathPrefix = filePath.toUri().getPath();
+
         TreeSet<String> fileList = new TreeSet<>();
         RemoteIterator<LocatedFileStatus> it = fs.listFiles(filePath, true);
         while (it.hasNext()) {
-            String[] path = it.next().getPath().toString().split(prefix, 2);
-            fileList.add(prefix + path[1]);
+            String path = it.next().getPath().toUri().getPath();
+            if (!path.startsWith(fsPathPrefix))
+                throw new IllegalStateException("File path " + path + " is supposed to start with " + fsPathPrefix);
+
+            String resPath = resPathPrefix + path.substring(fsPathPrefix.length() + 1);
+            fileList.add(resPath);
         }
         return fileList;
     }
 
     @Override
-    protected boolean existsImpl(String resPath) throws IOException {
-        Path p = getRealHDFSPath(resPath);
-        return fs.exists(p) && fs.isFile(p);
-    }
+    protected void visitFolderImpl(String folderPath, boolean recursive, VisitFilter filter, boolean loadContent,
+                                   Visitor visitor) throws IOException {
+        Path p = getRealHDFSPath(folderPath);
+        if (!fs.exists(p) || !fs.isDirectory(p)) {
+            return;
+        }
 
-    @Override
-    protected List<RawResource> getAllResourcesImpl(String folderPath, long timeStart, long timeEndExclusive)
-            throws IOException {
-        NavigableSet<String> resources = listResources(folderPath);
-        if (resources == null)
-            return Collections.emptyList();
-        List<RawResource> result = Lists.newArrayListWithCapacity(resources.size());
-        try {
-            for (String res : resources) {
-                long ts = getResourceTimestampImpl(res);
-                if (timeStart <= ts && ts < timeEndExclusive) {
-                    RawResource resource = getResourceImpl(res);
-                    if (resource != null) // can be null if is a sub-folder
-                        result.add(resource);
+        String fsPathPrefix = p.toUri().getPath();
+        String resPathPrefix = folderPath.endsWith("/") ? folderPath : folderPath + "/";
+
+        RemoteIterator<LocatedFileStatus> it = fs.listFiles(p, recursive);
+        while (it.hasNext()) {
+            LocatedFileStatus status = it.next();
+            if (status.isDirectory())
+                continue;
+
+            String path = status.getPath().toUri().getPath();
+            if (!path.startsWith(fsPathPrefix))
+                throw new IllegalStateException("File path " + path + " is supposed to start with " + fsPathPrefix);
+
+            String resPath = resPathPrefix + path.substring(fsPathPrefix.length() + 1);
+
+            if (filter.matches(resPath, status.getModificationTime())) {
+                RawResource raw;
+                if (loadContent)
+                    raw = new RawResource(resPath, status.getModificationTime(), fs.open(status.getPath()));
+                else
+                    raw = new RawResource(resPath, status.getModificationTime());
+
+                try {
+                    visitor.visit(raw);
+                } finally {
+                    raw.close();
                 }
             }
-        } catch (IOException ex) {
-            for (RawResource rawResource : result) {
-                IOUtils.closeQuietly(rawResource.inputStream);
-            }
-            throw ex;
         }
-        return result;
+    }
+
+    @Override
+    protected boolean existsImpl(String resPath) throws IOException {
+        Path p = getRealHDFSPath(resPath);
+        return fs.exists(p) && fs.isFile(p);
     }
 
     @Override
     protected RawResource getResourceImpl(String resPath) throws IOException {
         Path p = getRealHDFSPath(resPath);
         if (fs.exists(p) && fs.isFile(p)) {
-            if (fs.getFileStatus(p).getLen() == 0) {
-                logger.warn("Zero length file: {}", p);
+            FileStatus fileStatus = fs.getFileStatus(p);
+            if (fileStatus.getLen() == 0) {
+                logger.warn("Zero length file: {}. ", p);
             }
-            FSDataInputStream in = getHDFSFileInputStream(fs, p);
-            long t = in.readLong();
-            return new RawResource(in, t);
+            FSDataInputStream in = fs.open(p);
+            long ts = fileStatus.getModificationTime();
+            return new RawResource(resPath, ts, in);
         } else {
             return null;
         }
     }
 
-    private FSDataInputStream getHDFSFileInputStream(FileSystem fs, Path path) throws IOException {
-        return fs.open(path);
-    }
-
     @Override
     protected long getResourceTimestampImpl(String resPath) throws IOException {
         Path p = getRealHDFSPath(resPath);
         if (!fs.exists(p) || !fs.isFile(p)) {
             return 0;
         }
-        try (FSDataInputStream in = fs.open(p)) {
-            return in.readLong();
+        try {
+            return fs.getFileStatus(p).getModificationTime();
+        } catch (Exception e) {
+            throw new IOException("Put resource fail", e);
         }
+
     }
 
     @Override
-    protected void putResourceImpl(String resPath, InputStream content, long ts) throws IOException {
-        logger.trace("res path : {}", resPath);
+    protected void putResourceImpl(String resPath, ContentWriter content, long ts) throws IOException {
+        logger.trace("res path : {}. ", resPath);
         Path p = getRealHDFSPath(resPath);
-        logger.trace("put resource : {}", p.toUri());
-        try (FSDataOutputStream out = fs.create(p, true)) {
-            out.writeLong(ts);
-            IOUtils.copy(content, out);
+        logger.trace("put resource : {}. ", p.toUri());
+        FSDataOutputStream out = null;
+        try {
+            out = fs.create(p, true);
+            content.write(out);
+        } catch (Exception e) {
+            throw new IOException("Put resource fail", e);
         } finally {
-            IOUtils.closeQuietly(content);
+            IOUtils.closeQuietly(out);
+            fs.setTimes(p, ts, -1);
         }
     }
 
     @Override
-    protected long checkAndPutResourceImpl(String resPath, byte[] content, long oldTS, long newTS) throws IOException {
+    protected long checkAndPutResourceImpl(String resPath, byte[] content, long oldTS, long newTS)
+            throws IOException, WriteConflictException {
         Path p = getRealHDFSPath(resPath);
         if (!fs.exists(p)) {
             if (oldTS != 0) {
@@ -218,7 +243,7 @@ protected long checkAndPutResourceImpl(String resPath, byte[] content, long oldT
                         + ", but found " + realLastModify);
             }
         }
-        putResourceImpl(resPath, new ByteArrayInputStream(content), newTS);
+        putResourceImpl(resPath, ContentWriter.create(content), newTS);
         return newTS;
     }
 
@@ -233,7 +258,6 @@ protected void deleteResourceImpl(String resPath) throws IOException {
             throw new IOException("Delete resource fail", e);
         }
     }
-
     @Override
     protected String getReadableResourcePathImpl(String resPath) {
         return getRealHDFSPath(resPath).toString();
@@ -243,7 +267,7 @@ private Path getRealHDFSPath(String resourcePath) {
         if (resourcePath.equals("/"))
             return this.hdfsMetaPath;
         if (resourcePath.startsWith("/") && resourcePath.length() > 1)
-            resourcePath = resourcePath.substring(1);
+            resourcePath = resourcePath.substring(1, resourcePath.length());
         return new Path(this.hdfsMetaPath, resourcePath);
     }
 }
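
Two things change in HDFSResourceStore above: resource timestamps now come from the HDFS file's modification time instead of an 8-byte prefix written inside the file, and folder scans are served through a visitor callback. Both getAllFilePath() and visitFolderImpl() rebuild a resource path from a listed HDFS path by stripping the filesystem prefix. A self-contained sketch of that mapping rule (the class and method names here are illustrative only, not part of the patch):

    // Minimal sketch of the fs-path -> resource-path mapping applied by
    // getAllFilePath() and visitFolderImpl() in the diff above.
    public class ResPathMapping {
        static String toResPath(String fsPathPrefix, String resPathPrefix, String fsPath) {
            // Same guard as the patch: every listed file must live under the root.
            if (!fsPath.startsWith(fsPathPrefix))
                throw new IllegalStateException(
                        "File path " + fsPath + " is supposed to start with " + fsPathPrefix);
            // The +1 skips the '/' between the metadata root and the relative path.
            return resPathPrefix + fsPath.substring(fsPathPrefix.length() + 1);
        }

        public static void main(String[] args) {
            // e.g. visiting folder "/cube" whose real HDFS root is /kylin/meta/cube
            System.out.println(toResPath("/kylin/meta/cube", "/cube/", "/kylin/meta/cube/a.json"));
            // prints: /cube/a.json
        }
    }
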
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java
index 5f56de1c74..dcb9a1bb49 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCConnectionManager.java
@@ -39,6 +39,7 @@
 public class JDBCConnectionManager {
 
     private static final Logger logger = LoggerFactory.getLogger(JDBCConnectionManager.class);
+    private static final String PASSWORD = "password";
 
     private static JDBCConnectionManager INSTANCE = null;
 
@@ -74,7 +75,7 @@ private JDBCConnectionManager(KylinConfig config) {
         JDBCResourceStore.checkScheme(metadataUrl);
 
         LinkedHashMap<String, String> ret = new LinkedHashMap<>(metadataUrl.getAllParameters());
-        List<String> mandatoryItems = Arrays.asList("url", "username", "password");
+        List<String> mandatoryItems = Arrays.asList("url", "username", PASSWORD);
 
         for (String item : mandatoryItems) {
             Preconditions.checkNotNull(ret.get(item),
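
The JDBCConnectionManager hunk only lifts the "password" literal into a constant; the surrounding context shows how the metadata URL's mandatory parameters are validated with Guava's Preconditions.checkNotNull, which throws a NullPointerException when a key is absent. A standalone sketch of the same mandatory-key check without the Guava dependency (parameter values are hypothetical):

    import java.util.Arrays;
    import java.util.LinkedHashMap;

    public class MandatoryParamsCheck {
        public static void main(String[] args) {
            LinkedHashMap<String, String> params = new LinkedHashMap<>();
            params.put("url", "jdbc:mysql://host:3306/kylin"); // hypothetical values
            params.put("username", "kylin");
            params.put("password", "***");

            // Fail fast if any mandatory connection parameter is missing,
            // mirroring the Preconditions.checkNotNull loop in the context above.
            for (String item : Arrays.asList("url", "username", "password")) {
                if (params.get(item) == null)
                    throw new NullPointerException(item + " is a mandatory parameter of the metadata url");
            }
        }
    }
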
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceDAO.java b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceDAO.java
deleted file mode 100644
index 70a049b8a7..0000000000
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceDAO.java
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.kylin.common.persistence;
-
-import java.io.BufferedInputStream;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Types;
-import java.text.FieldPosition;
-import java.text.MessageFormat;
-import java.util.List;
-import java.util.Locale;
-import java.util.NavigableSet;
-import java.util.TreeSet;
-
-import com.google.common.base.Preconditions;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.kylin.common.KylinConfig;
-import org.apache.kylin.common.util.DBUtils;
-import org.apache.kylin.common.util.HadoopUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-
-public class JDBCResourceDAO {
-
-    private static Logger logger = LoggerFactory.getLogger(JDBCResourceDAO.class);
-
-    private static final String META_TABLE_KEY = "META_TABLE_KEY";
-
-    private static final String META_TABLE_TS = "META_TABLE_TS";
-
-    private static final String META_TABLE_CONTENT = "META_TABLE_CONTENT";
-
-    private JDBCConnectionManager connectionManager;
-
-    private JDBCSqlQueryFormat jdbcSqlQueryFormat;
-
-    private String[] tableNames = new String[2];
-
-    private KylinConfig kylinConfig;
-
-    // For test
-    private long queriedSqlNum = 0;
-
-    private FileSystem redirectFileSystem;
-
-    public JDBCResourceDAO(KylinConfig kylinConfig, String metadataIdentifier) throws SQLException {
-        this.kylinConfig = kylinConfig;
-        this.connectionManager = JDBCConnectionManager.getConnectionManager();
-        this.jdbcSqlQueryFormat = JDBCSqlQueryFormatProvider
-                .createJDBCSqlQueriesFormat(kylinConfig.getMetadataDialect());
-        this.tableNames[0] = metadataIdentifier;
-        this.tableNames[1] = metadataIdentifier + "_log";
-        for (int i = 0; i < tableNames.length; i++) {
-            createTableIfNeeded(tableNames[i]);
-            createIndex("IDX_" + META_TABLE_TS, tableNames[i], META_TABLE_TS);
-        }
-        try {
-            redirectFileSystem = HadoopUtil.getReadFileSystem();
-        } catch (IOException e) {
-            throw new SQLException(e);
-        }
-    }
-
-    public void close() {
-        connectionManager.close();
-    }
-
-    public JDBCResource getResource(final String resourcePath, final boolean fetchContent, final boolean fetchTimestamp)
-            throws SQLException {
-        return getResource(resourcePath, fetchContent, fetchTimestamp, false);
-    }
-
-    public JDBCResource getResource(final String resourcePath, final boolean fetchContent, final boolean fetchTimestamp,
-            final boolean isAllowBroken) throws SQLException {
-        final JDBCResource resource = new JDBCResource();
-        logger.trace("getResource method. resourcePath : {} , fetchConetent : {} , fetch TS : {}", resourcePath,
-                fetchContent, fetchTimestamp);
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                String tableName = getMetaTableName(resourcePath);
-                pstat = connection.prepareStatement(getKeyEqualSqlString(tableName, fetchContent, fetchTimestamp));
-                pstat.setString(1, resourcePath);
-                rs = pstat.executeQuery();
-                if (rs.next()) {
-                    resource.setPath(rs.getString(META_TABLE_KEY));
-                    if (fetchTimestamp)
-                        resource.setTimestamp(rs.getLong(META_TABLE_TS));
-                    if (fetchContent) {
-                        try {
-                            resource.setContent(getInputStream(resourcePath, rs));
-                        } catch (Exception e) {
-                            if (!isAllowBroken) {
-                                throw new SQLException(e);
-                            }
-
-                            final BrokenEntity brokenEntity = new BrokenEntity(resourcePath, e.getMessage());
-                            resource.setContent(new BrokenInputStream(brokenEntity));
-                            logger.warn(e.getMessage());
-                        }
-                    }
-                }
-            }
-        });
-        if (resource.getPath() != null) {
-            return resource;
-        } else {
-            return null;
-        }
-    }
-
-    public boolean existResource(final String resourcePath) throws SQLException {
-        JDBCResource resource = getResource(resourcePath, false, false);
-        return (resource != null);
-    }
-
-    public long getResourceTimestamp(final String resourcePath) throws SQLException {
-        JDBCResource resource = getResource(resourcePath, false, true);
-        return resource == null ? 0 : resource.getTimestamp();
-    }
-
-    //fetch primary key only
-    public NavigableSet<String> listAllResource(final String folderPath, final boolean recursive) throws SQLException {
-        final NavigableSet<String> allResourceName = new TreeSet<>();
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                String tableName = getMetaTableName(folderPath);
-                pstat = connection.prepareStatement(getListResourceSqlString(tableName));
-                pstat.setString(1, folderPath + "%");
-                rs = pstat.executeQuery();
-                while (rs.next()) {
-                    String path = rs.getString(META_TABLE_KEY);
-                    Preconditions.checkState(path.startsWith(folderPath));
-                    if (recursive) {
-                        allResourceName.add(path);
-                    } else {
-                        int cut = path.indexOf('/', folderPath.length());
-                        String child = cut < 0 ? path : path.substring(0, cut);
-                        allResourceName.add(child);
-                    }
-                }
-            }
-        });
-        return allResourceName;
-    }
-
-    public List<JDBCResource> getAllResource(final String folderPath, final long timeStart, final long timeEndExclusive,
-            final boolean isAllowBroken) throws SQLException {
-        final List<JDBCResource> allResource = Lists.newArrayList();
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                String tableName = getMetaTableName(folderPath);
-                pstat = connection.prepareStatement(getAllResourceSqlString(tableName));
-                pstat.setString(1, folderPath + "%");
-                pstat.setLong(2, timeStart);
-                pstat.setLong(3, timeEndExclusive);
-                rs = pstat.executeQuery();
-                while (rs.next()) {
-                    String resPath = rs.getString(META_TABLE_KEY);
-                    if (checkPath(folderPath, resPath)) {
-                        JDBCResource resource = new JDBCResource();
-                        resource.setPath(resPath);
-                        resource.setTimestamp(rs.getLong(META_TABLE_TS));
-                        try {
-                            resource.setContent(getInputStream(resPath, rs));
-                        } catch (Exception e) {
-                            if (!isAllowBroken) {
-                                throw new SQLException(e);
-                            }
-
-                            final BrokenEntity brokenEntity = new BrokenEntity(resPath, e.getMessage());
-                            resource.setContent(new BrokenInputStream(brokenEntity));
-                            logger.warn(e.getMessage());
-                        }
-                        allResource.add(resource);
-                    }
-                }
-            }
-        });
-        return allResource;
-    }
-
-    private boolean checkPath(String lookForPrefix, String resPath) {
-        lookForPrefix = lookForPrefix.endsWith("/") ? lookForPrefix : lookForPrefix + "/";
-        assert resPath.startsWith(lookForPrefix);
-        int cut = resPath.indexOf('/', lookForPrefix.length());
-        return (cut < 0);
-    }
-
-    private boolean isJsonMetadata(String resourcePath) {
-        String trim = resourcePath.trim();
-        return trim.endsWith(".json") || trim.startsWith(ResourceStore.EXECUTE_RESOURCE_ROOT)
-                || trim.startsWith(ResourceStore.EXECUTE_OUTPUT_RESOURCE_ROOT);
-
-    }
-
-    public void deleteResource(final String resourcePath) throws SQLException {
-
-        boolean skipHdfs = isJsonMetadata(resourcePath);
-
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                String tableName = getMetaTableName(resourcePath);
-                pstat = connection.prepareStatement(getDeletePstatSql(tableName));
-                pstat.setString(1, resourcePath);
-                pstat.executeUpdate();
-            }
-        });
-
-        if (!skipHdfs) {
-            try {
-                deleteHDFSResourceIfExist(resourcePath);
-            } catch (Exception e) {
-                throw new SQLException(e);
-            }
-        }
-    }
-
-    private void deleteHDFSResourceIfExist(String resourcePath) throws IOException {
-        Path redirectPath = bigCellHDFSPath(resourcePath);
-        if (redirectFileSystem.exists(redirectPath)) {
-            redirectFileSystem.delete(redirectPath, true);
-        }
-
-    }
-
-    public void putResource(final JDBCResource resource) throws SQLException {
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                byte[] content = getResourceDataBytes(resource);
-                synchronized (resource.getPath().intern()) {
-                    boolean existing = existResource(resource.getPath());
-                    String tableName = getMetaTableName(resource.getPath());
-                    if (existing) {
-                        pstat = connection.prepareStatement(getReplaceSql(tableName));
-                        pstat.setLong(1, resource.getTimestamp());
-                        pstat.setBlob(2, new BufferedInputStream(new ByteArrayInputStream(content)));
-                        pstat.setString(3, resource.getPath());
-                    } else {
-                        pstat = connection.prepareStatement(getInsertSql(tableName));
-                        pstat.setString(1, resource.getPath());
-                        pstat.setLong(2, resource.getTimestamp());
-                        pstat.setBlob(3, new BufferedInputStream(new ByteArrayInputStream(content)));
-                    }
-
-                    if (isContentOverflow(content, resource.getPath())) {
-                        logger.debug("Overflow! resource path: {}, content size: {}, timeStamp: {}", resource.getPath(),
-                                content.length, resource.getTimestamp());
-                        if (existing) {
-                            pstat.setNull(2, Types.BLOB);
-                        } else {
-                            pstat.setNull(3, Types.BLOB);
-                        }
-                        writeLargeCellToHdfs(resource.getPath(), content);
-                        try {
-                            int result = pstat.executeUpdate();
-                            if (result != 1)
-                                throw new SQLException();
-                        } catch (SQLException e) {
-                            rollbackLargeCellFromHdfs(resource.getPath());
-                            throw e;
-                        }
-                        if (existing) {
-                            cleanOldLargeCellFromHdfs(resource.getPath());
-                        }
-                    } else {
-                        pstat.executeUpdate();
-                    }
-                }
-            }
-        });
-    }
-
-    public void checkAndPutResource(final String resPath, final byte[] content, final long oldTS, final long newTS)
-            throws SQLException, WriteConflictException {
-        logger.trace(
-                "execute checkAndPutResource method. resPath : {} , oldTs : {} , newTs : {} , content null ? : {} ",
-                resPath, oldTS, newTS, content == null);
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                synchronized (resPath.intern()) {
-                    String tableName = getMetaTableName(resPath);
-                    if (!existResource(resPath)) {
-                        if (oldTS != 0) {
-                            throw new IllegalStateException(
-                                    "For not exist file. OldTS have to be 0. but Actual oldTS is : " + oldTS);
-                        }
-                        if (isContentOverflow(content, resPath)) {
-                            logger.debug("Overflow! resource path: {}, content size: {}", resPath, content.length);
-                            pstat = connection.prepareStatement(getInsertSqlWithoutContent(tableName));
-                            pstat.setString(1, resPath);
-                            pstat.setLong(2, newTS);
-                            writeLargeCellToHdfs(resPath, content);
-                            try {
-                                int result = pstat.executeUpdate();
-                                if (result != 1)
-                                    throw new SQLException();
-                            } catch (SQLException e) {
-                                rollbackLargeCellFromHdfs(resPath);
-                                throw e;
-                            }
-                        } else {
-                            pstat = connection.prepareStatement(getInsertSql(tableName));
-                            pstat.setString(1, resPath);
-                            pstat.setLong(2, newTS);
-                            pstat.setBlob(3, new BufferedInputStream(new ByteArrayInputStream(content)));
-                            pstat.executeUpdate();
-                        }
-                    } else {
-                        // Note the checkAndPut trick:
-                        // update {0} set {1}=? where {2}=? and {3}=?
-                        pstat = connection.prepareStatement(getUpdateSqlWithoutContent(tableName));
-                        pstat.setLong(1, newTS);
-                        pstat.setString(2, resPath);
-                        pstat.setLong(3, oldTS);
-                        int result = pstat.executeUpdate();
-                        if (result != 1) {
-                            long realTime = getResourceTimestamp(resPath);
-                            throw new WriteConflictException("Overwriting conflict " + resPath + ", expect old TS "
-                                    + oldTS + ", but it is " + realTime);
-                        }
-                        PreparedStatement pstat2 = null;
-                        try {
-                            // "update {0} set {1}=? where {3}=?"
-                            pstat2 = connection.prepareStatement(getUpdateContentSql(tableName));
-                            if (isContentOverflow(content, resPath)) {
-                                logger.debug("Overflow! resource path: {}, content size: {}", resPath, content.length);
-                                pstat2.setNull(1, Types.BLOB);
-                                pstat2.setString(2, resPath);
-                                writeLargeCellToHdfs(resPath, content);
-                                try {
-                                    int result2 = pstat2.executeUpdate();
-                                    if (result2 != 1)
-                                        throw new SQLException();
-                                } catch (SQLException e) {
-                                    rollbackLargeCellFromHdfs(resPath);
-                                    throw e;
-                                }
-                                cleanOldLargeCellFromHdfs(resPath);
-                            } else {
-                                pstat2.setBinaryStream(1, new BufferedInputStream(new ByteArrayInputStream(content)));
-                                pstat2.setString(2, resPath);
-                                pstat2.executeUpdate();
-                            }
-                        } finally {
-                            JDBCConnectionManager.closeQuietly(pstat2);
-                        }
-                    }
-                }
-            }
-        });
-    }
-
-    private byte[] getResourceDataBytes(JDBCResource resource) throws SQLException {
-        ByteArrayOutputStream bout = null;
-        try {
-            bout = new ByteArrayOutputStream();
-            IOUtils.copy(resource.getContent(), bout);
-            return bout.toByteArray();
-        } catch (Exception e) {
-            throw new SQLException(e);
-        } finally {
-            IOUtils.closeQuietly(bout);
-        }
-    }
-
-    private boolean isContentOverflow(byte[] content, String resPath) throws SQLException {
-        if (kylinConfig.isJsonAlwaysSmallCell() && isJsonMetadata(resPath)) {
-
-            int smallCellMetadataWarningThreshold = kylinConfig.getSmallCellMetadataWarningThreshold();
-            int smallCellMetadataErrorThreshold = kylinConfig.getSmallCellMetadataErrorThreshold();
-
-            if (content.length > smallCellMetadataWarningThreshold) {
-                logger.warn(
-                        "A JSON metadata entry's size is not supposed to exceed kylin.metadata.jdbc.small-cell-meta-size-warning-threshold("
-                                + smallCellMetadataWarningThreshold + "), resPath: " + resPath + ", actual size: "
-                                + content.length);
-            }
-            if (content.length > smallCellMetadataErrorThreshold) {
-                throw new SQLException(new IllegalArgumentException(
-                        "A JSON metadata entry's size is not supposed to exceed kylin.metadata.jdbc.small-cell-meta-size-error-threshold("
-                                + smallCellMetadataErrorThreshold + "), resPath: " + resPath + ", actual size: "
-                                + content.length));
-            }
-
-            return false;
-        }
-
-        int maxSize = kylinConfig.getJdbcResourceStoreMaxCellSize();
-        if (content.length > maxSize)
-            return true;
-        else
-            return false;
-    }
-
-    private void createTableIfNeeded(final String tableName) throws SQLException {
-        executeSql(new SqlOperation() {
-            @Override
-            public void execute(Connection connection) throws SQLException {
-                if (checkTableExists(tableName, connection)) {
-                    logger.info("Table [{}] already exists", tableName);
-                    return;
-                }
-
-                pstat = connection.prepareStatement(getCreateIfNeededSql(tableName));
-                pstat.executeUpdate();
-                logger.info("Create table [{}] success", tableName);
-            }
-
-            private boolean checkTableExists(final String tableName, final Connection connection) throws SQLException {
-                PreparedStatement ps = null;
-                ResultSet rs = null;
-                try {
-                    ps = connection.prepareStatement(getCheckTableExistsSql(tableName));
-                    rs = ps.executeQuery();
-                    while (rs.next()) {
-                        if (tableName.equals(rs.getString(1))) {
-                            return true;
-                        }
-                    }
-                } finally {
-                    DBUtils.closeQuietly(rs);
-                    DBUtils.closeQuietly(ps);
-                }
-
-                return false;
-            }
-        });
-    }
-
-    private void createIndex(final String indexName, final String tableName, final String colName) {
-        try {
-            executeSql(new SqlOperation() {
-                @Override
-                public void execute(Connection connection) throws SQLException {
-                    pstat = connection.prepareStatement(getCreateIndexSql(indexName, tableName, colName));
-                    pstat.executeUpdate();
-                }
-            });
-        } catch (SQLException ex) {
-            logger.info("Create index failed with message: " + ex.getLocalizedMessage());
-        }
-    }
-
-    abstract static class SqlOperation {
-        PreparedStatement pstat = null;
-        ResultSet rs = null;
-
-        abstract public void execute(final Connection connection) throws SQLException;
-    }
-
-    private void executeSql(SqlOperation operation) throws SQLException {
-        Connection connection = null;
-        try {
-            connection = connectionManager.getConn();
-            operation.execute(connection);
-            queriedSqlNum++;
-        } finally {
-            JDBCConnectionManager.closeQuietly(operation.rs);
-            JDBCConnectionManager.closeQuietly(operation.pstat);
-            JDBCConnectionManager.closeQuietly(connection);
-        }
-    }
-
-    private String getCheckTableExistsSql(final String tableName) {
-        final String sql = new MessageFormat(jdbcSqlQueryFormat.getCheckTableExistsSql(), Locale.ROOT)
-                .format(new Object[] { tableName }, new StringBuffer(), new FieldPosition(0)).toString();
-        return sql;
-    }
-
-    //sql queries
-    private String getCreateIfNeededSql(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getCreateIfNeedSql(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_KEY, META_TABLE_TS, META_TABLE_CONTENT },
-                        new StringBuffer(), new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    //sql queries
-    private String getCreateIndexSql(String indexName, String tableName, String indexCol) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getCreateIndexSql(), Locale.ROOT)
-                .format(new Object[] { indexName, tableName, indexCol }, new StringBuffer(), new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getKeyEqualSqlString(String tableName, boolean fetchContent, boolean fetchTimestamp) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getKeyEqualsSql(), Locale.ROOT)
-                .format(new Object[] { getSelectList(fetchContent, fetchTimestamp), tableName, META_TABLE_KEY },
-                        new StringBuffer(), new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getDeletePstatSql(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getDeletePstatSql(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_KEY }, new StringBuffer(), new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getListResourceSqlString(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getListResourceSql(), Locale.ROOT)
-                .format(new Object[] { META_TABLE_KEY, tableName, META_TABLE_KEY }, new StringBuffer(),
-                        new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getAllResourceSqlString(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getAllResourceSql(), Locale.ROOT).format(
-                new Object[] { getSelectList(true, true), tableName, META_TABLE_KEY, META_TABLE_TS, META_TABLE_TS },
-                new StringBuffer(), new FieldPosition(0)).toString();
-        return sql;
-    }
-
-    private String getReplaceSql(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getReplaceSql(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_TS, META_TABLE_CONTENT, META_TABLE_KEY },
-                        new StringBuffer(), new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getInsertSql(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getInsertSql(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_KEY, META_TABLE_TS, META_TABLE_CONTENT },
-                        new StringBuffer(), new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    @SuppressWarnings("unused")
-    private String getReplaceSqlWithoutContent(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getReplaceSqlWithoutContent(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_TS, META_TABLE_KEY }, new StringBuffer(),
-                        new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getInsertSqlWithoutContent(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getInsertSqlWithoutContent(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_KEY, META_TABLE_TS }, new StringBuffer(),
-                        new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getUpdateSqlWithoutContent(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getUpdateSqlWithoutContent(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_TS, META_TABLE_KEY, META_TABLE_TS }, new StringBuffer(),
-                        new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getUpdateContentSql(String tableName) {
-        String sql = new MessageFormat(jdbcSqlQueryFormat.getUpdateContentSql(), Locale.ROOT)
-                .format(new Object[] { tableName, META_TABLE_CONTENT, META_TABLE_KEY }, new StringBuffer(),
-                        new FieldPosition(0))
-                .toString();
-        return sql;
-    }
-
-    private String getSelectList(boolean fetchContent, boolean fetchTimestamp) {
-        StringBuilder sb = new StringBuilder();
-        sb.append(META_TABLE_KEY);
-        if (fetchTimestamp)
-            sb.append("," + META_TABLE_TS);
-        if (fetchContent)
-            sb.append("," + META_TABLE_CONTENT);
-        return sb.toString();
-    }
-
-    private InputStream getInputStream(String resPath, ResultSet rs) throws SQLException, IOException {
-        if (rs == null) {
-            return null;
-        }
-        InputStream inputStream = rs.getBlob(META_TABLE_CONTENT) == null ? null
-                : rs.getBlob(META_TABLE_CONTENT).getBinaryStream();
-        if (inputStream != null) {
-            return inputStream;
-        } else {
-            Path redirectPath = bigCellHDFSPath(resPath);
-            return redirectFileSystem.open(redirectPath);
-        }
-    }
-
-    private Path writeLargeCellToHdfs(String resPath, byte[] largeColumn) throws SQLException {
-
-        boolean isResourceExist;
-        FSDataOutputStream out = null;
-        Path redirectPath = bigCellHDFSPath(resPath);
-        Path oldPath = new Path(redirectPath.toString() + "_old");
-        try {
-            isResourceExist = redirectFileSystem.exists(redirectPath);
-            if (isResourceExist) {
-                FileUtil.copy(redirectFileSystem, redirectPath, redirectFileSystem, oldPath, false,
-                        HadoopUtil.getCurrentConfiguration());
-                redirectFileSystem.delete(redirectPath, true);
-                logger.debug("a copy of hdfs file {} is made", redirectPath);
-            }
-            out = redirectFileSystem.create(redirectPath);
-            out.write(largeColumn);
-            return redirectPath;
-        } catch (Exception e) {
-            try {
-                rollbackLargeCellFromHdfs(resPath);
-            } catch (Exception ex) {
-                logger.error("fail to roll back resource " + resPath + " in hdfs", ex);
-            }
-            throw new SQLException(e);
-        } finally {
-            IOUtils.closeQuietly(out);
-        }
-    }
-
-    public void rollbackLargeCellFromHdfs(String resPath) throws SQLException {
-        Path redirectPath = bigCellHDFSPath(resPath);
-        Path oldPath = new Path(redirectPath.toString() + "_old");
-        try {
-            if (redirectFileSystem.exists(oldPath)) {
-                FileUtil.copy(redirectFileSystem, oldPath, redirectFileSystem, redirectPath, true, true,
-                        HadoopUtil.getCurrentConfiguration());
-                logger.info("roll back hdfs file {}", resPath);
-            } else {
-                redirectFileSystem.delete(redirectPath, true);
-                logger.warn("no backup for hdfs file {} is found, clean it", resPath);
-            }
-        } catch (Exception e) {
-
-            try {
-                //last try to delete redirectPath, because we prefer a deleted rather than incomplete
-                redirectFileSystem.delete(redirectPath, true);
-            } catch (Exception ex) {
-                logger.error("fail to delete resource " + redirectPath + " in hdfs", ex);
-            }
-
-            throw new SQLException(e);
-        }
-    }
-
-    private void cleanOldLargeCellFromHdfs(String resPath) throws SQLException {
-        Path redirectPath = bigCellHDFSPath(resPath);
-        Path oldPath = new Path(redirectPath.toString() + "_old");
-        try {
-            if (redirectFileSystem.exists(oldPath)) {
-                redirectFileSystem.delete(oldPath, true);
-            }
-        } catch (Exception e) {
-            logger.warn("error cleaning the backup file for " + redirectPath + ", leave it as garbage", e);
-        }
-    }
-
-    public Path bigCellHDFSPath(String resPath) {
-        String hdfsWorkingDirectory = this.kylinConfig.getHdfsWorkingDirectory();
-        Path redirectPath = new Path(hdfsWorkingDirectory, "resources-jdbc" + resPath);
-        redirectPath = Path.getPathWithoutSchemeAndAuthority(redirectPath);
-        return redirectPath;
-    }
-
-    public long getQueriedSqlNum() {
-        return queriedSqlNum;
-    }
-
-    /**
-     * Persist metadata to different SQL tables
-     * @param resPath the metadata path key
-     * @return the table name
-     */
-    public String getMetaTableName(String resPath) {
-        if (resPath.startsWith(ResourceStore.BAD_QUERY_RESOURCE_ROOT)
-                || resPath.startsWith(ResourceStore.EXECUTE_OUTPUT_RESOURCE_ROOT)
-                || resPath.startsWith(ResourceStore.TEMP_STATMENT_RESOURCE_ROOT)) {
-            return tableNames[1];
-        } else {
-            return tableNames[0];
-        }
-    }
-
-}
\ No newline at end of file
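
The deletion above removes JDBCResourceDAO wholesale; its SQL templates move to the new JDBCResourceSQL class below. One pattern buried in the deleted code deserves a note: the "checkAndPut trick" ("update {0} set {1}=? where {2}=? and {3}=?") is an optimistic compare-and-swap that updates a row only when the stored timestamp still equals the caller's expected timestamp, and treats an update count other than 1 as a write conflict. A minimal standalone sketch of the same pattern over plain JDBC, with an illustrative table name:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    class OptimisticPut {
        // Succeeds only when the row's timestamp is still oldTS; a zero update
        // count means another writer got in first, which the DAO surfaced as
        // a WriteConflictException.
        static long checkAndPut(Connection conn, String resPath, long oldTS, long newTS)
                throws SQLException {
            String sql = "update META set META_TABLE_TS = ? where META_TABLE_KEY = ? and META_TABLE_TS = ?";
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                ps.setLong(1, newTS);
                ps.setString(2, resPath);
                ps.setLong(3, oldTS);
                if (ps.executeUpdate() != 1)
                    throw new IllegalStateException("Overwriting conflict on " + resPath
                            + ", expected TS " + oldTS);
                return newTS;
            }
        }
    }

The database executes the single-row UPDATE atomically, so the timestamp guard needs no extra locking.
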
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceSQL.java b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceSQL.java
new file mode 100644
index 0000000000..3dc7b65ecf
--- /dev/null
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceSQL.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.common.persistence;
+
+import java.text.FieldPosition;
+import java.text.MessageFormat;
+import java.util.Locale;
+
+public class JDBCResourceSQL {
+
+    private final JDBCSqlQueryFormat format;
+    private String tableName;
+    private final String metaTableKey;
+    private final String metaTableTs;
+    private final String metaTableContent;
+
+    public JDBCResourceSQL(String dialect, String tableName, String metaTableKey, String metaTableTs, String metaTableContent) {
+        this.format = JDBCSqlQueryFormatProvider.createJDBCSqlQueriesFormat(dialect);
+        this.tableName = tableName;
+        this.metaTableKey = metaTableKey;
+        this.metaTableTs = metaTableTs;
+        this.metaTableContent = metaTableContent;
+    }
+
+    public String getCheckTableExistsSql(final String tableName) {
+        final String sql = new MessageFormat(format.getCheckTableExistsSql(), Locale.ROOT)
+                .format(new Object[] { tableName }, new StringBuffer(), new FieldPosition(0)).toString();
+        return sql;
+    }
+
+    public String getCreateIfNeededSql(String tableName) {
+        final String sql = new MessageFormat(format.getCreateIfNeedSql(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableKey, metaTableTs, metaTableContent }, new StringBuffer(),
+                        new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getCreateIndexSql(String indexName, String tableName, String indexCol) {
+        final String sql = new MessageFormat(format.getCreateIndexSql(), Locale.ROOT)
+                .format(new Object[] { indexName, tableName, indexCol }, new StringBuffer(), new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getKeyEqualSqlString(boolean fetchContent, boolean fetchTimestamp) {
+        final String sql = new MessageFormat(format.getKeyEqualsSql(), Locale.ROOT)
+                .format(new Object[] { getSelectList(fetchContent, fetchTimestamp), tableName, metaTableKey },
+                        new StringBuffer(), new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getDeletePstatSql() {
+        final String sql = new MessageFormat(format.getDeletePstatSql(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableKey }, new StringBuffer(), new FieldPosition(0)).toString();
+        return sql;
+    }
+
+    public String getAllResourceSqlString(boolean loadContent) {
+        final String sql = new MessageFormat(format.getAllResourceSql(), Locale.ROOT).format(
+                new Object[] { getSelectList(loadContent, true), tableName, metaTableKey, metaTableTs, metaTableTs },
+                new StringBuffer(), new FieldPosition(0)).toString();
+        return sql;
+    }
+
+    public String getReplaceSql() {
+        final String sql = new MessageFormat(format.getReplaceSql(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableTs, metaTableContent, metaTableKey }, new StringBuffer(),
+                        new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getInsertSql() {
+        final String sql = new MessageFormat(format.getInsertSql(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableKey, metaTableTs, metaTableContent }, new StringBuffer(),
+                        new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    @SuppressWarnings("unused")
+    private String getReplaceSqlWithoutContent() {
+        final String sql = new MessageFormat(format.getReplaceSqlWithoutContent(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableTs, metaTableKey }, new StringBuffer(), new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getInsertSqlWithoutContent() {
+        final String sql = new MessageFormat(format.getInsertSqlWithoutContent(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableKey, metaTableTs }, new StringBuffer(), new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getUpdateSqlWithoutContent() {
+        final String sql = new MessageFormat(format.getUpdateSqlWithoutContent(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableTs, metaTableKey, metaTableTs }, new StringBuffer(),
+                        new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    public String getUpdateContentSql() {
+        final String sql = new MessageFormat(format.getUpdateContentSql(), Locale.ROOT)
+                .format(new Object[] { tableName, metaTableContent, metaTableKey }, new StringBuffer(),
+                        new FieldPosition(0))
+                .toString();
+        return sql;
+    }
+
+    private String getSelectList(boolean fetchContent, boolean fetchTimestamp) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(metaTableKey);
+        if (fetchTimestamp)
+            sb.append("," + metaTableTs);
+        if (fetchContent)
+            sb.append("," + metaTableContent);
+        return sb.toString();
+    }
+
+}
\ No newline at end of file
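
JDBCResourceSQL centralizes the dialect-specific SQL that was previously embedded in the DAO: each statement is a MessageFormat template filled with table and column names. A runnable sketch of how one such template expands (the pattern string is illustrative; real patterns come from JDBCSqlQueryFormatProvider for the configured dialect):

    import java.text.FieldPosition;
    import java.text.MessageFormat;
    import java.util.Locale;

    public class SqlTemplateDemo {
        public static void main(String[] args) {
            // Hypothetical dialect template in the shape getKeyEqualSqlString() expects.
            String keyEqualsPattern = "select {0} from {1} where {2} = ?";
            String sql = new MessageFormat(keyEqualsPattern, Locale.ROOT)
                    .format(new Object[] { "META_TABLE_KEY,META_TABLE_TS,META_TABLE_CONTENT",
                            "kylin_default_instance", "META_TABLE_KEY" },
                            new StringBuffer(), new FieldPosition(0))
                    .toString();
            System.out.println(sql);
            // select META_TABLE_KEY,META_TABLE_TS,META_TABLE_CONTENT from kylin_default_instance where META_TABLE_KEY = ?
        }
    }

Keeping the templates in one class lets the store switch SQL dialects by configuration rather than by string surgery spread across the DAO, as the rewritten JDBCResourceStore below shows.
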
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceStore.java b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceStore.java
index a0a58cb8bf..dc3a45b00f 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceStore.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/JDBCResourceStore.java
@@ -18,135 +18,527 @@
 
 package org.apache.kylin.common.persistence;
 
+import java.io.BufferedInputStream;
+import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.ConnectException;
+import java.net.SocketTimeoutException;
+import java.sql.Blob;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
 import java.util.List;
-import java.util.NavigableSet;
 
-import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.kylin.common.KylinConfig;
 import org.apache.kylin.common.StorageURL;
+import org.apache.kylin.common.util.DBUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 
-public class JDBCResourceStore extends ResourceStore {
+public class JDBCResourceStore extends PushdownResourceStore {
 
-    private static final String JDBC_SCHEME = "jdbc";
+    private static final Logger logger = LoggerFactory.getLogger(JDBCResourceStore.class);
 
-    private String metadataIdentifier;
+    public static final String JDBC_SCHEME = "jdbc";
 
-    private JDBCResourceDAO resourceDAO;
+    private static final String META_TABLE_KEY = "META_TABLE_KEY";
 
-    public JDBCResourceStore(KylinConfig kylinConfig) throws SQLException {
+    private static final String META_TABLE_TS = "META_TABLE_TS";
+
+    private static final String META_TABLE_CONTENT = "META_TABLE_CONTENT";
+
+    public static void checkScheme(StorageURL url) {
+        Preconditions.checkState(JDBC_SCHEME.equals(url.getScheme()));
+    }
+
+    // ============================================================================
+
+    private JDBCConnectionManager connectionManager;
+
+    private String[] tableNames = new String[2];
+
+    private String metadataIdentifier = null;
+
+    // For test
+    private long queriedSqlNum = 0;
+
+    public JDBCResourceStore(KylinConfig kylinConfig) throws SQLException, IOException {
         super(kylinConfig);
         StorageURL metadataUrl = kylinConfig.getMetadataUrl();
         checkScheme(metadataUrl);
-        metadataIdentifier = metadataUrl.getIdentifier();
-        this.resourceDAO = new JDBCResourceDAO(kylinConfig, metadataUrl.getIdentifier());
+        this.metadataIdentifier = metadataUrl.getIdentifier();
+        this.tableNames[0] = metadataIdentifier;
+        this.tableNames[1] = metadataIdentifier + "_log";
+        this.connectionManager = JDBCConnectionManager.getConnectionManager();
+        for (int i = 0; i < tableNames.length; i++) {
+            createTableIfNeeded(tableNames[i]);
+        }
     }
 
-    @Override
-    protected boolean existsImpl(String resPath) throws IOException {
+    abstract static class SqlOperation {
+        PreparedStatement pstat = null;
+        ResultSet rs = null;
+
+        public abstract void execute(final Connection connection) throws SQLException, IOException;
+    }
+
+    private void executeSql(SqlOperation operation) throws SQLException, IOException {
+        Connection connection = null;
         try {
-            return resourceDAO.existResource(resPath);
-        } catch (SQLException e) {
-            throw new IOException(e);
+            connection = connectionManager.getConn();
+
+            // set a low transaction isolation level for best performance
+            connection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
+
+            operation.execute(connection);
+            queriedSqlNum++;
+        } finally {
+            DBUtils.closeQuietly(operation.rs);
+            DBUtils.closeQuietly(operation.pstat);
+            DBUtils.closeQuietly(connection);
         }
     }
 
-    @Override
-    protected RawResource getResourceImpl(String resPath) throws IOException {
-        return getResourceImpl(resPath, false);
+    private void createTableIfNeeded(final String tableName) throws SQLException, IOException {
+        JDBCResourceSQL sqls = getJDBCResourceSQL(tableName);
+        executeSql(new SqlOperation() {
+            @Override
+            public void execute(Connection connection) throws SQLException {
+                if (checkTableExists(tableName, connection)) {
+                    logger.info("Table [{}] already exists", tableName);
+                    return;
+                }
+
+                String createIfNeededSql = sqls.getCreateIfNeededSql(tableName);
+                logger.info("Creating table: {}", createIfNeededSql);
+                pstat = connection.prepareStatement(createIfNeededSql);
+                pstat.executeUpdate();
+
+                try {
+                    String indexName = "IDX_" + META_TABLE_TS;
+                    String createIndexSql = sqls.getCreateIndexSql(indexName, tableName, META_TABLE_TS);
+                    logger.info("Creating index: {}", createIndexSql);
+                    pstat = connection.prepareStatement(createIndexSql);
+                    pstat.executeUpdate();
+                } catch (SQLException ex) {
+                    logger.error("Failed to create index on " + META_TABLE_TS, ex);
+                }
+            }
+
+            private boolean checkTableExists(final String tableName, final Connection connection) throws SQLException {
+                PreparedStatement ps = null;
+                ResultSet rs = null;
+                try {
+                    String checkTableExistsSql = sqls.getCheckTableExistsSql(tableName);
+                    ps = connection.prepareStatement(checkTableExistsSql);
+                    rs = ps.executeQuery();
+                    while (rs.next()) {
+                        // use equalsIgnoreCase() as some RDBMS is case insensitive
+                        if (tableName.equalsIgnoreCase(rs.getString(1))) {
+                            return true;
+                        }
+                    }
+                } finally {
+                    DBUtils.closeQuietly(rs);
+                    DBUtils.closeQuietly(ps);
+                }
+
+                return false;
+            }
+        });
+    }
+
+    public long getQueriedSqlNum() {
+        return queriedSqlNum;
+    }
+
+    public void close() {
+        connectionManager.close();
     }
 
-    protected RawResource getResourceImpl(String resPath, final boolean isAllowBroken) throws IOException {
+    private boolean isJsonMetadata(String resourcePath) {
+        String trim = resourcePath.trim();
+        return trim.endsWith(".json") || trim.startsWith(ResourceStore.EXECUTE_RESOURCE_ROOT)
+                || trim.startsWith(ResourceStore.EXECUTE_OUTPUT_RESOURCE_ROOT);
+    }
+
+    @Override
+    protected void visitFolderImpl(final String folderPath, final boolean recursive, final VisitFilter filter,
+                                   final boolean loadContent, final Visitor visitor) throws IOException {
+
         try {
-            JDBCResource resource = resourceDAO.getResource(resPath, true, true, isAllowBroken);
-            if (resource != null)
-                return new RawResource(resource.getContent(), resource.getTimestamp());
-            else
-                return null;
+            executeSql(new SqlOperation() {
+                @Override
+                public void execute(Connection connection) throws SQLException {
+                    String folderPrefix = folderPath.endsWith("/") ? folderPath : folderPath + "/";
+                    String lookForPrefix = folderPrefix;
+                    if (filter.hasPathPrefixFilter()) {
+                        Preconditions.checkArgument(filter.pathPrefix.startsWith(folderPrefix));
+                        lookForPrefix = filter.pathPrefix;
+                    }
+
+                    if (isRootPath(folderPath)) {
+                        for (int i = 0; i < tableNames.length; i++) {
+                            final String tableName = tableNames[i];
+                            JDBCResourceSQL sqls = getJDBCResourceSQL(tableName);
+                            String sql = sqls.getAllResourceSqlString(loadContent);
+                            pstat = connection.prepareStatement(sql);
+                            // '_' is LIKE wild char, need escape
+                            pstat.setString(1, lookForPrefix.replace("_", "#_") + "%");
+                            pstat.setLong(2, filter.lastModStart);
+                            pstat.setLong(3, filter.lastModEndExclusive);
+                            rs = pstat.executeQuery();
+                            while (rs.next()) {
+                                String resPath = rs.getString(META_TABLE_KEY);
+                                if (resPath.equals(folderPath))
+                                    continue; // the folder itself exists as a resource? ignore..
+
+                                if (recursive || isDirectChild(folderPrefix, resPath)) {
+                                    RawResource raw = rawResource(rs, loadContent, true);
+                                    try {
+                                        visitor.visit(raw);
+                                    } catch (IOException e) {
+                                        throw new RuntimeException(e);
+                                    } finally {
+                                        raw.close();
+                                    }
+                                }
+                            }
+                        }
+                    } else {
+                        JDBCResourceSQL sqls = getJDBCResourceSQL(getMetaTableName(folderPath));
+                        String sql = sqls.getAllResourceSqlString(loadContent);
+                        pstat = connection.prepareStatement(sql);
+                        // '_' is LIKE wild char, need escape
+                        pstat.setString(1, lookForPrefix.replace("_", "#_") + "%");
+                        pstat.setLong(2, filter.lastModStart);
+                        pstat.setLong(3, filter.lastModEndExclusive);
+                        rs = pstat.executeQuery();
+                        while (rs.next()) {
+                            String resPath = rs.getString(META_TABLE_KEY);
+                            if (resPath.equals(folderPath))
+                                continue; // the folder itself exists as a resource? ignore..
+
+                            if (recursive || isDirectChild(folderPrefix, resPath)) {
+                                RawResource raw = rawResource(rs, loadContent, true);
+                                try {
+                                    visitor.visit(raw);
+                                } catch (IOException e) {
+                                    throw new RuntimeException(e);
+                                } finally {
+                                    raw.close();
+                                }
+                            }
+                        }
+                    }
+                }
+            });
         } catch (SQLException e) {
             throw new IOException(e);
         }
     }
 
+    private boolean isDirectChild(String folderPrefix, String resPath) {
+        assert resPath.startsWith(folderPrefix);
+        int cut = resPath.indexOf('/', folderPrefix.length());
+        return (cut < 0);
+    }
+
     @Override
-    protected long getResourceTimestampImpl(String resPath) throws IOException {
+    protected boolean existsImpl(String resPath) throws IOException {
         try {
-            JDBCResource resource = resourceDAO.getResource(resPath, false, true);
-            if (resource != null) {
-                return resource.getTimestamp();
-            } else {
-                return 0L;
-            }
+            RawResource resource = getResourceInternal(resPath, false, false);
+            return (resource != null);
         } catch (SQLException e) {
             throw new IOException(e);
         }
     }
 
     @Override
-    protected NavigableSet<String> listResourcesImpl(String folderPath, boolean recursive) throws IOException {
+    protected RawResource getResourceImpl(String resPath) throws IOException {
         try {
-            final NavigableSet<String> result = resourceDAO.listAllResource(makeFolderPath(folderPath), recursive);
-            return result.isEmpty() ? null : result;
+            return getResourceInternal(resPath, true, true);
         } catch (SQLException e) {
             throw new IOException(e);
         }
     }
 
-    @Override
-    protected List<RawResource> getAllResourcesImpl(String folderPath, long timeStart, long timeEndExclusive)
-            throws IOException {
-        return getAllResourcesImpl(folderPath, timeStart, timeEndExclusive, false);
+    RawResource getResourceInternal(final String resourcePath, final boolean fetchContent, final boolean fetchTimestamp)
+            throws SQLException, IOException {
+        logger.trace("getResourceInternal: resourcePath : {} , fetchContent : {} , fetchTimestamp : {}", resourcePath,
+                fetchContent, fetchTimestamp);
+
+        final RawResource[] holder = new RawResource[1];
+
+        JDBCResourceSQL sqls = getJDBCResourceSQL(getMetaTableName(resourcePath));
+        executeSql(new SqlOperation() {
+            @Override
+            public void execute(Connection connection) throws SQLException {
+                pstat = connection.prepareStatement(sqls.getKeyEqualSqlString(fetchContent, fetchTimestamp));
+                pstat.setString(1, resourcePath);
+                rs = pstat.executeQuery();
+                if (rs.next()) {
+                    holder[0] = rawResource(rs, fetchContent, fetchTimestamp);
+                }
+            }
+        });
+
+        return holder[0];
+    }
+
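+    // Builds a RawResource from the current row. If the BLOB content turns out to be broken,
+    // the exception is wrapped inside the RawResource instead of being thrown here, so that
+    // iteration over the remaining rows continues and the caller decides how to handle it.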
+    private RawResource rawResource(ResultSet rs, boolean fetchContent, boolean fetchTime) throws SQLException {
+        String path = rs.getString(META_TABLE_KEY);
+        long ts = fetchTime ? rs.getLong(META_TABLE_TS) : -1;
+
+        if (fetchContent) {
+            try {
+                return new RawResource(path, ts, getInputStream(path, rs));
+            } catch (IOException e) {
+                return new RawResource(path, ts, e); // let the caller handle broken content
+            } catch (SQLException e) {
+                return new RawResource(path, ts, new IOException(e)); // let the caller handle broken content
+            }
+        } else {
+            return new RawResource(path, ts);
+        }
+    }
+
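+    // Read side of the pushdown convention: an empty or null BLOB means the real content was
+    // too big for a store cell and lives in HDFS, so fall back to the pushdown file.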
+    private InputStream getInputStream(String resPath, ResultSet rs) throws SQLException, IOException {
+        if (rs == null) {
+            return null;
+        }
+
+        Blob blob = rs.getBlob(META_TABLE_CONTENT);
+
+        if (blob == null || blob.length() == 0) {
+            return openPushdown(resPath); // empty content is the pushdown indicator
+        } else {
+            return blob.getBinaryStream();
+        }
     }
 
     @Override
-    protected List<RawResource> getAllResourcesImpl(String folderPath, long timeStart, long timeEndExclusive,
-            final boolean isAllowBroken) throws IOException {
-        final List<RawResource> result = Lists.newArrayList();
+    protected long getResourceTimestampImpl(String resPath) throws IOException {
         try {
-            List<JDBCResource> allResource = resourceDAO.getAllResource(makeFolderPath(folderPath), timeStart,
-                    timeEndExclusive, isAllowBroken);
-            for (JDBCResource resource : allResource) {
-                result.add(new RawResource(resource.getContent(), resource.getTimestamp()));
-            }
-            return result;
+            RawResource resource = getResourceInternal(resPath, false, true);
+            return resource == null ? 0 : resource.lastModified();
         } catch (SQLException e) {
-            for (RawResource rawResource : result) {
-                IOUtils.closeQuietly(rawResource.inputStream);
-            }
             throw new IOException(e);
         }
     }
 
     @Override
-    protected void putResourceImpl(String resPath, InputStream content, long ts) throws IOException {
+    protected void putSmallResource(String resPath, ContentWriter content, long ts) throws IOException {
         try {
-            JDBCResource resource = new JDBCResource(resPath, ts, content);
-            resourceDAO.putResource(resource);
+            putResourceInternal(resPath, content, ts);
         } catch (SQLException e) {
             throw new IOException(e);
         }
     }
 
+    void putResourceInternal(final String resPath, final ContentWriter content, final long ts)
+            throws SQLException, IOException {
+        executeSql(new SqlOperation() {
+            @Override
+            public void execute(Connection connection) throws SQLException, IOException {
+                byte[] bytes = content.extractAllBytes();
+                synchronized (resPath.intern()) {
+                    JDBCResourceSQL sqls = getJDBCResourceSQL(getMetaTableName(resPath));
+                    boolean existing = existsImpl(resPath);
+                    if (existing) {
+                        pstat = connection.prepareStatement(sqls.getReplaceSql());
+                        pstat.setLong(1, ts);
+                        pstat.setBlob(2, new BufferedInputStream(new ByteArrayInputStream(bytes)));
+                        pstat.setString(3, resPath);
+                    } else {
+                        pstat = connection.prepareStatement(sqls.getInsertSql());
+                        pstat.setString(1, resPath);
+                        pstat.setLong(2, ts);
+                        pstat.setBlob(3, new BufferedInputStream(new ByteArrayInputStream(bytes)));
+                    }
+
+                    if (isContentOverflow(bytes, resPath)) {
+                        logger.debug("Overflow! resource path: {}, content size: {}, timeStamp: {}", resPath,
+                                bytes.length, ts);
+                        if (existing) {
+                            pstat.setNull(2, Types.BLOB);
+                        } else {
+                            pstat.setNull(3, Types.BLOB);
+                        }
+
+                        RollbackablePushdown pushdown = writePushdown(resPath, ContentWriter.create(bytes));
+                        try {
+                            int result = pstat.executeUpdate();
+                            if (result != 1)
+                                throw new SQLException("expected to update 1 row for " + resPath + ", but got " + result);
+                        } catch (Throwable ex) {
+                            pushdown.rollback();
+                            throw ex;
+                        } finally {
+                            pushdown.close();
+                        }
+                    } else {
+                        pstat.executeUpdate();
+                    }
+                }
+            }
+        });
+    }
+
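+    // Overflow policy: when JSON metadata is configured as always-small-cell, it is never
+    // pushed down; crossing the warning/error thresholds only logs or fails fast. Any other
+    // content larger than the max cell size overflows and is pushed down to HDFS.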
+    private boolean isContentOverflow(byte[] content, String resPath) throws SQLException {
+        if (kylinConfig.isJsonAlwaysSmallCell() && isJsonMetadata(resPath)) {
+
+            int smallCellMetadataWarningThreshold = kylinConfig.getSmallCellMetadataWarningThreshold();
+            int smallCellMetadataErrorThreshold = kylinConfig.getSmallCellMetadataErrorThreshold();
+
+            if (content.length > smallCellMetadataWarningThreshold) {
+                logger.warn(
+                        "A JSON metadata entry's size is not supposed to exceed kap.metadata.jdbc.small-cell-meta-size-warning-threshold("
+                                + smallCellMetadataWarningThreshold + "), resPath: " + resPath + ", actual size: "
+                                + content.length);
+            }
+            if (content.length > smallCellMetadataErrorThreshold) {
+                throw new SQLException(new IllegalArgumentException(
+                        "A JSON metadata entry's size is not supposed to exceed kap.metadata.jdbc.small-cell-meta-size-error-threshold("
+                                + smallCellMetadataErrorThreshold + "), resPath: " + resPath + ", actual size: "
+                                + content.length));
+            }
+
+            return false;
+        }
+
+        int maxSize = kylinConfig.getJdbcResourceStoreMaxCellSize();
+        return content.length > maxSize;
+    }
+
     @Override
     protected long checkAndPutResourceImpl(String resPath, byte[] content, long oldTS, long newTS)
             throws IOException, WriteConflictException {
         try {
-            resourceDAO.checkAndPutResource(resPath, content, oldTS, newTS);
+            checkAndPutResourceInternal(resPath, content, oldTS, newTS);
             return newTS;
         } catch (SQLException e) {
             throw new IOException(e);
         }
     }
 
+    void checkAndPutResourceInternal(final String resPath, final byte[] content, final long oldTS, final long newTS)
+            throws SQLException, IOException, WriteConflictException {
+        logger.trace(
+                "execute checkAndPutResource method. resPath : {} , oldTs : {} , newTs : {} , content null ? : {} ",
+                resPath, oldTS, newTS, content == null);
+        executeSql(new SqlOperation() {
+            @Override
+            public void execute(Connection connection) throws SQLException, IOException {
+                synchronized (resPath.intern()) {
+                    JDBCResourceSQL sqls = getJDBCResourceSQL(getMetaTableName(resPath));
+                    if (!existsImpl(resPath)) {
+                        if (oldTS != 0) {
+                            throw new IllegalStateException(
+                                    "For not exist file. OldTS have to be 0. but Actual oldTS is : " + oldTS);
+                        }
+                        if (isContentOverflow(content, resPath)) {
+                            logger.debug("Overflow! resource path: {}, content size: {}", resPath, content.length);
+                            pstat = connection.prepareStatement(sqls.getInsertSqlWithoutContent());
+                            pstat.setString(1, resPath);
+                            pstat.setLong(2, newTS);
+                            RollbackablePushdown pushdown = writePushdown(resPath, ContentWriter.create(content));
+                            try {
+                                int result = pstat.executeUpdate();
+                                if (result != 1)
+                                    throw new SQLException("expected to insert 1 row for " + resPath + ", but got " + result);
+                            } catch (Throwable e) {
+                                pushdown.rollback();
+                                throw e;
+                            } finally {
+                                pushdown.close();
+                            }
+                        } else {
+                            pstat = connection.prepareStatement(sqls.getInsertSql());
+                            pstat.setString(1, resPath);
+                            pstat.setLong(2, newTS);
+                            pstat.setBlob(3, new BufferedInputStream(new ByteArrayInputStream(content)));
+                            pstat.executeUpdate();
+                        }
+                    } else {
+                        // Note the checkAndPut trick:
+                        // update {0} set {1}=? where {2}=? and {3}=?
+                        pstat = connection.prepareStatement(sqls.getUpdateSqlWithoutContent());
+                        pstat.setLong(1, newTS);
+                        pstat.setString(2, resPath);
+                        pstat.setLong(3, oldTS);
+                        int result = pstat.executeUpdate();
+                        if (result != 1) {
+                            long realTime = getResourceTimestamp(resPath);
+                            throw new WriteConflictException("Overwriting conflict on " + resPath + ", expected old TS "
+                                    + oldTS + ", but it is " + realTime);
+                        }
+                        PreparedStatement pstat2 = null;
+                        try {
+                            // "update {0} set {1}=? where {3}=?"
+                            pstat2 = connection.prepareStatement(sqls.getUpdateContentSql());
+                            if (isContentOverflow(content, resPath)) {
+                                logger.debug("Overflow! resource path: {}, content size: {}", resPath, content.length);
+                                pstat2.setNull(1, Types.BLOB);
+                                pstat2.setString(2, resPath);
+                                RollbackablePushdown pushdown = writePushdown(resPath, ContentWriter.create(content));
+                                try {
+                                    int result2 = pstat2.executeUpdate();
+                                    if (result2 != 1)
+                                        throw new SQLException("expected to update 1 row for " + resPath + ", but got " + result2);
+                                } catch (Throwable e) {
+                                    pushdown.rollback();
+                                    throw e;
+                                } finally {
+                                    pushdown.close();
+                                }
+                            } else {
+                                pstat2.setBinaryStream(1,
+                                        new BufferedInputStream(new ByteArrayInputStream(content)));
+                                pstat2.setString(2, resPath);
+                                pstat2.executeUpdate();
+                            }
+                        } finally {
+                            JDBCConnectionManager.closeQuietly(pstat2);
+                        }
+                    }
+                }
+            }
+        });
+    }
+
     @Override
-    protected void deleteResourceImpl(String resPath) throws IOException {
+    protected void deleteResourceImpl(final String resPath) throws IOException {
         try {
-            resourceDAO.deleteResource(resPath);
+            boolean skipHdfs = isJsonMetadata(resPath);
+
+            JDBCResourceSQL sqls = getJDBCResourceSQL(getMetaTableName(resPath));
+            executeSql(new SqlOperation() {
+                @Override
+                public void execute(Connection connection) throws SQLException {
+                    pstat = connection.prepareStatement(sqls.getDeletePstatSql());
+                    pstat.setString(1, resPath);
+                    pstat.executeUpdate();
+                }
+            });
+
+            if (!skipHdfs) {
+                try {
+                    deletePushdown(resPath);
+                } catch (Throwable e) {
+                    throw new SQLException(e);
+                }
+            }
         } catch (SQLException e) {
             throw new IOException(e);
         }
@@ -157,21 +549,69 @@ protected String getReadableResourcePathImpl(String resPath) {
         return metadataIdentifier + "(key='" + resPath + "')@" + kylinConfig.getMetadataUrl();
     }
 
-    private String makeFolderPath(String folderPath) {
-        Preconditions.checkState(folderPath.startsWith("/"));
-        String lookForPrefix = folderPath.endsWith("/") ? folderPath : folderPath + "/";
-        return lookForPrefix;
+    @Override
+    protected String pushdownRootPath() {
+        String metastoreBigCellHdfsDirectory = kylinConfig.getMetastoreBigCellHdfsDirectory();
+        if (metastoreBigCellHdfsDirectory.endsWith("/"))
+            return metastoreBigCellHdfsDirectory + "resources-jdbc";
+        else
+            return metastoreBigCellHdfsDirectory + "/" + "resources-jdbc";
     }
 
-    protected JDBCResourceDAO getResourceDAO() {
-        return resourceDAO;
+    // visible for test
+    @Override
+    protected FileSystem pushdownFS() {
+        return super.pushdownFS();
     }
 
-    public long getQueriedSqlNum() {
-        return resourceDAO.getQueriedSqlNum();
+    @Override
+    protected boolean isUnreachableException(Throwable ex) {
+        if (super.isUnreachableException(ex)) {
+            return true;
+        }
+
+        if (ex instanceof SocketTimeoutException)
+            return true;
+
+        List<String> exceptionList = new ArrayList<>();
+        exceptionList.add(ex.getClass().getName());
+
+        Throwable t = ex.getCause();
+        int depth = 0;
+        while (t != null && depth < 5) {
+            exceptionList.add(t.getClass().getName());
+            depth++;
+            if (t instanceof ConnectException) {
+                return true;
+            }
+            t = t.getCause();
+        }
+
+        logger.trace("Not an unreachable exception with causes {}", exceptionList);
+        return false;
     }
 
-    public static void checkScheme(StorageURL url) {
-        Preconditions.checkState(JDBC_SCHEME.equals(url.getScheme()));
+    public String getMetaTableName(String resPath) {
+        if (isRootPath(resPath)) {
+            throw new IllegalArgumentException("Root path '/' does not map to a single meta table");
+        }
+
+        if (resPath.startsWith(ResourceStore.BAD_QUERY_RESOURCE_ROOT)
+                || resPath.startsWith(ResourceStore.EXECUTE_OUTPUT_RESOURCE_ROOT)
+                || resPath.startsWith(ResourceStore.TEMP_STATMENT_RESOURCE_ROOT)) {
+            return tableNames[1];
+        } else {
+            return tableNames[0];
+        }
+    }
+
+    private JDBCResourceSQL getJDBCResourceSQL(String metaTableName) {
+        return new JDBCResourceSQL(kylinConfig.getMetadataDialect(), metaTableName, META_TABLE_KEY, META_TABLE_TS,
+                META_TABLE_CONTENT);
     }
+
+    public boolean isRootPath(String path) {
+        return "/".equals(path);
+    }
+
 }
\ No newline at end of file
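
The "checkAndPut trick" noted in the code above is plain optimistic locking: the timestamp
column doubles as a version number, and the UPDATE only succeeds while the stored timestamp
still matches the one the caller read. A minimal standalone sketch of the same pattern (the
table and column names here are illustrative, not the SQL generated by JDBCResourceSQL):

    // Compare-and-swap on the timestamp column; 0 rows updated means a concurrent writer won.
    static void casTimestamp(java.sql.Connection conn, String resPath, long oldTS, long newTS)
            throws java.sql.SQLException {
        try (java.sql.PreparedStatement ps = conn.prepareStatement(
                "UPDATE META_TABLE SET TS = ? WHERE META_KEY = ? AND TS = ?")) {
            ps.setLong(1, newTS);
            ps.setString(2, resPath);
            ps.setLong(3, oldTS);
            if (ps.executeUpdate() != 1) {
                // the row's TS no longer equals oldTS (or the row is gone)
                throw new java.sql.SQLException("write conflict on " + resPath);
            }
        }
    }
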
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/PushdownResourceStore.java b/core-common/src/main/java/org/apache/kylin/common/persistence/PushdownResourceStore.java
new file mode 100644
index 0000000000..cdf5eb4e48
--- /dev/null
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/PushdownResourceStore.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.common.persistence;
+
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.kylin.common.KylinConfig;
+import org.apache.kylin.common.util.BytesUtil;
+import org.apache.kylin.common.util.HadoopUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A big resource may not fit in a store cell. When that happens, HDFS becomes the fallback storage.
+ *
+ * This class helps push down big resources to HDFS.
+ * - An empty byte array is saved to the ResourceStore as an indicator of pushdown.
+ * - The big resource is saved as an HDFS file according to its resource path.
+ * - Methods like checkAndPut() do not work on such big resources, because HDFS lacks transaction support.
+ */
+abstract public class PushdownResourceStore extends ResourceStore {
+    private static final Logger logger = LoggerFactory.getLogger(PushdownResourceStore.class);
+
+    protected PushdownResourceStore(KylinConfig kylinConfig) {
+        super(kylinConfig);
+    }
+
+    final protected void putResourceImpl(String resPath, ContentWriter content, long ts) throws IOException {
+        if (content.isBigContent())
+            putBigResource(resPath, content, ts);
+        else
+            putSmallResource(resPath, content, ts);
+    }
+
+    abstract protected void putSmallResource(String resPath, ContentWriter content, long ts) throws IOException;
+
+    final void putBigResource(String resPath, ContentWriter content, long newTS) throws IOException {
+
+        // pushdown the big resource to DFS file
+        RollbackablePushdown pushdown = writePushdown(resPath, content);
+
+        try {
+            // write a marker in resource store, to indicate the resource is now available
+            logger.debug("Writing marker for big resource {}", resPath);
+            putResourceWithRetry(resPath, ContentWriter.create(BytesUtil.EMPTY_BYTE_ARRAY), newTS);
+
+        } catch (Throwable ex) {
+            pushdown.rollback();
+            throw ex;
+        } finally {
+            pushdown.close();
+        }
+    }
+
+    protected RollbackablePushdown writePushdown(String resPath, ContentWriter content) throws IOException {
+        return new RollbackablePushdown(resPath, content);
+    }
+
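+    /**
+     * Write protocol: write the content to a temp file, back up any existing file, then
+     * rename the temp file into place (rename being the closest thing to an atomic step
+     * on HDFS). rollback() restores the backup; close() removes the temp and backup files.
+     */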
+    public class RollbackablePushdown implements AutoCloseable {
+        FileSystem fs;
+        Path tempPath;
+        Path realPath;
+        Path backPath;
+        boolean hasOldFile;
+        boolean hasRollback = false;
+
+        private RollbackablePushdown(String resPath, ContentWriter content) throws IOException {
+            int salt = System.identityHashCode(resPath) + System.identityHashCode(content);
+            tempPath = pushdownPath(resPath + ".temp." + salt);
+            realPath = pushdownPath(resPath);
+            backPath = pushdownPath(resPath + ".orig." + salt);
+            fs = pushdownFS();
+
+            if (fs.exists(tempPath))
+                fs.delete(tempPath, true);
+
+            logger.debug("Writing pushdown file {}", tempPath);
+            try (DataOutputStream out = fs.create(tempPath, true)) {
+                content.write(out);
+            } catch (IOException ex) {
+                close();
+                throw ex;
+            }
+
+            try {
+                hasOldFile = fs.exists(realPath);
+                if (hasOldFile) {
+                    logger.debug("Backup {} to {}", realPath, backPath);
+                    fs.rename(realPath, backPath);
+                }
+            } catch (IOException ex) {
+                close();
+                throw ex;
+            }
+
+            logger.debug("Move {} to {}", tempPath, realPath);
+            try {
+                fs.rename(tempPath, realPath);
+            } catch (IOException ex) {
+                rollback();
+                close();
+                throw ex;
+            }
+        }
+
+        public void rollback() {
+            if (hasRollback)
+                return;
+
+            hasRollback = true;
+
+            try {
+                logger.error("Rollback {} from {}", realPath, hasOldFile ? backPath.toString() : "<empty>");
+
+                if (fs.exists(realPath))
+                    fs.delete(realPath, true);
+
+                if (hasOldFile)
+                    fs.rename(backPath, realPath);
+
+            } catch (IOException ex2) {
+                logger.error("Rollback failed", ex2);
+            }
+        }
+
+        @Override
+        public void close() {
+            try {
+                if (fs.exists(tempPath))
+                    fs.delete(tempPath, true);
+            } catch (IOException e) {
+                logger.error("Error cleaning up " + tempPath, e);
+            }
+
+            try {
+                if (fs.exists(backPath))
+                    fs.delete(backPath, true);
+            } catch (IOException e) {
+                logger.error("Error cleaning up " + backPath, e);
+            }
+        }
+
+    }
+
+    protected InputStream openPushdown(String resPath) throws IOException {
+        try {
+            Path p = pushdownPath(resPath);
+            FileSystem fs = pushdownFS();
+            if (fs.exists(p))
+                return fs.open(p);
+            else
+                throw new FileNotFoundException(p.toString() + "  (FS: " + fs + ")");
+
+        } catch (Exception ex) {
+            throw new IOException("Failed to read big resource " + resPath, ex);
+        }
+    }
+
+    abstract protected String pushdownRootPath();
+
+    protected FileSystem pushdownFS() {
+        return HadoopUtil.getFileSystem(new Path(kylinConfig.getMetastoreBigCellHdfsDirectory()));
+    }
+
+    final protected Path pushdownPath(String resPath) {
+        Path p = new Path(pushdownRootPath() + resPath);
+        return Path.getPathWithoutSchemeAndAuthority(p);
+    }
+
+    protected void deletePushdown(String resPath) throws IOException {
+        deletePushdownFile(pushdownPath(resPath));
+    }
+
+    private void deletePushdownFile(Path path) throws IOException {
+        FileSystem fileSystem = pushdownFS();
+
+        if (fileSystem.exists(path)) {
+            fileSystem.delete(path, true);
+            logger.debug("Delete temp file success. Temp file: {} .", path);
+        } else {
+            logger.debug("{} is not exists in the file system.", path);
+        }
+    }
+}
\ No newline at end of file
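
Stores built on this class are expected to drive RollbackablePushdown the way putBigResource()
does: push the content down first, then write the small marker row, and undo the pushdown if
the marker write fails. A minimal sketch, as if written inside a subclass (writeMarkerRow is a
hypothetical stand-in for the store-specific marker write, assumed to throw IOException):

    // Sketch: keep the store row and the HDFS file consistent under failure.
    void putBigResourceSketch(String resPath, ContentWriter content, long ts) throws IOException {
        RollbackablePushdown pushdown = writePushdown(resPath, content);
        try {
            writeMarkerRow(resPath, ts); // hypothetical marker write, e.g. an empty-bytes row
        } catch (Throwable ex) {
            pushdown.rollback(); // restore the previous HDFS state
            throw ex;
        } finally {
            pushdown.close(); // clean up temp and backup files either way
        }
    }
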
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/RawResource.java b/core-common/src/main/java/org/apache/kylin/common/persistence/RawResource.java
index 6c155a23a5..932228151c 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/RawResource.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/RawResource.java
@@ -6,29 +6,68 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *     http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
-*/
+ */
 
 package org.apache.kylin.common.persistence;
 
+import java.io.IOException;
 import java.io.InputStream;
 
-/**
- */
-public class RawResource {
+import org.apache.commons.io.IOUtils;
+
+public class RawResource implements AutoCloseable {
 
-    public final InputStream inputStream;
-    public final long timestamp;
+    private final String path;
+    private final long lastModified;
+    private final InputStream content;
+
+    public RawResource(String path, long lastModified) {
+        this(path, lastModified, (InputStream) null);
+    }
+
+    public RawResource(String path, long lastModified, InputStream content) {
+        this.path = path;
+        this.lastModified = lastModified;
+        this.content = content;
+    }
+
+    public RawResource(String path, long lastModified, IOException brokenContentException) {
+        this(path, lastModified, wrap(brokenContentException));
+    }
+
+    private static InputStream wrap(final IOException brokenContentException) {
+        return new InputStream() {
+            @Override
+            public int read() throws IOException {
+                throw brokenContentException;
+            }
+        };
+    }
+
+    public String path() {
+        return path;
+    }
+
+    public long lastModified() {
+        return lastModified;
+    }
+
+    public InputStream content() {
+        return content;
+    }
 
-    public RawResource(InputStream resource, long timestamp) {
-        this.inputStream = resource;
-        this.timestamp = timestamp;
+    @Override
+    public void close() {
+        if (content != null) {
+            IOUtils.closeQuietly(content);
+        }
     }
 }
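
With RawResource now AutoCloseable, callers can use try-with-resources to guarantee the
underlying stream is released. A minimal usage sketch (assuming the store's public
getResource(String) returns the RawResource produced by getResourceImpl above):

    // Sketch: read a resource's bytes; a broken entry throws its stored IOException on read().
    static byte[] readResource(ResourceStore store, String path) throws IOException {
        try (RawResource raw = store.getResource(path)) {
            if (raw == null)
                return null; // resource does not exist
            return org.apache.commons.io.IOUtils.toByteArray(raw.content());
        }
    }
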
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceParallelCopier.java b/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceParallelCopier.java
new file mode 100644
index 0000000000..93d0373ca9
--- /dev/null
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceParallelCopier.java
@@ -0,0 +1,359 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kylin.common.persistence;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.kylin.common.persistence.ResourceStore.VisitFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ResourceParallelCopier {
+
+    private static final Logger logger = LoggerFactory.getLogger(ResourceParallelCopier.class);
+
+    final private ResourceStore src;
+    final private ResourceStore dst;
+
+    private int threadCount = 5;
+    private int groupSize = 200;
+    private int heartBeatSec = 20;
+    private int retry = 2;
+
+    public ResourceParallelCopier(ResourceStore src, ResourceStore dst) {
+        this.src = src;
+        this.dst = dst;
+    }
+
+    public void setThreadCount(int threadCount) {
+        this.threadCount = threadCount;
+    }
+
+    public void setGroupSize(int groupSize) {
+        this.groupSize = groupSize;
+    }
+
+    public void setHeartBeatSec(int heartBeatSec) {
+        this.heartBeatSec = heartBeatSec;
+    }
+
+    public void setRetry(int retry) {
+        this.retry = retry;
+    }
+
+    public Stats copy(String folder, String[] includes, String[] excludes) throws IOException {
+        return copy(folder, includes, excludes, new Stats());
+    }
+
+    public Stats copy(String folder, String[] includes, String[] excludes, Stats stats) throws IOException {
+        logger.info("Copy {} from {} to {}", folder, src, dst);
+
+        TreeMap<String, Integer> groups = calculateGroupsToCopy(folder, includes, excludes);
+        if (groups == null || groups.isEmpty())
+            return stats;
+
+        copyGroups(groups, includes, excludes, stats);
+
+        while (stats.hasError() && retry > 0) {
+            retry--;
+
+            stats.onRetry(stats.errorResource.get());
+            copyGroups(collectErrorGroups(stats), includes, excludes, stats);
+        }
+
+        logger.info("Done copy {} from {} to {}", folder, src, dst);
+        return stats;
+    }
+
+    private void copyGroups(TreeMap<String, Integer> groups, String[] includes, String[] excludes, Stats stats) {
+        stats.onAllStart(groups);
+
+        // parallel copy all groups
+        ExecutorService exec = Executors.newFixedThreadPool(threadCount);
+        try {
+            doCopyParallel(exec, groups, includes, excludes, stats);
+        } finally {
+            // await all parallel copy is done
+            exec.shutdown();
+            stats.heartBeat();
+            while (!exec.isTerminated()) {
+                try {
+                    exec.awaitTermination(heartBeatSec, TimeUnit.SECONDS);
+                    stats.heartBeat();
+                } catch (InterruptedException e) {
+                    logger.error("interruped", e);
+                }
+            }
+        }
+
+        stats.onAllDone();
+    }
+
+    private TreeMap<String, Integer> calculateGroupsToCopy(String folder, String[] includes, String[] excludes)
+            throws IOException {
+        NavigableSet<String> all = src.listResourcesRecursively(folder);
+        if (all == null || all.isEmpty())
+            return null;
+
+        int sizeBeforeFilter = all.size();
+
+        for (Iterator<String> it = all.iterator(); it.hasNext(); ) {
+            String path = it.next();
+            if (!ResourceTool.matchFilter(path, includes, excludes)) {
+                it.remove();
+            }
+        }
+
+        int sizeAfterFilter = all.size();
+        logger.info("{} resources (out of {}) to copy", sizeAfterFilter, sizeBeforeFilter);
+
+        // returns a list of prefixes, each represents a group of resources
+        TreeMap<String, Integer> groupCollector = new TreeMap<>();
+        divideGroups(all, "/", groupCollector);
+        return groupCollector;
+    }
+
+    private TreeMap<String, Integer> collectErrorGroups(Stats stats) {
+        TreeMap<String, Integer> newGroups = new TreeMap<>();
+
+        for (String errGroup : stats.errorGroups) {
+            newGroups.put(errGroup, stats.allGroups.get(errGroup));
+        }
+        for (String errResPath : stats.errorResourcePaths) {
+            newGroups.put(errResPath, 1);
+        }
+
+        return newGroups;
+    }
+
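+    // Recursively partitions the sorted path set into prefix groups of at most groupSize
+    // resources (where the paths allow it), extending the common prefix one character per
+    // level. Each resulting prefix becomes one unit of parallel copy work.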
+    void divideGroups(NavigableSet<String> resources, String prefixSoFar, TreeMap<String, Integer> groupCollector) {
+        if (resources.isEmpty())
+            return;
+        if (resources.size() <= groupSize) {
+            String group = longestCommonPrefix(resources, prefixSoFar);
+            groupCollector.put(group, resources.size());
+            return;
+        }
+
+        // the resources set is too big, divide it
+        TreeSet<String> newSet = new TreeSet<>();
+        String newPrefix = null;
+        int newPrefixLen = prefixSoFar.length() + 1;
+        for (String path : resources) {
+            String myPrefix = path.length() < newPrefixLen ? path : path.substring(0, newPrefixLen);
+            if (newPrefix != null && !myPrefix.equals(newPrefix)) {
+                // cut off last group
+                divideGroups(newSet, newPrefix, groupCollector);
+                newSet.clear();
+                newPrefix = null;
+            }
+
+            if (newPrefix == null)
+                newPrefix = myPrefix;
+
+            newSet.add(path);
+        }
+
+        // the last group
+        if (!newSet.isEmpty()) {
+            divideGroups(newSet, newPrefix, groupCollector);
+        }
+    }
+
+    String longestCommonPrefix(NavigableSet<String> strs, String prefixSoFar) {
+        // find minimal length
+        int minLen = Integer.MAX_VALUE;
+        for (String s : strs) {
+            minLen = Math.min(minLen, s.length());
+        }
+
+        for (int i = prefixSoFar.length(); i < minLen; i++) {
+            char c = strs.first().charAt(i);
+            for (String s : strs) {
+                if (s.charAt(i) != c)
+                    return s.substring(0, i);
+            }
+        }
+
+        return strs.first().substring(0, minLen);
+    }
+
+    private void doCopyParallel(ExecutorService exec, TreeMap<String, Integer> groups, final String[] includes,
+                                final String[] excludes, final Stats stats) {
+
+        for (final Map.Entry<String, Integer> entry : groups.entrySet()) {
+            exec.execute(new Runnable() {
+                @Override
+                public void run() {
+                    String group = entry.getKey();
+                    int expectResources = entry.getValue();
+
+                    stats.onGroupStart(group);
+                    try {
+                        int actualResources = copyGroup(group, includes, excludes, stats);
+                        stats.onGroupSuccess(group, expectResources, actualResources);
+                    } catch (Throwable ex) {
+                        stats.onGroupError(group, expectResources, ex);
+                    }
+                }
+            });
+        }
+    }
+
+    private int copyGroup(String group, final String[] includes, final String[] excludes, final Stats stats)
+            throws IOException {
+
+        int cut = group.lastIndexOf('/');
+        String folder = cut == 0 ? "/" : group.substring(0, cut);
+        final int[] count = new int[1];
+
+        src.visitFolderAndContent(folder, true, new VisitFilter(group), new ResourceStore.Visitor() {
+            @Override
+            public void visit(RawResource resource) {
+                String path = resource.path();
+                try {
+                    if (!ResourceTool.matchFilter(path, includes, excludes))
+                        return;
+
+                    count[0]++;
+                    stats.onResourceStart(path);
+                    long nBytes = dst.putResource(path, resource.content(), resource.lastModified());
+                    stats.onResourceSuccess(path, nBytes);
+                } catch (Exception ex) {
+                    stats.onResourceError(path, ex);
+                } finally {
+                    closeQuietly(resource);
+                }
+            }
+        });
+        return count[0];
+    }
+
+    private void closeQuietly(RawResource raw) {
+        try {
+            if (raw != null)
+                raw.close();
+        } catch (Exception e) {
+            // ignore
+        }
+    }
+
+    public static class Stats {
+
+        final public Map<String, Integer> allGroups = Collections.synchronizedMap(new TreeMap<String, Integer>());
+        final public Set<String> startedGroups = Collections.synchronizedSet(new TreeSet<String>());
+        final public Set<String> successGroups = Collections.synchronizedSet(new TreeSet<String>());
+        final public Set<String> errorGroups = Collections.synchronizedSet(new TreeSet<String>());
+
+        final public AtomicLong totalBytes = new AtomicLong();
+        final public AtomicInteger totalResource = new AtomicInteger();
+        final public AtomicInteger successResource = new AtomicInteger();
+        final public AtomicInteger errorResource = new AtomicInteger();
+        final public Set<String> errorResourcePaths = Collections.synchronizedSet(new TreeSet<String>());
+
+        public long createTime = System.nanoTime();
+        public long startTime;
+        public long endTime;
+
+        private void reset() {
+            startTime = endTime = 0;
+            allGroups.clear();
+            startedGroups.clear();
+            successGroups.clear();
+            errorGroups.clear();
+            totalBytes.set(0);
+            totalResource.set(0);
+            successResource.set(0);
+            errorResource.set(0);
+            errorResourcePaths.clear();
+        }
+
+        void onAllStart(TreeMap<String, Integer> groups) {
+            // retry enters here too, reset everything first
+            reset();
+
+            logger.debug("{} groups to copy in parallel", groups.size());
+            allGroups.putAll(groups);
+            startTime = System.nanoTime();
+        }
+
+        void onAllDone() {
+            endTime = System.nanoTime();
+        }
+
+        void onGroupStart(String group) {
+            logger.debug("Copying group {}*", group);
+            startedGroups.add(group);
+        }
+
+        void onGroupError(String group, int resourcesInGroup, Throwable ex) {
+            logger.error("Error copying group " + group, ex);
+            errorGroups.add(group);
+            errorResource.addAndGet(resourcesInGroup);
+        }
+
+        void onGroupSuccess(String group, int expectResources, int actualResources) {
+            successGroups.add(group);
+            if (actualResources != expectResources) {
+                logger.warn("Group {} expects {} resources but got {}", group, expectResources, actualResources);
+            }
+        }
+
+        void onResourceStart(String path) {
+            logger.trace("Copying {}", path);
+            totalResource.incrementAndGet();
+        }
+
+        void onResourceError(String path, Throwable ex) {
+            logger.error("Error copying " + path, ex);
+            errorResource.incrementAndGet();
+            errorResourcePaths.add(path);
+        }
+
+        void onResourceSuccess(String path, long nBytes) {
+            successResource.incrementAndGet();
+            totalBytes.addAndGet(nBytes);
+        }
+
+        void onRetry(int errorResourceCnt) {
+            // for progress printing
+        }
+
+        void heartBeat() {
+            // for progress printing
+        }
+
+        public boolean hasError() {
+            return errorResource.get() > 0;
+        }
+    }
+
+}
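
A caller is expected to drive the copier roughly as follows (the folder and include filter
are illustrative; only copy() and the setters are defined above):

    // Sketch: copy everything under /cube from one store to another with 10 workers.
    static void migrate(ResourceStore src, ResourceStore dst) throws IOException {
        ResourceParallelCopier copier = new ResourceParallelCopier(src, dst);
        copier.setThreadCount(10); // parallel copy threads
        copier.setRetry(3);        // re-copy failed groups up to 3 times
        ResourceParallelCopier.Stats stats = copier.copy("/", new String[] { "/cube" }, null);
        if (stats.hasError())
            throw new IOException(stats.errorResource.get() + " resources failed to copy");
    }
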
diff --git a/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceStore.java b/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceStore.java
index 9643e8dfe6..275d95a72c 100644
--- a/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceStore.java
+++ b/core-common/src/main/java/org/apache/kylin/common/persistence/ResourceStore.java
@@ -18,30 +18,30 @@
 
 package org.apache.kylin.common.persistence;
 
-import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.Closeable;
-import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.kylin.common.KylinConfig;
 import org.apache.kylin.common.StorageURL;
 import org.apache.kylin.common.util.ClassUtil;
 import org.apache.kylin.common.util.OptionsHelper;
-import org.apache.kylin.common.util.RandomUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -91,7 +91,8 @@ private static ResourceStore createResourceStore(KylinConfig kylinConfig) {
             Class<? extends ResourceStore> cls = ClassUtil.forName(clsName, ResourceStore.class);
             ResourceStore store = cls.getConstructor(KylinConfig.class).newInstance(kylinConfig);
             if (!store.exists(METASTORE_UUID_TAG)) {
-                store.putResource(METASTORE_UUID_TAG, new StringEntity(store.createMetaStoreUUID()), 0, StringEntity.serializer);
+                store.checkAndPutResource(METASTORE_UUID_TAG, new StringEntity(store.createMetaStoreUUID()), 0,
+                        StringEntity.serializer);
             }
             return store;
         } catch (Throwable e) {
@@ -120,42 +121,122 @@ public static ResourceStore getStore(KylinConfig kylinConfig) {
     protected ResourceStore(KylinConfig kylinConfig) {
         this.kylinConfig = kylinConfig;
     }
-    
+
     final public KylinConfig getConfig() {
         return kylinConfig;
     }
 
+    protected String createMetaStoreUUID() throws IOException {
+        return UUID.randomUUID().toString();
+    }
+
+    public String getMetaStoreUUID() throws IOException {
+        if (!exists(ResourceStore.METASTORE_UUID_TAG)) {
+            checkAndPutResource(ResourceStore.METASTORE_UUID_TAG, new StringEntity(createMetaStoreUUID()), 0,
+                    StringEntity.serializer);
+        }
+        StringEntity entity = getResource(ResourceStore.METASTORE_UUID_TAG, StringEntity.serializer);
+        return entity.toString();
+    }
+
     /**
-     * List resources and sub-folders under a given folder, return null if given path is not a folder
+     * Collect resources recursively under a folder, return empty list if folder does not exist
+     */
+    final public List<String> collectResourceRecursively(final String folderPath, final String suffix) throws IOException {
+        return new ExponentialBackoffRetry(this).doWithRetry(new Callable<List<String>>() {
+            @Override
+            public List<String> call() throws Exception {
+                final ArrayList<String> collector = Lists.newArrayList();
+                visitFolder(folderPath, true, new Visitor() {
+                    @Override
+                    public void visit(RawResource resource) {
+                        String path = resource.path();
+                        if (suffix == null || path.endsWith(suffix))
+                            collector.add(path);
+                    }
+                });
+                return collector;
+            }
+        });
+    }
+
+    /**
+     * List resources and sub-folders under a given folder, return null if folder does not exist or is empty
      */
     final public NavigableSet<String> listResources(String folderPath) throws IOException {
-        String path = norm(folderPath);
-        return listResourcesImpl(path, false);
+        return listResourcesImpl(norm(folderPath));
+    }
+
+    // sub-class may choose to override for better performance
+    protected NavigableSet<String> listResourcesImpl(String folderPath) throws IOException {
+        List<String> list = collectResourceRecursively(folderPath, null);
+        if (list.isEmpty())
+            return null;
+
+        TreeSet<String> result = new TreeSet();
+        String root = norm(folderPath);
+
+        for (String p : list) {
+            int cut = p.indexOf('/', root.length() + 1);
+            result.add(cut < 0 ? p : p.substring(0, cut));
+        }
+        return result;
     }
 
     /**
-     * List resources and its full path, only support HBase now.
+     * List resources recursively under a folder, return null if folder does not exist or is empty
      */
     final public NavigableSet<String> listResourcesRecursively(String folderPath) throws IOException {
-        String path = norm(folderPath);
-        return listResourcesImpl(path, true);
+        return listResourcesRecursivelyImpl(norm(folderPath));
+    }
+
+    // sub-class may choose to override
+    protected NavigableSet<String> listResourcesRecursivelyImpl(String folderPath) throws IOException {
+        List<String> list = collectResourceRecursively(folderPath, null);
+        if (list.isEmpty())
+            return null;
+        else
+            return new TreeSet<String>(list);
     }
 
     /**
-     * return null if given path is not a folder or not exists
+     * Read all resources under a folder. Return empty list if folder not exist.
+     *

  (This diff was longer than 20,000 lines, and has been truncated...)


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services