You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@impala.apache.org by mi...@apache.org on 2018/07/16 15:53:57 UTC
[4/5] impala git commit: IMPALA-7295: Remove
IMPALA_MINICLUSTER_PROFILE=2
IMPALA-7295: Remove IMPALA_MINICLUSTER_PROFILE=2
This patch removes the use of IMPALA_MINICLUSTER_PROFILE. The code that
used IMPALA_MINICLUSTER_PROFILE=2 is removed, and the code from
IMPALA_MINICLUSTER_PROFILE=3 becomes the default. To limit the number of
code changes in this patch, the shims themselves are left unchanged; the
shims for IMPALA_MINICLUSTER_PROFILE=3 automatically become the default
implementation.
Testing:
- Ran core and exhaustive tests
Change-Id: Iba4a81165b3d2012dc04d4115454372c41e39f08
Reviewed-on: http://gerrit.cloudera.org:8080/10940
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>
Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/a203733f
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/a203733f
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/a203733f
Branch: refs/heads/master
Commit: a203733fac3e1e37df8abeee39a88d187153a8c5
Parents: d366011
Author: Fredy Wijaya <fw...@cloudera.com>
Authored: Thu Jul 12 17:01:13 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Jul 14 01:03:18 2018 +0000
----------------------------------------------------------------------
bin/create-test-configuration.sh | 12 +-
bin/impala-config.sh | 102 +----
bin/jenkins/build-all-flag-combinations.sh | 16 +-
fe/pom.xml | 450 +++++++------------
.../service/rpc/thrift/TGetCatalogsReq.java | 24 -
.../hive/service/rpc/thrift/TGetColumnsReq.java | 24 -
.../service/rpc/thrift/TGetFunctionsReq.java | 25 --
.../hive/service/rpc/thrift/TGetInfoReq.java | 24 -
.../hive/service/rpc/thrift/TGetSchemasReq.java | 24 -
.../hive/service/rpc/thrift/TGetTablesReq.java | 24 -
.../authorization/SentryAuthProvider.java | 74 ---
.../java/org/apache/impala/compat/HdfsShim.java | 31 --
.../org/apache/impala/compat/MetastoreShim.java | 127 ------
.../impala/compat/MiniclusterProfile.java | 25 --
.../java/org/apache/impala/util/SentryUtil.java | 49 --
.../HBaseTestDataRegionAssignment.java | 139 ------
.../apache/impala/analysis/ParquetHelper.java | 341 --------------
.../authorization/ImpalaActionFactory.java | 57 ---
.../authorization/ImpalaPrivilegeModel.java | 43 --
.../authorization/SentryAuthProvider.java | 80 ----
.../java/org/apache/impala/compat/HdfsShim.java | 30 --
.../org/apache/impala/compat/MetastoreShim.java | 127 ------
.../impala/compat/MiniclusterProfile.java | 25 --
.../java/org/apache/impala/util/SentryUtil.java | 54 ---
.../authorization/ImpalaActionFactoryTest.java | 132 ------
.../HBaseTestDataRegionAssignment.java | 164 -------
.../apache/impala/analysis/ParquetHelper.java | 341 ++++++++++++++
.../authorization/ImpalaActionFactory.java | 57 +++
.../authorization/ImpalaPrivilegeModel.java | 43 ++
.../authorization/SentryAuthProvider.java | 80 ++++
.../java/org/apache/impala/compat/HdfsShim.java | 30 ++
.../org/apache/impala/compat/MetastoreShim.java | 127 ++++++
.../java/org/apache/impala/util/SentryUtil.java | 54 +++
.../impala/analysis/AuthorizationTest.java | 13 +-
.../authorization/ImpalaActionFactoryTest.java | 132 ++++++
.../apache/impala/common/FrontendTestBase.java | 18 +-
.../HBaseTestDataRegionAssignment.java | 164 +++++++
impala-parent/pom.xml | 49 +-
testdata/bin/run-hbase.sh | 4 +-
testdata/bin/run-hive-server.sh | 6 +-
testdata/bin/run-mini-dfs.sh | 3 -
.../common/etc/init.d/common.tmpl | 12 +-
.../common/etc/init.d/yarn-common | 14 +-
testdata/pom.xml | 21 -
.../queries/QueryTest/views-compatibility.test | 10 +-
tests/common/environ.py | 3 -
tests/metadata/test_views_compatibility.py | 3 -
tests/query_test/test_partitioning.py | 6 +-
48 files changed, 1264 insertions(+), 2149 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/bin/create-test-configuration.sh
----------------------------------------------------------------------
diff --git a/bin/create-test-configuration.sh b/bin/create-test-configuration.sh
index b6781c1..812154d 100755
--- a/bin/create-test-configuration.sh
+++ b/bin/create-test-configuration.sh
@@ -95,11 +95,7 @@ if [ $CREATE_METASTORE -eq 1 ]; then
# Hive schema SQL scripts include other scripts using \i, which expects absolute paths.
# Switch to the scripts directory to make this work.
pushd ${HIVE_HOME}/scripts/metastore/upgrade/postgres
- if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
- psql -q -U hiveuser -d ${METASTORE_DB} -f hive-schema-1.1.0.postgres.sql
- elif [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
- psql -q -U hiveuser -d ${METASTORE_DB} -f hive-schema-2.1.1.postgres.sql
- fi
+ psql -q -U hiveuser -d ${METASTORE_DB} -f hive-schema-2.1.1.postgres.sql
popd
# Increase the size limit of PARAM_VALUE from SERDE_PARAMS table to be able to create
# HBase tables with large number of columns.
@@ -164,11 +160,7 @@ fi
generate_config postgresql-hive-site.xml.template hive-site.xml
generate_config log4j.properties.template log4j.properties
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
- generate_config hive-log4j2.properties.template hive-log4j2.properties
-else
- generate_config hive-log4j.properties.template hive-log4j.properties
-fi
+generate_config hive-log4j2.properties.template hive-log4j2.properties
generate_config hbase-site.xml.template hbase-site.xml
generate_config authz-policy.ini.template authz-policy.ini
generate_config sentry-site.xml.template sentry-site.xml
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/bin/impala-config.sh
----------------------------------------------------------------------
diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index 771ca2b..0091cd3 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -157,67 +157,19 @@ fi
export IMPALA_KUDU_VERSION=a954418
unset IMPALA_KUDU_URL
-
-# Versions of Hadoop ecosystem dependencies.
-# ------------------------------------------
-# IMPALA_MINICLUSTER_PROFILE can have two values:
-# 2 represents:
-# Hadoop 2.6
-# HBase 1.2
-# Hive 1.1
-# Sentry 1.5
-# Parquet 1.5
-# Llama (used for Mini KDC) 1.0
-# 3 represents:
-# Hadoop 3.0
-# HBase 2.0
-# Hive 2.1
-# Sentry 2.0
-# Parquet 1.9
-#
-# Impala 3.x defaults to profile 3 and marks profile 2 deprecated,
-# so that it may be removed in the 3.x line.
-
-DEFAULT_MINICLUSTER_PROFILE=3
-: ${IMPALA_MINICLUSTER_PROFILE_OVERRIDE:=$DEFAULT_MINICLUSTER_PROFILE}
-
: ${CDH_DOWNLOAD_HOST:=native-toolchain.s3.amazonaws.com}
export CDH_DOWNLOAD_HOST
-
-if [[ $IMPALA_MINICLUSTER_PROFILE_OVERRIDE == 2 ]]; then
- echo "IMPALA_MINICLUSTER_PROFILE=2 is deprecated and may be removed in Impala 3.x"
-
- export IMPALA_MINICLUSTER_PROFILE=2
- export CDH_MAJOR_VERSION=5
- export CDH_BUILD_NUMBER=44
- export IMPALA_HADOOP_VERSION=2.6.0-cdh5.16.0-SNAPSHOT
- export IMPALA_HBASE_VERSION=1.2.0-cdh5.16.0-SNAPSHOT
- export IMPALA_HIVE_VERSION=1.1.0-cdh5.16.0-SNAPSHOT
- export IMPALA_SENTRY_VERSION=1.5.1-cdh5.16.0-SNAPSHOT
- export IMPALA_PARQUET_VERSION=1.5.0-cdh5.16.0-SNAPSHOT
- export IMPALA_LLAMA_MINIKDC_VERSION=1.0.0
- export IMPALA_KITE_VERSION=1.0.0-cdh5.16.0-SNAPSHOT
- # Kudu version used to identify Java client jar from maven
- export KUDU_JAVA_VERSION=1.8.0-cdh5.16.0-SNAPSHOT
- # IMPALA-6972: Temporarily disable Hive parallelism during dataload
- # The Hive version used for IMPALA_MINICLUSTER_PROFIILE=2 has a concurrency issue
- # that intermittent fails parallel dataload.
- export IMPALA_SERIAL_DATALOAD=1
-
-elif [[ $IMPALA_MINICLUSTER_PROFILE_OVERRIDE == 3 ]]; then
- export IMPALA_MINICLUSTER_PROFILE=3
- export CDH_MAJOR_VERSION=6
- export CDH_BUILD_NUMBER=422770
- export IMPALA_HADOOP_VERSION=3.0.0-cdh6.x-SNAPSHOT
- export IMPALA_HBASE_VERSION=2.0.0-cdh6.x-SNAPSHOT
- export IMPALA_HIVE_VERSION=2.1.1-cdh6.x-SNAPSHOT
- export IMPALA_SENTRY_VERSION=2.0.0-cdh6.x-SNAPSHOT
- export IMPALA_PARQUET_VERSION=1.9.0-cdh6.x-SNAPSHOT
- export IMPALA_AVRO_JAVA_VERSION=1.8.2-cdh6.x-SNAPSHOT
- export IMPALA_LLAMA_MINIKDC_VERSION=1.0.0
- export IMPALA_KITE_VERSION=1.0.0-cdh6.x-SNAPSHOT
- export KUDU_JAVA_VERSION=1.8.0-cdh6.x-SNAPSHOT
-fi
+export CDH_MAJOR_VERSION=6
+export CDH_BUILD_NUMBER=422770
+export IMPALA_HADOOP_VERSION=3.0.0-cdh6.x-SNAPSHOT
+export IMPALA_HBASE_VERSION=2.0.0-cdh6.x-SNAPSHOT
+export IMPALA_HIVE_VERSION=2.1.1-cdh6.x-SNAPSHOT
+export IMPALA_SENTRY_VERSION=2.0.0-cdh6.x-SNAPSHOT
+export IMPALA_PARQUET_VERSION=1.9.0-cdh6.x-SNAPSHOT
+export IMPALA_AVRO_JAVA_VERSION=1.8.2-cdh6.x-SNAPSHOT
+export IMPALA_LLAMA_MINIKDC_VERSION=1.0.0
+export IMPALA_KITE_VERSION=1.0.0-cdh6.x-SNAPSHOT
+export KUDU_JAVA_VERSION=1.8.0-cdh6.x-SNAPSHOT
unset IMPALA_HADOOP_URL
unset IMPALA_HBASE_URL
@@ -477,10 +429,6 @@ elif [ "${TARGET_FILESYSTEM}" = "local" ]; then
export FILESYSTEM_PREFIX="${LOCAL_FS}"
elif [ "${TARGET_FILESYSTEM}" = "hdfs" ]; then
if [[ "${ERASURE_CODING}" = true ]]; then
- if [[ "${IMPALA_MINICLUSTER_PROFILE}" -lt 3 ]]; then
- echo "Hadoop 3 is required for HDFS erasure coding."
- return 1
- fi
export HDFS_ERASURECODE_POLICY="RS-3-2-1024k"
export HDFS_ERASURECODE_PATH="/test-warehouse"
fi
@@ -548,14 +496,12 @@ export HADOOP_CLASSPATH="${HADOOP_CLASSPATH-}:${HADOOP_HOME}/share/hadoop/tools/
export LZO_JAR_PATH="$HADOOP_LZO/build/hadoop-lzo-0.4.15.jar"
HADOOP_CLASSPATH+=":$LZO_JAR_PATH"
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
- # Beware of adding entries from $HADOOP_HOME here, because they can change
- # the order of the classpath, leading to configuration not showing up first.
- HADOOP_CLASSPATH="$LZO_JAR_PATH"
- # Add the path containing the hadoop-aws jar, which is required to access AWS from the
- # minicluster.
- HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${HADOOP_HOME}/share/hadoop/tools/lib/*"
-fi
+# Beware of adding entries from $HADOOP_HOME here, because they can change
+# the order of the classpath, leading to configuration not showing up first.
+HADOOP_CLASSPATH="$LZO_JAR_PATH"
+# Add the path containing the hadoop-aws jar, which is required to access AWS from the
+# minicluster.
+HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${HADOOP_HOME}/share/hadoop/tools/lib/*"
export MINI_DFS_BASE_DATA_DIR="$IMPALA_HOME/cdh-${CDH_MAJOR_VERSION}-hdfs-data"
export PATH="$HADOOP_HOME/bin:$PATH"
@@ -724,18 +670,16 @@ else
| sort | uniq`
fi
-if [[ $IMPALA_MINICLUSTER_PROFILE_OVERRIDE == 3 ]]; then
- # Check for minimum required Java version
- # Only issue Java version warning when running Java 7.
- if $JAVA -version 2>&1 | grep -q 'java version "1.7'; then
- cat << EOF
+# Check for minimum required Java version
+# Only issue Java version warning when running Java 7.
+if $JAVA -version 2>&1 | grep -q 'java version "1.7'; then
+ cat << EOF
WARNING: Your development environment is configured for Hadoop 3 and Java 7. Hadoop 3
requires at least Java 8. Your JAVA binary currently points to $JAVA
and reports the following version:
EOF
- $JAVA -version
- echo
- fi
+ $JAVA -version
+ echo
fi
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/bin/jenkins/build-all-flag-combinations.sh
----------------------------------------------------------------------
diff --git a/bin/jenkins/build-all-flag-combinations.sh b/bin/jenkins/build-all-flag-combinations.sh
index 841b668..e6dfc1c 100755
--- a/bin/jenkins/build-all-flag-combinations.sh
+++ b/bin/jenkins/build-all-flag-combinations.sh
@@ -29,32 +29,22 @@ trap 'echo Error in $0 at line $LINENO: $(cd "'$PWD'" && awk "NR == $LINENO" $0)
. bin/impala-config.sh
-# These are configurations for buildall, with a special sigil for
-# "minicluster profile" where appropriate.
+# These are configurations for buildall.
CONFIGS=(
# Test gcc builds with and without -so:
"-skiptests -noclean"
- "-skiptests -noclean -so -profile2"
"-skiptests -noclean -release"
"-skiptests -noclean -release -so -ninja"
# clang sanitizer builds:
"-skiptests -noclean -asan"
- "-skiptests -noclean -ubsan -so -ninja -profile2"
"-skiptests -noclean -tsan"
+ "-skiptests -noclean -ubsan -so -ninja"
)
FAILED=""
for CONFIG in "${CONFIGS[@]}"; do
- CONFIG2=${CONFIG/-profile2/}
- if [[ "$CONFIG" != "$CONFIG2" ]]; then
- CONFIG=$CONFIG2
- export IMPALA_MINICLUSTER_PROFILE_OVERRIDE=2
- else
- export IMPALA_MINICLUSTER_PROFILE_OVERRIDE=3
- fi
-
- DESCRIPTION="Options $CONFIG and profile $IMPALA_MINICLUSTER_PROFILE_OVERRIDE"
+ DESCRIPTION="Options $CONFIG"
if [[ $# == 1 && $1 == "--dryrun" ]]; then
echo $DESCRIPTION
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/pom.xml
----------------------------------------------------------------------
diff --git a/fe/pom.xml b/fe/pom.xml
index 7e73242..70ff9cc 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -186,6 +186,18 @@ under the License.
</dependency>
<dependency>
+ <groupId>org.apache.sentry</groupId>
+ <artifactId>sentry-policy-engine</artifactId>
+ <version>${sentry.version}</version>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.parquet</groupId>
+ <artifactId>parquet-hadoop-bundle</artifactId>
+ <version>${parquet.version}</version>
+ </dependency>
+
+ <dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<version>${hbase.version}</version>
@@ -258,6 +270,7 @@ under the License.
</exclusion>
</exclusions>
</dependency>
+
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-serde</artifactId>
@@ -272,6 +285,109 @@ under the License.
</dependency>
<dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-exec</artifactId>
+ <version>${hive.version}</version>
+ <exclusions>
+ <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+ <exclusion>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ </exclusion>
+ <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+ <exclusion>
+ <groupId>net.minidev</groupId>
+ <artifactId>json-smart</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-common</artifactId>
+ <version>${hive.version}</version>
+ <exclusions>
+ <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+ <exclusion>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ </exclusion>
+ <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+ <exclusion>
+ <groupId>net.minidev</groupId>
+ <artifactId>json-smart</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-jdbc</artifactId>
+ <version>${hive.version}</version>
+ <scope>test</scope>
+ <exclusions>
+ <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+ <exclusion>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>net.minidev</groupId>
+ <artifactId>json-smart</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-hbase-handler</artifactId>
+ <version>${hive.version}</version>
+ <exclusions>
+ <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+ <exclusion>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ </exclusion>
+ <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+ <exclusion>
+ <groupId>net.minidev</groupId>
+ <artifactId>json-smart</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive</groupId>
+ <artifactId>hive-metastore</artifactId>
+ <version>${hive.version}</version>
+ <exclusions>
+ <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+ <exclusion>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ </exclusion>
+ <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+ <exclusion>
+ <groupId>net.minidev</groupId>
+ <artifactId>json-smart</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
+ <groupId>org.apache.hive.shims</groupId>
+ <artifactId>hive-shims-common</artifactId>
+ <version>${hive.version}</version>
+ <exclusions>
+ <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+ <exclusion>
+ <groupId>org.apache.logging.log4j</groupId>
+ <artifactId>log4j-slf4j-impl</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+
+ <dependency>
<groupId>org.apache.kudu</groupId>
<artifactId>kudu-client</artifactId>
<version>${kudu.version}</version>
@@ -490,19 +606,6 @@ under the License.
-->
<source>${project.basedir}/generated-sources/gen-java</source>
<source>${project.build.directory}/generated-sources/cup</source>
- <source>${project.basedir}/src/compat-minicluster-profile-${env.IMPALA_MINICLUSTER_PROFILE}/java</source>
- </sources>
- </configuration>
- </execution>
- <execution>
- <id>add-test-source</id>
- <phase>generate-test-sources</phase>
- <goals>
- <goal>add-test-source</goal>
- </goals>
- <configuration>
- <sources>
- <source>${project.basedir}/src/compat-minicluster-profile-${env.IMPALA_MINICLUSTER_PROFILE}/test/java</source>
</sources>
</configuration>
</execution>
@@ -568,6 +671,46 @@ under the License.
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-enforcer-plugin</artifactId>
+ <version>3.0.0-M1</version>
+ <executions>
+ <execution>
+ <id>enforce-banned-dependencies</id>
+ <goals>
+ <goal>enforce</goal>
+ </goals>
+ <configuration>
+ <rules>
+ <bannedDependencies>
+ <excludes>
+ <exclude>org.apache.logging.log4j:log4j-slf4j-impl</exclude>
+ <!-- Assert that we only use artifacts from only the specified
+ version of these components. -->
+ <exclude>org.apache.hadoop:*</exclude>
+ <exclude>org.apache.hbase:*</exclude>
+ <exclude>org.apache.hive:*</exclude>
+ <exclude>org.apache.kudu:*</exclude>
+ <exclude>org.apache.sentry:*</exclude>
+ <exclude>org.apache.parquet:*</exclude>
+ </excludes>
+ <includes>
+ <include>org.apache.hadoop:*:${hadoop.version}</include>
+ <include>org.apache.hbase:*:${hbase.version}</include>
+ <include>org.apache.hive:*:${hive.version}</include>
+ <include>org.apache.kudu:*:${kudu.version}</include>
+ <include>org.apache.sentry:*:${sentry.version}</include>
+ <include>org.apache.parquet:*:${parquet.version}</include>
+ </includes>
+ </bannedDependencies>
+ </rules>
+ <fail>true</fail>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+
</plugins>
<pluginManagement>
<plugins>
@@ -730,270 +873,23 @@ under the License.
</plugins>
</build>
</profile>
+ </profiles>
- <profile>
- <id>impala-minicluster-profile-2</id>
- <activation>
- <property>
- <name>env.IMPALA_MINICLUSTER_PROFILE</name>
- <value>2</value>
- </property>
- </activation>
- <dependencies>
- <dependency>
- <groupId>org.apache.sentry</groupId>
- <artifactId>sentry-policy-db</artifactId>
- <version>${sentry.version}</version>
- </dependency>
- <dependency>
- <groupId>com.twitter</groupId>
- <artifactId>parquet-hadoop-bundle</artifactId>
- <version>${parquet.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-exec</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
- <exclusion>
- <groupId>net.minidev</groupId>
- <artifactId>json-smart</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-common</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-jdbc</artifactId>
- <version>${hive.version}</version>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-hbase-handler</artifactId>
- <version>${hive.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
- <version>${hive.version}</version>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-antrun-plugin</artifactId>
- <version>1.8</version>
- <executions>
- <execution>
- <id>generate-minicluster-profile-2-compat-shims</id>
- <phase>generate-sources</phase>
- <goals>
- <goal>run</goal>
- </goals>
- <configuration>
- <!-- Generates ParquetHelper for minicluster profile 2. -->
- <target>
- <echo file="${project.build.directory}/gen-compat-shims.sh">
- echo Generating shims for Minicluster Profile 2
- mkdir -p generated-sources/gen-java/org/apache/impala/analysis
- sed -e s,org.apache.parquet,parquet,g src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java > generated-sources/gen-java/org/apache/impala/analysis/ParquetHelper.java
- </echo>
- <exec executable="bash" dir="${project.basedir}" failonerror="true">
- <arg line="-ex ${project.build.directory}/gen-compat-shims.sh" />
- </exec>
- </target>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
+ <dependencyManagement>
+ <dependencies>
+ <!--
+ Pin org.glassfish:javax.el explicitly.
- </profile>
- <profile>
- <id>impala-minicluster-profile-3</id>
- <activation>
- <property>
- <name>env.IMPALA_MINICLUSTER_PROFILE</name>
- <value>3</value>
- </property>
- </activation>
- <dependencies>
- <dependency>
- <groupId>org.apache.sentry</groupId>
- <artifactId>sentry-policy-engine</artifactId>
- <version>${sentry.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.parquet</groupId>
- <artifactId>parquet-hadoop-bundle</artifactId>
- <version>${parquet.version}</version>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-exec</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
- <exclusion>
- <groupId>org.apache.logging.log4j</groupId>
- <artifactId>log4j-slf4j-impl</artifactId>
- </exclusion>
- <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
- <exclusion>
- <groupId>net.minidev</groupId>
- <artifactId>json-smart</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-common</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
- <exclusion>
- <groupId>org.apache.logging.log4j</groupId>
- <artifactId>log4j-slf4j-impl</artifactId>
- </exclusion>
- <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
- <exclusion>
- <groupId>net.minidev</groupId>
- <artifactId>json-smart</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-jdbc</artifactId>
- <version>${hive.version}</version>
- <scope>test</scope>
- <exclusions>
- <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
- <exclusion>
- <groupId>org.apache.logging.log4j</groupId>
- <artifactId>log4j-slf4j-impl</artifactId>
- </exclusion>
- <exclusion>
- <groupId>net.minidev</groupId>
- <artifactId>json-smart</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-hbase-handler</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
- <exclusion>
- <groupId>org.apache.logging.log4j</groupId>
- <artifactId>log4j-slf4j-impl</artifactId>
- </exclusion>
- <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
- <exclusion>
- <groupId>net.minidev</groupId>
- <artifactId>json-smart</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
- <exclusion>
- <groupId>org.apache.logging.log4j</groupId>
- <artifactId>log4j-slf4j-impl</artifactId>
- </exclusion>
- <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
- <exclusion>
- <groupId>net.minidev</groupId>
- <artifactId>json-smart</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- <dependency>
- <groupId>org.apache.hive.shims</groupId>
- <artifactId>hive-shims-common</artifactId>
- <version>${hive.version}</version>
- <exclusions>
- <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
- <exclusion>
- <groupId>org.apache.logging.log4j</groupId>
- <artifactId>log4j-slf4j-impl</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-enforcer-plugin</artifactId>
- <version>3.0.0-M1</version>
- <executions>
- <execution>
- <id>enforce-banned-dependencies</id>
- <goals>
- <goal>enforce</goal>
- </goals>
- <configuration>
- <rules>
- <bannedDependencies>
- <excludes>
- <exclude>org.apache.logging.log4j:log4j-slf4j-impl</exclude>
- <!-- Assert that we only use artifacts from only the specified
- version of these components. -->
- <exclude>org.apache.hadoop:*</exclude>
- <exclude>org.apache.hbase:*</exclude>
- <exclude>org.apache.hive:*</exclude>
- <exclude>org.apache.kudu:*</exclude>
- <exclude>org.apache.sentry:*</exclude>
- <exclude>org.apache.parquet:*</exclude>
- </excludes>
- <includes>
- <include>org.apache.hadoop:*:${hadoop.version}</include>
- <include>org.apache.hbase:*:${hbase.version}</include>
- <include>org.apache.hive:*:${hive.version}</include>
- <include>org.apache.kudu:*:${kudu.version}</include>
- <include>org.apache.sentry:*:${sentry.version}</include>
- <include>org.apache.parquet:*:${parquet.version}</include>
- </includes>
- </bannedDependencies>
- </rules>
- <fail>true</fail>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- <dependencyManagement>
- <dependencies>
- <!--
- Pin org.glassfish:javax.el explicitly.
-
- HBase depends on this indirectly, and it's configured with
- a range of versions. This causes Maven to talk to all configured
- repositories, leading both to a lot of chattiness, and also
- failures if one of the repositories is unavailable.
- -->
- <dependency>
- <groupId>org.glassfish</groupId>
- <artifactId>javax.el</artifactId>
- <version>3.0.1-b08</version>
- </dependency>
- </dependencies>
- </dependencyManagement>
- </profile>
- </profiles>
+ HBase depends on this indirectly, and it's configured with
+ a range of versions. This causes Maven to talk to all configured
+ repositories, leading both to a lot of chattiness, and also
+ failures if one of the repositories is unavailable.
+ -->
+ <dependency>
+ <groupId>org.glassfish</groupId>
+ <artifactId>javax.el</artifactId>
+ <version>3.0.1-b08</version>
+ </dependency>
+ </dependencies>
+ </dependencyManagement>
</project>
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java
deleted file mode 100644
index 6a264bd..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetCatalogsReq extends org.apache.hive.service.cli.thrift.TGetCatalogsReq {}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java
deleted file mode 100644
index b35819a..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetColumnsReq extends org.apache.hive.service.cli.thrift.TGetColumnsReq {}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java
deleted file mode 100644
index 63424eb..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetFunctionsReq
- extends org.apache.hive.service.cli.thrift.TGetFunctionsReq {}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java
deleted file mode 100644
index 708134d..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetInfoReq extends org.apache.hive.service.cli.thrift.TGetInfoReq {}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java
deleted file mode 100644
index 3b6ec26..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetSchemasReq extends org.apache.hive.service.cli.thrift.TGetSchemasReq {}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java
deleted file mode 100644
index fd309d4..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetTablesReq extends org.apache.hive.service.cli.thrift.TGetTablesReq {}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java
deleted file mode 100644
index 4793516..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.authorization;
-
-import com.google.common.base.Preconditions;
-import org.apache.sentry.policy.db.SimpleDBPolicyEngine;
-import org.apache.commons.lang.reflect.ConstructorUtils;
-import org.apache.impala.catalog.AuthorizationPolicy;
-import org.apache.sentry.provider.file.SimpleFileProviderBackend;
-import org.apache.sentry.policy.common.PolicyEngine;
-import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackendContext;
-import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across
- * Sentry versions.
- */
-class SentryAuthProvider {
- /*
- * Creates a new ResourceAuthorizationProvider based on the given configuration.
- */
- static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
- AuthorizationPolicy policy) {
- try {
- ProviderBackend providerBe;
- // Create the appropriate backend provider.
- if (config.isFileBasedPolicy()) {
- providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
- config.getPolicyFile());
- } else {
- // Note: The second parameter to the ProviderBackend is a "resourceFile" path
- // which is not used by Impala. We cannot pass 'null' so instead pass an empty
- // string.
- providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
- "");
- Preconditions.checkNotNull(policy);
- ProviderBackendContext context = new ProviderBackendContext();
- context.setBindingHandle(policy);
- providerBe.initialize(context);
- }
-
- SimpleDBPolicyEngine engine =
- new SimpleDBPolicyEngine(config.getServerName(), providerBe);
-
- // Try to create an instance of the specified policy provider class.
- // Re-throw any exceptions that are encountered.
- String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
- return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
- Class.forName(config.getPolicyProviderClassName()),
- new Object[] {policyFile, engine});
- } catch (Exception e) {
- // Re-throw as unchecked exception.
- throw new IllegalStateException(
- "Error creating ResourceAuthorizationProvider: ", e);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java
deleted file mode 100644
index ef3da61..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import org.apache.hadoop.fs.FileStatus;
-
-/**
- * Wrapper classes to abstract away differences between HDFS versions in
- * the MiniCluster profiles.
- */
-public class HdfsShim {
- public static boolean isErasureCoded(FileStatus fileStatus) {
- // Hadoop 2 didn't support Erasure Coding
- return false;
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java
deleted file mode 100644
index d0cd351..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java
+++ /dev/null
@@ -1,127 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
-import org.apache.impala.authorization.User;
-import org.apache.impala.common.Pair;
-import org.apache.impala.common.ImpalaException;
-import org.apache.impala.service.Frontend;
-import org.apache.impala.service.MetadataOp;
-import org.apache.impala.thrift.TMetadataOpRequest;
-import org.apache.impala.thrift.TResultSet;
-import org.apache.thrift.TException;
-
-/**
- * A wrapper around some of Hive's Metastore API's to abstract away differences
- * between major versions of Hive. This implements the shimmed methods for Hive 2.
- */
-public class MetastoreShim {
- /**
- * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
- */
- public static boolean validateName(String name) {
- return MetaStoreUtils.validateName(name);
- }
-
- /**
- * Wrapper around IMetaStoreClient.alter_partition() to deal with added
- * arguments.
- */
- public static void alterPartition(IMetaStoreClient client, Partition partition)
- throws InvalidOperationException, MetaException, TException {
- client.alter_partition(partition.getDbName(), partition.getTableName(), partition);
- }
-
- /**
- * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
- * arguments.
- */
- public static void alterPartitions(IMetaStoreClient client, String dbName,
- String tableName, List<Partition> partitions)
- throws InvalidOperationException, MetaException, TException {
- client.alter_partitions(dbName, tableName, partitions);
- }
-
- /**
- * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
- * arguments.
- */
- public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
- throws MetaException {
- MetaStoreUtils.updatePartitionStatsFast(partition, warehouse);
- }
-
- /**
- * Return the maximum number of Metastore objects that should be retrieved in
- * a batch.
- */
- public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
- return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString();
- }
-
- /**
- * Return the key and value that should be set in the partition parameters to
- * mark that the stats were generated automatically by a stats task.
- */
- public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
- return Pair.create(
- StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
- }
-
- public static TResultSet execGetFunctions(
- Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
- TGetFunctionsReq req = request.getGet_functions_req();
- return MetadataOp.getFunctions(
- frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
- }
-
- public static TResultSet execGetColumns(
- Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
- TGetColumnsReq req = request.getGet_columns_req();
- return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
- req.getTableName(), req.getColumnName(), user);
- }
-
- public static TResultSet execGetTables(
- Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
- TGetTablesReq req = request.getGet_tables_req();
- return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
- req.getTableName(), req.getTableTypes(), user);
- }
-
- public static TResultSet execGetSchemas(
- Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
- TGetSchemasReq req = request.getGet_schemas_req();
- return MetadataOp.getSchemas(
- frontend, req.getCatalogName(), req.getSchemaName(), user);
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java
deleted file mode 100644
index 330035e..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-/**
- * Constant to tell us what Minicluster Profile we are built against.
- */
-public class MiniclusterProfile {
- public static final int MINICLUSTER_PROFILE = 2;
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java
deleted file mode 100644
index 5d2eadc..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-package org.apache.impala.util;
-
-import java.util.Set;
-
-import org.apache.sentry.provider.db.SentryAccessDeniedException;
-import org.apache.sentry.provider.db.SentryAlreadyExistsException;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-import org.apache.sentry.SentryUserException;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
- */
-public class SentryUtil {
- static boolean isSentryAlreadyExists(Exception e) {
- return e instanceof SentryAlreadyExistsException;
- }
-
- static boolean isSentryAccessDenied(Exception e) {
- return e instanceof SentryAccessDeniedException;
- }
-
- public static boolean isSentryGroupNotFound(Exception e) {
- // Sentry 1.5 does not have this exception
- return false;
- }
-
- static Set<TSentryRole> listRoles(SentryPolicyServiceClient client, String username)
- throws SentryUserException {
- return client.listRoles(username);
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java b/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
deleted file mode 100644
index f8c1ae9..0000000
--- a/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
+++ /dev/null
@@ -1,139 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.datagenerator;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.impala.planner.HBaseScanNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
- * Deterministically assigns regions to region servers.
- */
-public class HBaseTestDataRegionAssignment {
- public class TableNotFoundException extends Exception {
- public TableNotFoundException(String s) {
- super(s);
- }
- }
-
- private final static Logger LOG = LoggerFactory.getLogger(
- HBaseTestDataRegionAssignment.class);
- private final Configuration conf;
- private final HBaseAdmin hbaseAdmin;
- private final List<ServerName> sortedRS; // sorted list of region server name
- private final String[] splitPoints = { "1", "3", "5", "7", "9"};
-
- public HBaseTestDataRegionAssignment() throws IOException {
- conf = new Configuration();
- hbaseAdmin = new HBaseAdmin(conf);
- ClusterStatus clusterStatus = hbaseAdmin.getClusterStatus();
- Collection<ServerName> regionServerNames = clusterStatus.getServers();
- sortedRS = new ArrayList<ServerName>(regionServerNames);
- Collections.sort(sortedRS);
- }
-
- public void close() throws IOException {
- hbaseAdmin.close();
- }
-
- /**
- * The table comes in already split into regions specified by splitPoints and with data
- * already loaded. Pair up adjacent regions and assign to the same server.
- * Each region pair in ([unbound:1,1:3], [3:5,5:7], [7:9,9:unbound])
- * will be on the same server.
- */
- public void performAssignment(String tableName) throws IOException,
- InterruptedException, TableNotFoundException {
- HTableDescriptor[] desc = hbaseAdmin.listTables(tableName);
- if (desc == null || desc.length == 0) {
- throw new TableNotFoundException("Table " + tableName + " not found.");
- }
-
- // Sort the region by start key
- List<HRegionInfo> regions = hbaseAdmin.getTableRegions(tableName.getBytes());
- Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
- Collections.sort(regions);
-
- // Pair up two adjacent regions to the same region server. That is,
- // region server 1 <- regions (unbound:1), (1:3)
- // region server 2 <- regions (3:5), (5:7)
- // region server 3 <- regions (7:9), (9:unbound)
- NavigableMap<HRegionInfo, ServerName> expectedLocs = Maps.newTreeMap();
- for (int i = 0; i < regions.size(); ++i) {
- HRegionInfo regionInfo = regions.get(i);
- int rsIdx = (i / 2) % sortedRS.size();
- ServerName regionServerName = sortedRS.get(rsIdx);
- hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
- regionServerName.getServerName().getBytes());
- expectedLocs.put(regionInfo, regionServerName);
- }
-
- // hbaseAdmin.move() is an asynchronous operation. HBase tests use sleep to wait for
- // the move to complete. It should be done in 10sec.
- int sleepCnt = 0;
- HTable hbaseTable = new HTable(conf, tableName);
- try {
- while(!expectedLocs.equals(hbaseTable.getRegionLocations()) &&
- sleepCnt < 100) {
- Thread.sleep(100);
- ++sleepCnt;
- }
- NavigableMap<HRegionInfo, ServerName> actualLocs = hbaseTable.getRegionLocations();
- Preconditions.checkArgument(expectedLocs.equals(actualLocs));
-
- // Log the actual region location map
- for (Map.Entry<HRegionInfo, ServerName> entry: actualLocs.entrySet()) {
- LOG.info(HBaseScanNode.printKey(entry.getKey().getStartKey()) + " -> " +
- entry.getValue().getHostAndPort());
- }
-
- // Force a major compaction such that the HBase table is backed by deterministic
- // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
- // these physical artifacts.
- LOG.info("Major compacting HBase table: " + tableName);
- hbaseAdmin.majorCompact(tableName);
- } finally {
- IOUtils.closeQuietly(hbaseTable);
- }
- }
-}
-
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java
deleted file mode 100644
index 8c9bff8..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java
+++ /dev/null
@@ -1,341 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.analysis;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.parquet.hadoop.metadata.ParquetMetadata;
-import org.apache.parquet.hadoop.ParquetFileReader;
-import org.apache.parquet.schema.OriginalType;
-import org.apache.parquet.schema.PrimitiveType;
-
-import org.apache.impala.catalog.ArrayType;
-import org.apache.impala.catalog.MapType;
-import org.apache.impala.catalog.ScalarType;
-import org.apache.impala.catalog.StructField;
-import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Type;
-import org.apache.impala.common.AnalysisException;
-import org.apache.impala.common.FileSystemUtil;
-
-/**
- * Provides extractParquetSchema() to extract a schema
- * from a parquet file.
- *
- * Because Parquet's Java package changed between Parquet 1.5
- * and 1.9, a second copy of this file, with "org.apache.parquet." replaced
- * with "org.apache.org.apache.parquet." is generated by the build system.
- */
-class ParquetHelper {
- private final static String ERROR_MSG =
- "Failed to convert Parquet type\n%s\nto an Impala %s type:\n%s\n";
-
- /**
- * Reads the first block from the given HDFS file and returns the Parquet schema.
- * Throws Analysis exception for any failure, such as failing to read the file
- * or failing to parse the contents.
- */
- private static org.apache.parquet.schema.MessageType loadParquetSchema(Path pathToFile)
- throws AnalysisException {
- try {
- FileSystem fs = pathToFile.getFileSystem(FileSystemUtil.getConfiguration());
- if (!fs.isFile(pathToFile)) {
- throw new AnalysisException("Cannot infer schema, path is not a file: " +
- pathToFile);
- }
- } catch (IOException e) {
- throw new AnalysisException("Failed to connect to filesystem:" + e);
- } catch (IllegalArgumentException e) {
- throw new AnalysisException(e.getMessage());
- }
- ParquetMetadata readFooter = null;
- try {
- readFooter = ParquetFileReader.readFooter(FileSystemUtil.getConfiguration(),
- pathToFile);
- } catch (FileNotFoundException e) {
- throw new AnalysisException("File not found: " + e);
- } catch (IOException e) {
- throw new AnalysisException("Failed to open file as a parquet file: " + e);
- } catch (RuntimeException e) {
- // Parquet throws a generic RuntimeException when reading a non-parquet file
- if (e.toString().contains("is not a Parquet file")) {
- throw new AnalysisException("File is not a parquet file: " + pathToFile);
- }
- // otherwise, who knows what we caught, throw it back up
- throw e;
- }
- return readFooter.getFileMetaData().getSchema();
- }
-
- /**
- * Converts a "primitive" Parquet type to an Impala type.
- * A primitive type is a non-nested type with no annotations.
- */
- private static Type convertPrimitiveParquetType(org.apache.parquet.schema.Type parquetType)
- throws AnalysisException {
- Preconditions.checkState(parquetType.isPrimitive());
- PrimitiveType prim = parquetType.asPrimitiveType();
- switch (prim.getPrimitiveTypeName()) {
- case BINARY: return Type.STRING;
- case BOOLEAN: return Type.BOOLEAN;
- case DOUBLE: return Type.DOUBLE;
- case FIXED_LEN_BYTE_ARRAY:
- throw new AnalysisException(
- "Unsupported parquet type FIXED_LEN_BYTE_ARRAY for field " +
- parquetType.getName());
- case FLOAT: return Type.FLOAT;
- case INT32: return Type.INT;
- case INT64: return Type.BIGINT;
- case INT96: return Type.TIMESTAMP;
- default:
- Preconditions.checkState(false, "Unexpected parquet primitive type: " +
- prim.getPrimitiveTypeName());
- return null;
- }
- }
-
- /**
- * Converts a Parquet group type to an Impala map Type. We support both standard
- * Parquet map representations, as well as legacy. Legacy representations are handled
- * according to this specification:
- * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
- *
- * Standard representation of a map in Parquet:
- * <optional | required> group <name> (MAP) { <-- outerGroup is pointing at this
- * repeated group key_value {
- * required <key-type> key;
- * <optional | required> <value-type> value;
- * }
- * }
- */
- private static MapType convertMap(org.apache.parquet.schema.GroupType outerGroup)
- throws AnalysisException {
- if (outerGroup.getFieldCount() != 1){
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The logical MAP type must have exactly 1 inner field."));
- }
-
- org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
- if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)){
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The logical MAP type must have a repeated inner field."));
- }
- if (innerField.isPrimitive()) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The inner field of the logical MAP type must be a group."));
- }
-
- org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
- // It does not matter whether innerGroup has an annotation or not (for example it may
- // be annotated with MAP_KEY_VALUE). We treat the case that innerGroup has an
- // annotation and the case the innerGroup does not have an annotation the same.
- if (innerGroup.getFieldCount() != 2) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The inner field of the logical MAP type must have exactly 2 fields."));
- }
-
- org.apache.parquet.schema.Type key = innerGroup.getType(0);
- if (!key.getName().equals("key")) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The name of the first field of the inner field of the logical MAP " +
- "type must be 'key'"));
- }
- if (!key.isPrimitive()) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The key type of the logical MAP type must be primitive."));
- }
- org.apache.parquet.schema.Type value = innerGroup.getType(1);
- if (!value.getName().equals("value")) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "MAP", "The name of the second field of the inner field of the logical MAP " +
- "type must be 'value'"));
- }
-
- return new MapType(convertParquetType(key), convertParquetType(value));
- }
-
- /**
- * Converts a Parquet group type to an Impala struct Type.
- */
- private static StructType convertStruct(org.apache.parquet.schema.GroupType outerGroup)
- throws AnalysisException {
- ArrayList<StructField> structFields = new ArrayList<StructField>();
- for (org.apache.parquet.schema.Type field: outerGroup.getFields()) {
- StructField f = new StructField(field.getName(), convertParquetType(field));
- structFields.add(f);
- }
- return new StructType(structFields);
- }
-
- /**
- * Converts a Parquet group type to an Impala array Type. We can handle the standard
- * representation, but also legacy representations for backwards compatibility.
- * Legacy representations are handled according to this specification:
- * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
- *
- * Standard representation of an array in Parquet:
- * <optional | required> group <name> (LIST) { <-- outerGroup is pointing at this
- * repeated group list {
- * <optional | required> <element-type> element;
- * }
- * }
- */
- private static ArrayType convertArray(org.apache.parquet.schema.GroupType outerGroup)
- throws AnalysisException {
- if (outerGroup.getFieldCount() != 1) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "LIST", "The logical LIST type must have exactly 1 inner field."));
- }
-
- org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
- if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
- throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
- "LIST", "The inner field of the logical LIST type must be repeated."));
- }
- if (innerField.isPrimitive() || innerField.getOriginalType() != null) {
- // From the Parquet Spec:
- // 1. If the repeated field is not a group then it's type is the element type.
- //
- // If innerField is a group, but originalType is not null, the element type is
- // based on the logical type.
- return new ArrayType(convertParquetType(innerField));
- }
-
- org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
- if (innerGroup.getFieldCount() != 1) {
- // From the Parquet Spec:
- // 2. If the repeated field is a group with multiple fields, then it's type is a
- // struct.
- return new ArrayType(convertStruct(innerGroup));
- }
-
- return new ArrayType(convertParquetType(innerGroup.getType(0)));
- }
-
- /**
- * Converts a "logical" Parquet type to an Impala column type.
- * A Parquet type is considered logical when it has an annotation. The annotation is
- * stored as a "OriginalType". The Parquet documentation refers to these as logical
- * types, so we use that terminology here.
- */
- private static Type convertLogicalParquetType(org.apache.parquet.schema.Type parquetType)
- throws AnalysisException {
- OriginalType orig = parquetType.getOriginalType();
- if (orig == OriginalType.LIST) {
- return convertArray(parquetType.asGroupType());
- }
- if (orig == OriginalType.MAP || orig == OriginalType.MAP_KEY_VALUE) {
- // MAP_KEY_VALUE annotation should not be used any more. However, according to the
- // Parquet spec, some existing data incorrectly uses MAP_KEY_VALUE in place of MAP.
- // For backward-compatibility, a group annotated with MAP_KEY_VALUE that is not
- // contained by a MAP-annotated group should be handled as a MAP-annotated group.
- return convertMap(parquetType.asGroupType());
- }
-
- PrimitiveType prim = parquetType.asPrimitiveType();
- if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.BINARY &&
- (orig == OriginalType.UTF8 || orig == OriginalType.ENUM)) {
- // UTF8 is the type annotation Parquet uses for strings
- // ENUM is the type annotation Parquet uses to indicate that
- // the original data type, before conversion to parquet, had been enum.
- // Applications which do not have enumerated types (e.g. Impala)
- // should interpret it as a string.
- // We check to make sure it applies to BINARY to avoid errors if there is a bad
- // annotation.
- return Type.STRING;
- }
-
- if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT32
- || prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT64) {
- // Map signed integer types to an supported Impala column type
- switch (orig) {
- case INT_8: return Type.TINYINT;
- case INT_16: return Type.SMALLINT;
- case INT_32: return Type.INT;
- case INT_64: return Type.BIGINT;
- }
- }
-
- if (orig == OriginalType.DECIMAL) {
- return ScalarType.createDecimalType(prim.getDecimalMetadata().getPrecision(),
- prim.getDecimalMetadata().getScale());
- }
-
- throw new AnalysisException(
- "Unsupported logical parquet type " + orig + " (primitive type is " +
- prim.getPrimitiveTypeName().name() + ") for field " +
- parquetType.getName());
- }
-
- /**
- * Converts a Parquet type into an Impala type.
- */
- private static Type convertParquetType(org.apache.parquet.schema.Type field)
- throws AnalysisException {
- Type type = null;
- // TODO for 2.3: If a field is not annotated with LIST, it can still be sometimes
- // interpreted as an array. The following 2 examples should be interpreted as an array
- // of integers, but this is currently not done.
- // 1. repeated int int_col;
- // 2. required group int_arr {
- // repeated group list {
- // required int element;
- // }
- // }
- if (field.getOriginalType() != null) {
- type = convertLogicalParquetType(field);
- } else if (field.isPrimitive()) {
- type = convertPrimitiveParquetType(field);
- } else {
- // If field is not primitive, it must be a struct.
- type = convertStruct(field.asGroupType());
- }
- return type;
- }
-
- /**
- * Parses a Parquet file stored in HDFS and returns the corresponding Impala schema.
- * This fails with an analysis exception if any errors occur reading the file,
- * parsing the Parquet schema, or if the Parquet types cannot be represented in Impala.
- */
- static List<ColumnDef> extractParquetSchema(HdfsUri location)
- throws AnalysisException {
- org.apache.parquet.schema.MessageType parquetSchema = loadParquetSchema(location.getPath());
- List<org.apache.parquet.schema.Type> fields = parquetSchema.getFields();
- List<ColumnDef> schema = new ArrayList<ColumnDef>();
-
- for (org.apache.parquet.schema.Type field: fields) {
- Type type = convertParquetType(field);
- Preconditions.checkNotNull(type);
- String colName = field.getName();
- Map<ColumnDef.Option, Object> option = Maps.newHashMap();
- option.put(ColumnDef.Option.COMMENT, "Inferred from Parquet file.");
- schema.add(new ColumnDef(colName, new TypeDef(type), option));
- }
- return schema;
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java
deleted file mode 100644
index c3ef004..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.impala.authorization;
-
-import com.google.common.base.Preconditions;
-import org.apache.impala.authorization.Privilege.ImpalaAction;
-import org.apache.sentry.core.common.BitFieldAction;
-import org.apache.sentry.core.common.BitFieldActionFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * An implementation of BitFieldActionFactory for Impala.
- */
-public class ImpalaActionFactory extends BitFieldActionFactory {
- @Override
- public List<? extends BitFieldAction> getActionsByCode(int actionCode) {
- Preconditions.checkArgument(
- actionCode >= 1 && actionCode <= ImpalaAction.ALL.getCode(),
- String.format("Action code must between 1 and %d.", ImpalaAction.ALL.getCode()));
-
- List<BitFieldAction> actions = new ArrayList<>();
- for (ImpalaAction action : ImpalaAction.values()) {
- if ((action.getCode() & actionCode) == action.getCode()) {
- actions.add(action.getBitFieldAction());
- }
- }
- return actions;
- }
-
- @Override
- public BitFieldAction getActionByName(String name) {
- Preconditions.checkNotNull(name);
-
- for (ImpalaAction action : ImpalaAction.values()) {
- if (action.getValue().equalsIgnoreCase(name)) {
- return action.getBitFieldAction();
- }
- }
- return null;
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
deleted file mode 100644
index 43a194e..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.impala.authorization;
-
-import java.util.Map;
-
-import org.apache.sentry.core.common.BitFieldActionFactory;
-import org.apache.sentry.core.common.ImplyMethodType;
-import org.apache.sentry.core.model.db.HivePrivilegeModel;
-import org.apache.sentry.core.common.Model;
-
-/**
- * Delegates to HivePrivilegeModel for getImplyMethodMap(), but
- * uses Impala's BitFieldActionFactory implementation.
- */
-public class ImpalaPrivilegeModel implements Model {
- public static final ImpalaPrivilegeModel INSTANCE = new ImpalaPrivilegeModel();
- private final ImpalaActionFactory actionFactory = new ImpalaActionFactory();
-
- @Override
- public Map<String, ImplyMethodType> getImplyMethodMap() {
- return HivePrivilegeModel.getInstance().getImplyMethodMap();
- }
-
- @Override
- public BitFieldActionFactory getBitFieldActionFactory() {
- return actionFactory;
- }
-}
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java
deleted file mode 100644
index a4f0743..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.authorization;
-
-import com.google.common.base.Preconditions;
-
-import org.apache.impala.catalog.AuthorizationPolicy;
-
-import org.apache.commons.lang.reflect.ConstructorUtils;
-import org.apache.sentry.core.common.Model;
-import org.apache.sentry.core.model.db.HivePrivilegeModel;
-import org.apache.sentry.policy.common.PolicyEngine;
-import org.apache.sentry.policy.engine.common.CommonPolicyEngine;
-import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackendContext;
-import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
-import org.apache.sentry.provider.file.SimpleFileProviderBackend;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
- */
-class SentryAuthProvider {
- /*
- * Creates a new ResourceAuthorizationProvider based on the given configuration.
- */
- static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
- AuthorizationPolicy policy) {
- try {
- ProviderBackend providerBe;
- // Create the appropriate backend provider.
- if (config.isFileBasedPolicy()) {
- providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
- config.getPolicyFile());
- ProviderBackendContext context = new ProviderBackendContext();
- providerBe.initialize(context);
- } else {
- // Note: The second parameter to the ProviderBackend is a "resourceFile" path
- // which is not used by Impala. We cannot pass 'null' so instead pass an empty
- // string.
- providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
- "");
- Preconditions.checkNotNull(policy);
- ProviderBackendContext context = new ProviderBackendContext();
- context.setBindingHandle(policy);
- providerBe.initialize(context);
- }
-
- CommonPolicyEngine engine =
- new CommonPolicyEngine(providerBe);
-
- // Try to create an instance of the specified policy provider class.
- // Re-throw any exceptions that are encountered.
- String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
-
- return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
- Class.forName(config.getPolicyProviderClassName()),
- new Object[] {policyFile, engine, ImpalaPrivilegeModel.INSTANCE});
- } catch (Exception e) {
- // Re-throw as unchecked exception.
- throw new IllegalStateException(
- "Error creating ResourceAuthorizationProvider: ", e);
- }
- }
-}