Posted to commits@hive.apache.org by kg...@apache.org on 2018/02/14 10:00:25 UTC
[01/15] hive git commit: HIVE-18635: Generalize hook dispatch logics in Driver (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Repository: hive
Updated Branches:
refs/heads/master fedefeba6 -> b0d3cb452
HIVE-18635: Generalize hook dispatch logics in Driver (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c96c6acf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c96c6acf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c96c6acf
Branch: refs/heads/master
Commit: c96c6acf6e2ef04618e2c3cb36a28fa695ae07e3
Parents: fedefeb
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Wed Feb 14 09:17:31 2018 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Wed Feb 14 09:17:31 2018 +0100
----------------------------------------------------------------------
.../java/org/apache/hadoop/hive/ql/Driver.java | 85 ++---
.../org/apache/hadoop/hive/ql/HookRunner.java | 323 +++++++++++++++++++
.../hadoop/hive/ql/QueryLifeTimeHookRunner.java | 189 -----------
.../apache/hadoop/hive/ql/hooks/HookUtils.java | 27 +-
.../hadoop/hive/ql/hooks/HooksLoader.java | 110 -------
.../hadoop/hive/ql/hooks/TestQueryHooks.java | 32 +-
.../results/clientnegative/bad_exec_hooks.q.out | 9 +-
.../service/cli/session/SessionManager.java | 4 +-
8 files changed, 397 insertions(+), 382 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 8f7291d..23b209e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import com.google.common.annotations.VisibleForTesting;
+
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -71,11 +72,8 @@ import org.apache.hadoop.hive.ql.exec.TaskRunner;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
import org.apache.hadoop.hive.ql.hooks.Entity;
-import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
-import org.apache.hadoop.hive.ql.hooks.Hook;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.HookUtils;
-import org.apache.hadoop.hive.ql.hooks.HooksLoader;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
@@ -95,7 +93,6 @@ import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.ColumnAccessInfo;
import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState;
-import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHook;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl;
import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
@@ -176,8 +173,7 @@ public class Driver implements IDriver {
private final QueryState queryState;
// Query hooks that execute before compilation and after execution
- private QueryLifeTimeHookRunner queryLifeTimeHookRunner;
- private final HooksLoader hooksLoader;
+ private HookRunner hookRunner;
// Transaction manager the Driver has been initialized with (can be null).
// If this is set then this Transaction manager will be used during query
@@ -397,25 +393,20 @@ public class Driver implements IDriver {
}
public Driver(QueryState queryState, String userName) {
- this(queryState, userName, new HooksLoader(queryState.getConf()), null, null);
+ this(queryState, userName, null, null);
}
public Driver(QueryState queryState, String userName, QueryInfo queryInfo) {
- this(queryState, userName, new HooksLoader(queryState.getConf()), queryInfo, null);
+ this(queryState, userName, queryInfo, null);
}
public Driver(QueryState queryState, String userName, QueryInfo queryInfo, HiveTxnManager txnMgr) {
- this(queryState, userName, new HooksLoader(queryState.getConf()), queryInfo, txnMgr);
- }
-
- public Driver(QueryState queryState, String userName, HooksLoader hooksLoader, QueryInfo queryInfo, HiveTxnManager txnMgr) {
this.queryState = queryState;
this.conf = queryState.getConf();
isParallelEnabled = (conf != null)
&& HiveConf.getBoolVar(conf, ConfVars.HIVE_SERVER2_PARALLEL_COMPILATION);
this.userName = userName;
- this.hooksLoader = hooksLoader;
- this.queryLifeTimeHookRunner = new QueryLifeTimeHookRunner(conf, hooksLoader, console);
+ this.hookRunner = new HookRunner(conf, console);
this.queryInfo = queryInfo;
this.initTxnMgr = txnMgr;
}
@@ -568,7 +559,7 @@ public class Driver implements IDriver {
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
// Trigger query hook before compilation
- queryLifeTimeHookRunner.runBeforeParseHook(command);
+ hookRunner.runBeforeParseHook(command);
ASTNode tree;
try {
@@ -577,16 +568,14 @@ public class Driver implements IDriver {
parseError = true;
throw e;
} finally {
- queryLifeTimeHookRunner.runAfterParseHook(command, parseError);
+ hookRunner.runAfterParseHook(command, parseError);
}
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
- queryLifeTimeHookRunner.runBeforeCompileHook(command);
+ hookRunner.runBeforeCompileHook(command);
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
- List<HiveSemanticAnalyzerHook> saHooks =
- hooksLoader.getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, console, HiveSemanticAnalyzerHook.class);
// Flush the metastore cache. This assures that we don't pick up objects from a previous
// query running in this same thread. This has to be done after we get our semantic
@@ -604,21 +593,20 @@ public class Driver implements IDriver {
}
}
// Do semantic analysis and plan generation
- if (saHooks != null && !saHooks.isEmpty()) {
+ if (hookRunner.hasPreAnalyzeHooks()) {
HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
hookCtx.setConf(conf);
hookCtx.setUserName(userName);
hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
hookCtx.setCommand(command);
hookCtx.setHiveOperation(queryState.getHiveOperation());
- for (HiveSemanticAnalyzerHook hook : saHooks) {
- tree = hook.preAnalyze(hookCtx, tree);
- }
+
+ tree = hookRunner.runPreAnalyzeHooks(hookCtx, tree);
+
sem.analyze(tree, ctx);
hookCtx.update(sem);
- for (HiveSemanticAnalyzerHook hook : saHooks) {
- hook.postAnalyze(hookCtx, sem.getAllRootTasks());
- }
+
+ hookRunner.runPostAnalyzeHooks(hookCtx, sem.getAllRootTasks());
} else {
sem.analyze(tree, ctx);
}
@@ -710,7 +698,7 @@ public class Driver implements IDriver {
// before/after execution hook will never be executed.
if (!parseError) {
try {
- queryLifeTimeHookRunner.runAfterCompilationHook(command, compileError);
+ hookRunner.runAfterCompilationHook(command, compileError);
} catch (Exception e) {
LOG.warn("Failed when invoking query after-compilation hook.", e);
}
@@ -1599,12 +1587,8 @@ public class Driver implements IDriver {
HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf,
alreadyCompiled ? ctx.getCmd() : command);
// Get all the driver run hooks and pre-execute them.
- List<HiveDriverRunHook> driverRunHooks;
try {
- driverRunHooks = hooksLoader.getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, console, HiveDriverRunHook.class);
- for (HiveDriverRunHook driverRunHook : driverRunHooks) {
- driverRunHook.preDriverRun(hookContext);
- }
+ hookRunner.runPreDriverHooks(hookContext);
} catch (Exception e) {
errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1666,9 +1650,7 @@ public class Driver implements IDriver {
// Take all the driver run hooks and post-execute them.
try {
- for (HiveDriverRunHook driverRunHook : driverRunHooks) {
- driverRunHook.postDriverRun(hookContext);
- }
+ hookRunner.runPostDriverHooks(hookContext);
} catch (Exception e) {
errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -1880,16 +1862,10 @@ public class Driver implements IDriver {
ss.getSessionId(), Thread.currentThread().getName(), ss.isHiveServerQuery(), perfLogger, queryInfo);
hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
- for (Hook peh : hooksLoader.getHooks(HiveConf.ConfVars.PREEXECHOOKS, console, ExecuteWithHookContext.class)) {
- perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
-
- ((ExecuteWithHookContext) peh).run(hookContext);
-
- perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRE_HOOK + peh.getClass().getName());
- }
+ hookRunner.runPreHooks(hookContext);
// Trigger query hooks before query execution.
- queryLifeTimeHookRunner.runBeforeExecutionHook(queryStr, hookContext);
+ hookRunner.runBeforeExecutionHook(queryStr, hookContext);
setQueryDisplays(plan.getRootTasks());
int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
@@ -2064,15 +2040,10 @@ public class Driver implements IDriver {
plan.getOutputs().remove(output);
}
- hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
- // Get all the post execution hooks and execute them.
- for (Hook peh : hooksLoader.getHooks(HiveConf.ConfVars.POSTEXECHOOKS, console, ExecuteWithHookContext.class)) {
- perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
- ((ExecuteWithHookContext) peh).run(hookContext);
+ hookContext.setHookType(HookContext.HookType.POST_EXEC_HOOK);
- perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.POST_HOOK + peh.getClass().getName());
- }
+ hookRunner.runPostExecHooks(hookContext);
if (SessionState.get() != null) {
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
@@ -2110,7 +2081,7 @@ public class Driver implements IDriver {
} finally {
// Trigger query hooks after query completes its execution.
try {
- queryLifeTimeHookRunner.runAfterExecutionHook(queryStr, hookContext, executionError);
+ hookRunner.runAfterExecutionHook(queryStr, hookContext, executionError);
} catch (Exception e) {
LOG.warn("Failed when invoking query after execution hook", e);
}
@@ -2230,13 +2201,7 @@ public class Driver implements IDriver {
hookContext.setErrorMessage(errorMessage);
hookContext.setException(exception);
// Get all the failure execution hooks and execute them.
- for (Hook ofh : hooksLoader.getHooks(HiveConf.ConfVars.ONFAILUREHOOKS, console, ExecuteWithHookContext.class)) {
- perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
-
- ((ExecuteWithHookContext) ofh).run(hookContext);
-
- perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
- }
+ hookRunner.runFailureHooks(hookContext);
}
/**
@@ -2557,4 +2522,8 @@ public class Driver implements IDriver {
public QueryState getQueryState() {
return queryState;
}
+
+ public HookRunner getHookRunner() {
+ return hookRunner;
+ }
}
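For orientation only, a minimal sketch (not part of the patch) of what the Driver change means for callers: the HooksLoader constructor parameter is gone, conf-defined hooks are loaded lazily inside HookRunner, and additional hooks can be registered through the new getHookRunner() accessor. The construction mirrors the updated TestQueryHooks; MetricsQueryLifeTimeHook is simply an existing hook used here for illustration.

    HiveConf conf = new HiveConf();
    SessionState.start(conf);
    QueryState queryState = new QueryState.Builder()
        .withGenerateNewQueryId(true).withHiveConf(conf).build();
    Driver driver = new Driver(queryState, /* userName */ null, /* queryInfo */ null, /* txnMgr */ null);
    // Hooks named in the configuration are loaded lazily by HookRunner.initialize();
    // extra hooks can be added directly on the runner.
    driver.getHookRunner().addLifeTimeHook(new MetricsQueryLifeTimeHook());
    int ret = driver.run("select 1").getResponseCode();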
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
new file mode 100644
index 0000000..52e99f9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/HookRunner.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
+import org.apache.hadoop.hive.ql.hooks.Hook;
+import org.apache.hadoop.hive.ql.hooks.HookContext;
+import org.apache.hadoop.hive.ql.hooks.HookUtils;
+import org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryUpdateHook;
+import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl;
+import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookWithParseHooks;
+import org.apache.hadoop.hive.ql.log.PerfLogger;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHook;
+import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hive.common.util.HiveStringUtils;
+
+/**
+ * Handles hook executions for {@link Driver}.
+ */
+public class HookRunner {
+
+ private static final String CLASS_NAME = Driver.class.getName();
+ private final HiveConf conf;
+ private LogHelper console;
+ private List<QueryLifeTimeHook> queryHooks = new ArrayList<>();
+ private List<HiveSemanticAnalyzerHook> saHooks = new ArrayList<>();
+ private List<HiveDriverRunHook> driverRunHooks = new ArrayList<>();
+ private List<ExecuteWithHookContext> preExecHooks = new ArrayList<>();
+ private List<ExecuteWithHookContext> postExecHooks = new ArrayList<>();
+ private List<ExecuteWithHookContext> onFailureHooks = new ArrayList<>();
+ private boolean initialized = false;
+
+ /**
+ * Constructs a {@link HookRunner} that loads all hooks to be run via a {@link HooksLoader}.
+ */
+ HookRunner(HiveConf conf, SessionState.LogHelper console) {
+ this.conf = conf;
+ this.console = console;
+ }
+
+ public void initialize() {
+ if (initialized) {
+ return;
+ }
+ initialized = true;
+ queryHooks.addAll(loadHooksFromConf(HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS, QueryLifeTimeHook.class));
+ saHooks.addAll(loadHooksFromConf(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, HiveSemanticAnalyzerHook.class));
+ driverRunHooks.addAll(loadHooksFromConf(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class));
+ preExecHooks.addAll(loadHooksFromConf(HiveConf.ConfVars.PREEXECHOOKS, ExecuteWithHookContext.class));
+ postExecHooks.addAll(loadHooksFromConf(HiveConf.ConfVars.POSTEXECHOOKS, ExecuteWithHookContext.class));
+ onFailureHooks.addAll(loadHooksFromConf(HiveConf.ConfVars.ONFAILUREHOOKS, ExecuteWithHookContext.class));
+
+ if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
+ queryHooks.add(new MetricsQueryLifeTimeHook());
+ }
+ queryHooks.add(new MaterializedViewRegistryUpdateHook());
+ }
+
+
+ private <T extends Hook> List<T> loadHooksFromConf(ConfVars hookConfVar, Class<T> clazz) {
+ try {
+ return HookUtils.readHooksFromConf(conf, hookConfVar);
+ } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
+ String message = "Error loading hooks(" + hookConfVar + "): " + HiveStringUtils.stringifyException(e);
+ throw new RuntimeException(message, e);
+ }
+ }
+
+ /**
+ * If {@link QueryLifeTimeHookWithParseHooks} have been loaded via the {@link HooksLoader} then invoke the
+ * {@link QueryLifeTimeHookWithParseHooks#beforeParse(QueryLifeTimeHookContext)} method for each
+ * {@link QueryLifeTimeHookWithParseHooks}.
+ *
+ * @param command the Hive command that is being run
+ */
+ void runBeforeParseHook(String command) {
+ initialize();
+ if (!queryHooks.isEmpty()) {
+ QueryLifeTimeHookContext qhc =
+ new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(command).build();
+
+ for (QueryLifeTimeHook hook : queryHooks) {
+ if (hook instanceof QueryLifeTimeHookWithParseHooks) {
+ ((QueryLifeTimeHookWithParseHooks) hook).beforeParse(qhc);
+ }
+ }
+ }
+ }
+
+ /**
+ * If {@link QueryLifeTimeHookWithParseHooks} have been loaded via the {@link HooksLoader} then invoke the
+ * {@link QueryLifeTimeHookWithParseHooks#afterParse(QueryLifeTimeHookContext, boolean)} method for each
+ * {@link QueryLifeTimeHookWithParseHooks}.
+ *
+ * @param command the Hive command that is being run
+ * @param parseError true if there was an error while parsing the command, false otherwise
+ */
+ void runAfterParseHook(String command, boolean parseError) {
+ initialize();
+ if (!queryHooks.isEmpty()) {
+ QueryLifeTimeHookContext qhc =
+ new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(command).build();
+
+ for (QueryLifeTimeHook hook : queryHooks) {
+ if (hook instanceof QueryLifeTimeHookWithParseHooks) {
+ ((QueryLifeTimeHookWithParseHooks) hook).afterParse(qhc, parseError);
+ }
+ }
+ }
+ }
+
+ /**
+ * Dispatches {@link QueryLifeTimeHook#beforeCompile(QueryLifeTimeHookContext)}.
+ *
+ * @param command the Hive command that is being run
+ */
+ void runBeforeCompileHook(String command) {
+ initialize();
+ if (!queryHooks.isEmpty()) {
+ QueryLifeTimeHookContext qhc =
+ new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(command).build();
+
+ for (QueryLifeTimeHook hook : queryHooks) {
+ hook.beforeCompile(qhc);
+ }
+ }
+ }
+
+ /**
+ * Dispatches {@link QueryLifeTimeHook#afterCompile(QueryLifeTimeHookContext, boolean)}.
+ *
+ * @param command the Hive command that is being run
+ * @param compileError true if there was an error while compiling the command, false otherwise
+ */
+ void runAfterCompilationHook(String command, boolean compileError) {
+ initialize();
+ if (!queryHooks.isEmpty()) {
+ QueryLifeTimeHookContext qhc =
+ new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(command).build();
+
+ for (QueryLifeTimeHook hook : queryHooks) {
+ hook.afterCompile(qhc, compileError);
+ }
+ }
+ }
+
+ /**
+ * Dispatches {@link QueryLifeTimeHook#beforeExecution(QueryLifeTimeHookContext)}.
+ *
+ * @param command the Hive command that is being run
+ * @param hookContext the {@link HookContext} of the command being run
+ */
+ void runBeforeExecutionHook(String command, HookContext hookContext) {
+ initialize();
+ if (!queryHooks.isEmpty()) {
+ QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(command)
+ .withHookContext(hookContext).build();
+
+ for (QueryLifeTimeHook hook : queryHooks) {
+ hook.beforeExecution(qhc);
+ }
+ }
+ }
+
+ /**
+ * Dispatches {@link QueryLifeTimeHook#afterExecution(QueryLifeTimeHookContext, boolean)}.
+ *
+ * @param command the Hive command that is being run
+ * @param hookContext the {@link HookContext} of the command being run
+ * @param executionError true if there was an error while executing the command, false otherwise
+ */
+ void runAfterExecutionHook(String command, HookContext hookContext, boolean executionError) {
+ initialize();
+ if (!queryHooks.isEmpty()) {
+ QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(command)
+ .withHookContext(hookContext).build();
+
+ for (QueryLifeTimeHook hook : queryHooks) {
+ hook.afterExecution(qhc, executionError);
+ }
+ }
+ }
+
+ public ASTNode runPreAnalyzeHooks(HiveSemanticAnalyzerHookContext hookCtx, ASTNode tree) throws HiveException {
+ initialize();
+ try {
+ for (HiveSemanticAnalyzerHook hook : saHooks) {
+ tree = hook.preAnalyze(hookCtx, tree);
+ }
+ return tree;
+ } catch (HiveException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new HiveException("Error while invoking PreAnalyzeHooks:" + HiveStringUtils.stringifyException(e), e);
+ }
+ }
+
+ public boolean hasPreAnalyzeHooks() {
+ return !saHooks.isEmpty();
+ }
+
+ public void runPostAnalyzeHooks(HiveSemanticAnalyzerHookContext hookCtx,
+ List<Task<? extends Serializable>> allRootTasks) throws HiveException {
+ initialize();
+ try {
+ for (HiveSemanticAnalyzerHook hook : saHooks) {
+ hook.postAnalyze(hookCtx, allRootTasks);
+ }
+ } catch (HiveException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new HiveException("Error while invoking PostAnalyzeHooks:" + HiveStringUtils.stringifyException(e), e);
+ }
+
+ }
+
+ public void runPreDriverHooks(HiveDriverRunHookContext hookContext) throws HiveException {
+ initialize();
+ try {
+ for (HiveDriverRunHook driverRunHook : driverRunHooks) {
+ driverRunHook.preDriverRun(hookContext);
+ }
+ } catch (HiveException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new HiveException("Error while invoking PreDriverHooks:" + HiveStringUtils.stringifyException(e), e);
+ }
+ }
+
+ public void runPostDriverHooks(HiveDriverRunHookContext hookContext) throws HiveException {
+ initialize();
+ try {
+ for (HiveDriverRunHook driverRunHook : driverRunHooks) {
+ driverRunHook.postDriverRun(hookContext);
+ }
+ } catch (HiveException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new HiveException("Error while invoking PostDriverHooks:" + HiveStringUtils.stringifyException(e), e);
+ }
+ }
+
+ public void runPreHooks(HookContext hookContext) throws HiveException {
+ initialize();
+ invokeGeneralHook(preExecHooks, PerfLogger.PRE_HOOK, hookContext);
+ }
+
+ public void runPostExecHooks(HookContext hookContext) throws HiveException {
+ initialize();
+ invokeGeneralHook(postExecHooks, PerfLogger.POST_HOOK, hookContext);
+ }
+
+ public void runFailureHooks(HookContext hookContext) throws HiveException {
+ initialize();
+ invokeGeneralHook(onFailureHooks, PerfLogger.FAILURE_HOOK, hookContext);
+ }
+
+ private static void invokeGeneralHook(List<ExecuteWithHookContext> hooks, String prefix, HookContext hookContext)
+ throws HiveException {
+ if (hooks.isEmpty()) {
+ return;
+ }
+ try {
+ PerfLogger perfLogger = SessionState.getPerfLogger();
+
+ for (ExecuteWithHookContext hook : hooks) {
+ perfLogger.PerfLogBegin(CLASS_NAME, prefix + hook.getClass().getName());
+ hook.run(hookContext);
+ perfLogger.PerfLogEnd(CLASS_NAME, prefix + hook.getClass().getName());
+ }
+ } catch (HiveException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new HiveException("Error while invoking " + prefix + " hooks: " + HiveStringUtils.stringifyException(e), e);
+ }
+ }
+
+ public void addLifeTimeHook(QueryLifeTimeHook hook) {
+ queryHooks.add(hook);
+ }
+
+ public void addPreHook(ExecuteWithHookContext hook) {
+ preExecHooks.add(hook);
+ }
+
+ public void addPostHook(ExecuteWithHookContext hook) {
+ postExecHooks.add(hook);
+ }
+
+ public void addOnFailureHook(ExecuteWithHookContext hook) {
+ onFailureHooks.add(hook);
+ }
+}
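As a sketch of what HookRunner dispatches (illustrative, not part of the patch): a hook that also wants the parse-phase callbacks implements QueryLifeTimeHookWithParseHooks, and can be wired in either through HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS or programmatically via HookRunner#addLifeTimeHook. The class and package names below are placeholders.

    package org.example.hooks; // placeholder package

    import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
    import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookWithParseHooks;

    /** Logs every query life-time phase that HookRunner dispatches. */
    public class LoggingLifeTimeHook implements QueryLifeTimeHookWithParseHooks {
      @Override
      public void beforeParse(QueryLifeTimeHookContext ctx) {
        System.out.println("before parse");
      }
      @Override
      public void afterParse(QueryLifeTimeHookContext ctx, boolean hasError) {
        System.out.println("after parse, error=" + hasError);
      }
      @Override
      public void beforeCompile(QueryLifeTimeHookContext ctx) {
        System.out.println("before compile");
      }
      @Override
      public void afterCompile(QueryLifeTimeHookContext ctx, boolean hasError) {
        System.out.println("after compile, error=" + hasError);
      }
      @Override
      public void beforeExecution(QueryLifeTimeHookContext ctx) {
        System.out.println("before execution");
      }
      @Override
      public void afterExecution(QueryLifeTimeHookContext ctx, boolean hasError) {
        System.out.println("after execution, error=" + hasError);
      }
    }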
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java
deleted file mode 100644
index 53d716b..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryLifeTimeHookRunner.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.hadoop.hive.ql;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.google.common.collect.Iterables;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.hooks.HookContext;
-import org.apache.hadoop.hive.ql.hooks.HooksLoader;
-import org.apache.hadoop.hive.ql.hooks.MaterializedViewRegistryUpdateHook;
-import org.apache.hadoop.hive.ql.hooks.MetricsQueryLifeTimeHook;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHook;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContext;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl;
-import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookWithParseHooks;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-
-/**
- * A runner class for {@link QueryLifeTimeHook}s and {@link QueryLifeTimeHookWithParseHooks}. The class has run methods
- * for each phase of a {@link QueryLifeTimeHook} and {@link QueryLifeTimeHookWithParseHooks}. Each run method checks if
- * a list of hooks has be specified, and if so invokes the appropriate callback method of each hook. Each method
- * constructs a {@link QueryLifeTimeHookContext} object and pass it to the callback functions.
- */
-class QueryLifeTimeHookRunner {
-
- private final HiveConf conf;
- private final List<QueryLifeTimeHook> queryHooks;
-
- /**
- * Constructs a {@link QueryLifeTimeHookRunner} that loads all hooks to be run via a {@link HooksLoader}.
- *
- * @param conf the {@link HiveConf} to use when creating {@link QueryLifeTimeHookContext} objects
- * @param hooksLoader the {@link HooksLoader} to use when loading all hooks to be run
- * @param console the {@link SessionState.LogHelper} to use when running {@link HooksLoader#getHooks(HiveConf.ConfVars)}
- */
- QueryLifeTimeHookRunner(HiveConf conf, HooksLoader hooksLoader, SessionState.LogHelper console) {
- this.conf = conf;
- this.queryHooks = new ArrayList<>();
-
- if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED)) {
- queryHooks.add(new MetricsQueryLifeTimeHook());
- }
- queryHooks.add(new MaterializedViewRegistryUpdateHook());
-
- List<QueryLifeTimeHook> propertyDefinedHoooks;
- try {
- propertyDefinedHoooks = hooksLoader.getHooks(
- HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS, console, QueryLifeTimeHook.class);
- } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
- throw new IllegalArgumentException(e);
- }
- if (propertyDefinedHoooks != null) {
- Iterables.addAll(queryHooks, propertyDefinedHoooks);
- }
- }
-
- /**
- * If {@link QueryLifeTimeHookWithParseHooks} have been loaded via the {@link HooksLoader} then invoke the
- * {@link QueryLifeTimeHookWithParseHooks#beforeParse(QueryLifeTimeHookContext)} method for each
- * {@link QueryLifeTimeHookWithParseHooks}.
- *
- * @param command the Hive command that is being run
- */
- void runBeforeParseHook(String command) {
- if (containsHooks()) {
- QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
- command).build();
-
- for (QueryLifeTimeHook hook : queryHooks) {
- if (hook instanceof QueryLifeTimeHookWithParseHooks) {
- ((QueryLifeTimeHookWithParseHooks) hook).beforeParse(qhc);
- }
- }
- }
- }
-
- /**
- * If {@link QueryLifeTimeHookWithParseHooks} have been loaded via the {@link HooksLoader} then invoke the
- * {@link QueryLifeTimeHookWithParseHooks#afterParse(QueryLifeTimeHookContext, boolean)} method for each
- * {@link QueryLifeTimeHookWithParseHooks}.
- *
- * @param command the Hive command that is being run
- * @param parseError true if there was an error while parsing the command, false otherwise
- */
- void runAfterParseHook(String command, boolean parseError) {
- if (containsHooks()) {
- QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
- command).build();
-
- for (QueryLifeTimeHook hook : queryHooks) {
- if (hook instanceof QueryLifeTimeHookWithParseHooks) {
- ((QueryLifeTimeHookWithParseHooks) hook).afterParse(qhc, parseError);
- }
- }
- }
- }
-
- /**
- * Invoke the {@link QueryLifeTimeHook#beforeCompile(QueryLifeTimeHookContext)} method for each {@link QueryLifeTimeHook}
- *
- * @param command the Hive command that is being run
- */
- void runBeforeCompileHook(String command) {
- if (containsHooks()) {
- QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
- command).build();
-
- for (QueryLifeTimeHook hook : queryHooks) {
- hook.beforeCompile(qhc);
- }
- }
- }
-
- /**
- * Invoke the {@link QueryLifeTimeHook#afterCompile(QueryLifeTimeHookContext, boolean)} method for each {@link QueryLifeTimeHook}
- *
- * @param command the Hive command that is being run
- * @param compileError true if there was an error while compiling the command, false otherwise
- */
- void runAfterCompilationHook(String command, boolean compileError) {
- if (containsHooks()) {
- QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
- command).build();
-
- for (QueryLifeTimeHook hook : queryHooks) {
- hook.afterCompile(qhc, compileError);
- }
- }
- }
-
- /**
- * Invoke the {@link QueryLifeTimeHook#beforeExecution(QueryLifeTimeHookContext)} method for each {@link QueryLifeTimeHook}
- *
- * @param command the Hive command that is being run
- * @param hookContext the {@link HookContext} of the command being run
- */
- void runBeforeExecutionHook(String command, HookContext hookContext) {
- if (containsHooks()) {
- QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
- command).withHookContext(hookContext).build();
-
- for (QueryLifeTimeHook hook : queryHooks) {
- hook.beforeExecution(qhc);
- }
- }
- }
-
- /**
- * Invoke the {@link QueryLifeTimeHook#afterExecution(QueryLifeTimeHookContext, boolean)} method for each {@link QueryLifeTimeHook}
- *
- * @param command the Hive command that is being run
- * @param hookContext the {@link HookContext} of the command being run
- * @param executionError true if there was an error while executing the command, false otherwise
- */
- void runAfterExecutionHook(String command, HookContext hookContext, boolean executionError) {
- if (containsHooks()) {
- QueryLifeTimeHookContext qhc = new QueryLifeTimeHookContextImpl.Builder().withHiveConf(conf).withCommand(
- command).withHookContext(hookContext).build();
-
- for (QueryLifeTimeHook hook : queryHooks) {
- hook.afterExecution(qhc, executionError);
- }
- }
- }
-
- private boolean containsHooks() {
- return queryHooks != null && !queryHooks.isEmpty();
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
index dbd258a..0841d67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookUtils.java
@@ -6,9 +6,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,11 +18,13 @@
package org.apache.hadoop.hive.ql.hooks;
+import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.logging.log4j.util.Strings;
public class HookUtils {
@@ -32,7 +34,7 @@ public class HookUtils {
String redactedString = logString;
if (conf != null && logString != null) {
- List<Redactor> queryRedactors = new HooksLoader(conf).getHooks(ConfVars.QUERYREDACTORHOOKS, Redactor.class);
+ List<Redactor> queryRedactors = readHooksFromConf(conf, ConfVars.QUERYREDACTORHOOKS);
for (Redactor redactor : queryRedactors) {
redactor.setConf(conf);
redactedString = redactor.redactQuery(redactedString);
@@ -40,4 +42,19 @@ public class HookUtils {
}
return redactedString;
}
+
+ public static <T extends Hook> List<T> readHooksFromConf(HiveConf conf, HiveConf.ConfVars hookConfVar)
+ throws InstantiationException, IllegalAccessException, ClassNotFoundException {
+ String csHooks = conf.getVar(hookConfVar);
+ List<T> hooks = new ArrayList<>();
+ if (Strings.isBlank(csHooks)) {
+ return hooks;
+ }
+ String[] hookClasses = csHooks.split(",");
+ for (String hookClass : hookClasses) {
+ T hook = (T) Class.forName(hookClass.trim(), true, Utilities.getSessionSpecifiedClassLoader()).newInstance();
+ hooks.add(hook);
+ }
+ return hooks;
+ }
}
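A sketch of the new loading path (not part of the patch; the hook class name is a placeholder): readHooksFromConf splits the comma-separated value of the given ConfVars entry and instantiates each class with the session-specified classloader, so a caller only needs the configuration variable and the expected hook type.

    HiveConf conf = new HiveConf();
    // hive.exec.pre.hooks (ConfVars.PREEXECHOOKS): comma-separated hook class names.
    conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, "org.example.hooks.MyPreExecHook");
    // Throws InstantiationException/IllegalAccessException/ClassNotFoundException on bad entries,
    // which HookRunner.loadHooksFromConf wraps in a RuntimeException.
    List<ExecuteWithHookContext> preHooks =
        HookUtils.readHooksFromConf(conf, HiveConf.ConfVars.PREEXECHOOKS);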
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java
deleted file mode 100644
index 8c19338..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HooksLoader.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-
-package org.apache.hadoop.hive.ql.hooks;
-
-import java.util.List;
-
-import com.google.common.collect.ImmutableList;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-
-/**
- * A loader class for {@link Hook}s. The class provides a way to create and instantiate {@link Hook} objects. The
- * methodology for how hooks are loaded is left up to the individual methods.
- */
-public class HooksLoader {
-
- private final HiveConf conf;
-
- /**
- * Creates a new {@link HooksLoader} that uses the specified {@link HiveConf} to load the {@link Hook}s.
- *
- * @param conf the {@link HiveConf} to use when loading the {@link Hook}s
- */
- public HooksLoader(HiveConf conf) {
- this.conf = conf;
- }
-
- /**
- * Delegates to {@link #getHooks(HiveConf.ConfVars)} and prints the to the specified {@link SessionState.LogHelper} if
- * a {@link ClassNotFoundException} is thrown.
- *
- * @param hookConfVar the configuration variable specifying a comma separated list of the hook class names
- * @param console the {@link SessionState.LogHelper} to print to if a {@link ClassNotFoundException} is thrown by the
- * {@link #getHooks(HiveConf.ConfVars)} method
- *
- * @return a list of the hooks objects, in the order they are listed in the value of hookConfVar
- *
- * @throws ClassNotFoundException if the specified class names could not be found
- * @throws IllegalAccessException if the specified class names could not be accessed
- * @throws InstantiationException if the specified class names could not be instantiated
- */
- public final <T extends Hook> List<T> getHooks(HiveConf.ConfVars hookConfVar, SessionState.LogHelper console,
- Class<T> clazz) throws IllegalAccessException, InstantiationException, ClassNotFoundException {
- try {
- return getHooks(hookConfVar, clazz);
- } catch (ClassNotFoundException e) {
- console.printError(hookConfVar.varname + " Class not found: " + e.getMessage());
- throw e;
- }
- }
-
- /**
- * Returns the hooks specified in a configuration variable. The hooks are returned in a list in the order they were
- * specified in the configuration variable. The value of the specified conf variable should be a comma separated list
- * of class names where each class implements the {@link Hook} interface. The method uses reflection to an instance
- * of each class and then returns them in a {@link List}.
- *
- * @param hookConfVar The configuration variable specifying a comma separated list of the hook class names
- * @param class2
- * @param class1
- * @param console
- *
- * @return a list of the hooks objects, in the order they are listed in the value of hookConfVar
- *
- * @throws ClassNotFoundException if the specified class names could not be found
- * @throws IllegalAccessException if the specified class names could not be accessed
- * @throws InstantiationException if the specified class names could not be instantiated
- */
- public <T extends Hook> List<T> getHooks(HiveConf.ConfVars hookConfVar, Class<T> clazz)
- throws InstantiationException, IllegalAccessException, ClassNotFoundException {
- String csHooks = conf.getVar(hookConfVar);
- ImmutableList.Builder<T> hooks = ImmutableList.builder();
- if (csHooks == null) {
- return ImmutableList.of();
- }
-
- csHooks = csHooks.trim();
- if (csHooks.isEmpty()) {
- return ImmutableList.of();
- }
-
- String[] hookClasses = csHooks.split(",");
- for (String hookClass : hookClasses) {
- T hook = (T) Class.forName(hookClass.trim(), true,
- Utilities.getSessionSpecifiedClassLoader()).newInstance();
- hooks.add(hook);
- }
-
- return hooks.build();
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java
index 492b63d..5b4b42b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.hive.ql.hooks;
-import com.google.common.collect.Lists;
-
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.QueryState;
@@ -39,7 +37,6 @@ import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
public class TestQueryHooks {
@@ -59,7 +56,9 @@ public class TestQueryHooks {
String query = "select 1";
ArgumentMatcher<QueryLifeTimeHookContext> argMatcher = new QueryLifeTimeHookContextMatcher(query);
QueryLifeTimeHookWithParseHooks mockHook = mock(QueryLifeTimeHookWithParseHooks.class);
- int ret = createDriver(mockHook).run(query).getResponseCode();
+ Driver driver = createDriver();
+ driver.getHookRunner().addLifeTimeHook(mockHook);
+ int ret = driver.run(query).getResponseCode();
assertEquals("Expected query to succeed", 0, ret);
verify(mockHook).beforeParse(argThat(argMatcher));
@@ -75,7 +74,9 @@ public class TestQueryHooks {
String query = "invalidquery";
ArgumentMatcher<QueryLifeTimeHookContext> argMatcher = new QueryLifeTimeHookContextMatcher(query);
QueryLifeTimeHookWithParseHooks mockHook = mock(QueryLifeTimeHookWithParseHooks.class);
- int ret = createDriver(mockHook).run(query).getResponseCode();
+ Driver driver = createDriver();
+ driver.getHookRunner().addLifeTimeHook(mockHook);
+ int ret = driver.run(query).getResponseCode();
assertNotEquals("Expected parsing to fail", 0, ret);
verify(mockHook).beforeParse(argThat(argMatcher));
@@ -91,7 +92,9 @@ public class TestQueryHooks {
String query = "select * from foo";
ArgumentMatcher<QueryLifeTimeHookContext> argMatcher = new QueryLifeTimeHookContextMatcher(query);
QueryLifeTimeHookWithParseHooks mockHook = mock(QueryLifeTimeHookWithParseHooks.class);
- int ret = createDriver(mockHook).run(query).getResponseCode();
+ Driver driver = createDriver();
+ driver.getHookRunner().addLifeTimeHook(mockHook);
+ int ret = driver.run(query).getResponseCode();
assertNotEquals("Expected compilation to fail", 0, ret);
verify(mockHook).beforeParse(argThat(argMatcher));
@@ -107,7 +110,9 @@ public class TestQueryHooks {
String query = "select 1";
ArgumentMatcher<QueryLifeTimeHookContext> argMatcher = new QueryLifeTimeHookContextMatcher(query);
QueryLifeTimeHook mockHook = mock(QueryLifeTimeHook.class);
- int ret = createDriver(mockHook).run(query).getResponseCode();
+ Driver driver = createDriver();
+ driver.getHookRunner().addLifeTimeHook(mockHook);
+ int ret = driver.run(query).getResponseCode();
assertEquals("Expected query to succeed", 0, ret);
verify(mockHook).beforeCompile(argThat(argMatcher));
@@ -121,7 +126,9 @@ public class TestQueryHooks {
String query = "select * from foo";
ArgumentMatcher<QueryLifeTimeHookContext> argMatcher = new QueryLifeTimeHookContextMatcher(query);
QueryLifeTimeHook mockHook = mock(QueryLifeTimeHook.class);
- int ret = createDriver(mockHook).run(query).getResponseCode();
+ Driver driver = createDriver();
+ driver.getHookRunner().addLifeTimeHook(mockHook);
+ int ret = driver.run(query).getResponseCode();
assertNotEquals("Expected compilation to fail", 0, ret);
verify(mockHook).beforeCompile(argThat(argMatcher));
@@ -130,14 +137,9 @@ public class TestQueryHooks {
verify(mockHook, never()).afterExecution(any(), anyBoolean());
}
- private Driver createDriver(QueryLifeTimeHook mockHook) throws IllegalAccessException, ClassNotFoundException, InstantiationException {
- HooksLoader mockLoader = mock(HooksLoader.class);
- when(mockLoader.getHooks(eq(HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS), any())).thenReturn(
- Lists.newArrayList(mockHook));
-
+ private Driver createDriver() throws IllegalAccessException, ClassNotFoundException, InstantiationException {
SessionState.start(conf);
-
- Driver driver = new Driver(new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(), null, mockLoader, null, null);
+ Driver driver = new Driver(conf);
return driver;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/ql/src/test/results/clientnegative/bad_exec_hooks.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/bad_exec_hooks.q.out b/ql/src/test/results/clientnegative/bad_exec_hooks.q.out
index 6f2ee5c..d0b47b7 100644
--- a/ql/src/test/results/clientnegative/bad_exec_hooks.q.out
+++ b/ql/src/test/results/clientnegative/bad_exec_hooks.q.out
@@ -1,5 +1,8 @@
-hive.exec.pre.hooks Class not found: "org.this.is.a.bad.class"
-FAILED: Hive Internal Error: java.lang.ClassNotFoundException("org.this.is.a.bad.class")
-java.lang.ClassNotFoundException: "org.this.is.a.bad.class"
+FAILED: Hive Internal Error: java.lang.RuntimeException(Error loading hooks(hive.exec.pre.hooks): java.lang.ClassNotFoundException: "org.this.is.a.bad.class"
+#### A masked pattern was here ####
+)
+java.lang.RuntimeException: Error loading hooks(hive.exec.pre.hooks): java.lang.ClassNotFoundException: "org.this.is.a.bad.class"
+#### A masked pattern was here ####
+
#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/c96c6acf/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
index f3e08a9..e964982 100644
--- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
+++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.ql.hooks.HooksLoader;
+import org.apache.hadoop.hive.ql.hooks.HookUtils;
import org.apache.hive.service.CompositeService;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.SessionHandle;
@@ -655,7 +655,7 @@ public class SessionManager extends CompositeService {
// execute session hooks
private void executeSessionHooks(HiveSession session) throws Exception {
List<HiveSessionHook> sessionHooks =
- new HooksLoader(hiveConf).getHooks(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK, HiveSessionHook.class);
+ HookUtils.readHooksFromConf(hiveConf, HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK);
for (HiveSessionHook sessionHook : sessionHooks) {
sessionHook.run(new HiveSessionHookContextImpl(session));
}
[02/15] hive git commit: HIVE-18698: Fix TestMiniLlapLocalCliDriver#testCliDriver[bucket_map_join_tez1] (Zoltan Haindrich reviewed by Jesus Camacho Rodriguez)
Posted by kg...@apache.org.
HIVE-18698: Fix TestMiniLlapLocalCliDriver#testCliDriver[bucket_map_join_tez1] (Zoltan Haindrich reviewed by Jesus Camacho Rodriguez)
Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c2c188e5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c2c188e5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c2c188e5
Branch: refs/heads/master
Commit: c2c188e5148cdaadedc99804f8735aac5d393343
Parents: c96c6ac
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Wed Feb 14 09:22:01 2018 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Wed Feb 14 09:22:01 2018 +0100
----------------------------------------------------------------------
.../llap/bucket_map_join_tez1.q.out | 282 +++++++++----------
1 file changed, 134 insertions(+), 148 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/c2c188e5/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
index 19a503e..543fccd 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
@@ -235,25 +235,25 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
sort order: +++
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Reducer 3
Execution mode: llap
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -823,15 +823,15 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col3
input vertices:
0 Map 1
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
sort order: +++
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: no inputs
Reducer 3
@@ -840,10 +840,10 @@ STAGE PLANS:
Select Operator
expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1436,7 +1436,7 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
- Statistics: Num rows: 196 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 1560 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
mode: hash
@@ -1565,7 +1565,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
1 Map 4
- Statistics: Num rows: 196 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 1560 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
mode: hash
@@ -1696,7 +1696,7 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
- Statistics: Num rows: 636 Data size: 5088 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 632 Data size: 5056 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
mode: hash
@@ -1731,16 +1731,16 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col1
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Stage: Stage-0
Fetch Operator
@@ -1838,11 +1838,11 @@ STAGE PLANS:
outputColumnNames: _col1
input vertices:
1 Map 4
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -1851,7 +1851,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
0 Map 1
- Statistics: Num rows: 636 Data size: 5088 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 632 Data size: 5056 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
mode: hash
@@ -2007,7 +2007,7 @@ STAGE PLANS:
keys:
0 _col0 (type: int)
1 _col0 (type: int)
- Statistics: Num rows: 636 Data size: 5088 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 632 Data size: 5056 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
mode: hash
@@ -2042,16 +2042,16 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col1
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Stage: Stage-0
Fetch Operator
@@ -2149,11 +2149,11 @@ STAGE PLANS:
outputColumnNames: _col1
input vertices:
1 Map 4
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: int)
outputColumnNames: _col0
- Statistics: Num rows: 392 Data size: 1568 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 1564 Basic stats: COMPLETE Column stats: COMPLETE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -2162,7 +2162,7 @@ STAGE PLANS:
1 _col0 (type: int)
input vertices:
0 Map 1
- Statistics: Num rows: 636 Data size: 5088 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 632 Data size: 5056 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
mode: hash
@@ -2566,11 +2566,11 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col1, _col2
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: int), substr(_col2, 5) (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: sum(_col1)
keys: _col0 (type: int)
@@ -2708,11 +2708,11 @@ STAGE PLANS:
outputColumnNames: _col1, _col2
input vertices:
0 Map 2
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col1 (type: int), substr(_col2, 5) (type: string)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: sum(_col1)
keys: _col0 (type: int)
@@ -2866,14 +2866,14 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col1 (type: int)
outputColumnNames: _col1, _col2, _col3
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col3 (type: int), _col2 (type: double), _col1 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -2949,14 +2949,14 @@ STAGE PLANS:
outputColumnNames: _col1, _col2, _col3
input vertices:
1 Reducer 3
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col3 (type: int), _col2 (type: double), _col1 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3139,14 +3139,14 @@ STAGE PLANS:
1 _col0 (type: int)
2 _col0 (type: int)
outputColumnNames: _col0, _col1, _col4
- Statistics: Num rows: 621 Data size: 115506 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 619 Data size: 115134 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 621 Data size: 115506 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 619 Data size: 115134 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 621 Data size: 115506 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 619 Data size: 115134 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3202,14 +3202,14 @@ STAGE PLANS:
input vertices:
1 Map 2
2 Map 3
- Statistics: Num rows: 621 Data size: 115506 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 619 Data size: 115134 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 621 Data size: 115506 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 619 Data size: 115134 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 621 Data size: 115506 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 619 Data size: 115134 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3304,12 +3304,12 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
input vertices:
1 Map 3
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
sort order: +
Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: string)
Execution mode: llap
LLAP IO: no inputs
@@ -3362,14 +3362,14 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 620 Data size: 115320 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 114948 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 620 Data size: 115320 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 114948 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 620 Data size: 115320 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 114948 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3422,7 +3422,7 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
input vertices:
1 Map 2
- Statistics: Num rows: 392 Data size: 37240 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 37145 Basic stats: COMPLETE Column stats: COMPLETE
Map Join Operator
condition map:
Inner Join 0 to 1
@@ -3432,14 +3432,14 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col3
input vertices:
1 Map 3
- Statistics: Num rows: 620 Data size: 115320 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 114948 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 620 Data size: 115320 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 114948 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 620 Data size: 115320 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 618 Data size: 114948 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3586,14 +3586,14 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 196 Data size: 20188 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 20085 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 196 Data size: 20188 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 20085 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 196 Data size: 20188 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 20085 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3675,14 +3675,14 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col3
input vertices:
0 Reducer 2
- Statistics: Num rows: 196 Data size: 20188 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 20085 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 196 Data size: 20188 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 20085 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 196 Data size: 20188 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 195 Data size: 20085 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3799,14 +3799,14 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -3882,14 +3882,14 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col3
input vertices:
0 Reducer 2
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 248 Data size: 25544 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 247 Data size: 25441 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4178,14 +4178,14 @@ STAGE PLANS:
0 _col0 (type: int)
1 _col0 (type: int)
outputColumnNames: _col0, _col1, _col3
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4258,14 +4258,14 @@ STAGE PLANS:
outputColumnNames: _col0, _col1, _col3
input vertices:
0 Map 1
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4293,7 +4293,7 @@ STAGE PLANS:
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
- Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+ Reducer 3 <- Map 1 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
@@ -4309,18 +4309,11 @@ STAGE PLANS:
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
- key expressions: _col1 (type: string)
+ key expressions: _col0 (type: int)
sort order: +
- Map-reduce partition columns: _col1 (type: string)
+ Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: int)
- Execution mode: llap
- LLAP IO: no inputs
- Map 4
- Map Operator Tree:
- TableScan
- alias: b
- Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col1 (type: string)
Filter Operator
predicate: value is not null (type: boolean)
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
@@ -4336,7 +4329,7 @@ STAGE PLANS:
value expressions: _col0 (type: int)
Execution mode: llap
LLAP IO: no inputs
- Map 5
+ Map 4
Map Operator Tree:
TableScan
alias: c
@@ -4362,16 +4355,16 @@ STAGE PLANS:
condition map:
Inner Join 0 to 1
keys:
- 0 _col1 (type: string)
- 1 _col1 (type: string)
- outputColumnNames: _col0, _col2
- Statistics: Num rows: 809 Data size: 6472 Basic stats: COMPLETE Column stats: COMPLETE
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ Statistics: Num rows: 809 Data size: 76855 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
- key expressions: _col0 (type: int)
+ key expressions: _col1 (type: string)
sort order: +
- Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 809 Data size: 6472 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: int)
+ Map-reduce partition columns: _col1 (type: string)
+ Statistics: Num rows: 809 Data size: 76855 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: int)
Reducer 3
Execution mode: llap
Reduce Operator Tree:
@@ -4379,17 +4372,17 @@ STAGE PLANS:
condition map:
Inner Join 0 to 1
keys:
- 0 _col0 (type: int)
- 1 _col0 (type: int)
- outputColumnNames: _col0, _col2
- Statistics: Num rows: 1313 Data size: 10504 Basic stats: COMPLETE Column stats: COMPLETE
+ 0 _col1 (type: string)
+ 1 _col1 (type: string)
+ outputColumnNames: _col0, _col3
+ Statistics: Num rows: 1309 Data size: 10472 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col2 (type: int)
+ expressions: _col0 (type: int), _col3 (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1313 Data size: 10504 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1309 Data size: 10472 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1313 Data size: 10504 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1309 Data size: 10472 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4414,8 +4407,8 @@ STAGE PLANS:
Tez
#### A masked pattern was here ####
Edges:
+ Map 1 <- Map 3 (CUSTOM_EDGE)
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
- Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
@@ -4430,12 +4423,41 @@ STAGE PLANS:
expressions: key (type: int), value (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+ Map Join Operator
+ condition map:
+ Inner Join 0 to 1
+ keys:
+ 0 _col0 (type: int)
+ 1 _col0 (type: int)
+ outputColumnNames: _col0, _col1
+ input vertices:
+ 1 Map 3
+ Statistics: Num rows: 809 Data size: 76855 Basic stats: COMPLETE Column stats: COMPLETE
+ Reduce Output Operator
+ key expressions: _col1 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col1 (type: string)
+ Statistics: Num rows: 809 Data size: 76855 Basic stats: COMPLETE Column stats: COMPLETE
+ value expressions: _col0 (type: int)
+ Execution mode: llap
+ LLAP IO: no inputs
+ Map 3
+ Map Operator Tree:
+ TableScan
+ alias: c
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+ Filter Operator
+ predicate: key is not null (type: boolean)
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+ Select Operator
+ expressions: key (type: int)
+ outputColumnNames: _col0
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
- key expressions: _col1 (type: string)
+ key expressions: _col0 (type: int)
sort order: +
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: int)
+ Map-reduce partition columns: _col0 (type: int)
+ Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: llap
LLAP IO: no inputs
Map 4
@@ -4458,25 +4480,6 @@ STAGE PLANS:
value expressions: _col0 (type: int)
Execution mode: llap
LLAP IO: no inputs
- Map 5
- Map Operator Tree:
- TableScan
- alias: c
- Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
- Filter Operator
- predicate: key is not null (type: boolean)
- Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- expressions: key (type: int)
- outputColumnNames: _col0
- Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
- Execution mode: llap
- LLAP IO: no inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
@@ -4486,32 +4489,15 @@ STAGE PLANS:
keys:
0 _col1 (type: string)
1 _col1 (type: string)
- outputColumnNames: _col0, _col2
- Statistics: Num rows: 809 Data size: 6472 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- key expressions: _col0 (type: int)
- sort order: +
- Map-reduce partition columns: _col0 (type: int)
- Statistics: Num rows: 809 Data size: 6472 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col2 (type: int)
- Reducer 3
- Execution mode: llap
- Reduce Operator Tree:
- Merge Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: int)
- 1 _col0 (type: int)
- outputColumnNames: _col0, _col2
- Statistics: Num rows: 1313 Data size: 10504 Basic stats: COMPLETE Column stats: COMPLETE
+ outputColumnNames: _col0, _col3
+ Statistics: Num rows: 1309 Data size: 10472 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
- expressions: _col0 (type: int), _col2 (type: int)
+ expressions: _col0 (type: int), _col3 (type: int)
outputColumnNames: _col0, _col1
- Statistics: Num rows: 1313 Data size: 10504 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1309 Data size: 10472 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 1313 Data size: 10504 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 1309 Data size: 10472 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4608,14 +4594,14 @@ STAGE PLANS:
0 _col0 (type: int), _col2 (type: string)
1 _col0 (type: int), _col2 (type: string)
outputColumnNames: _col0, _col1, _col4
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -4712,14 +4698,14 @@ STAGE PLANS:
0 _col0 (type: int), _col2 (type: string)
1 _col0 (type: int), _col2 (type: string)
outputColumnNames: _col0, _col1, _col4
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string)
outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
- Statistics: Num rows: 392 Data size: 72912 Basic stats: COMPLETE Column stats: COMPLETE
+ Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
[09/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
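For context, the index DDL that the deleted and trimmed q.out files below no longer exercise followed this general shape (a sketch assembled from statements visible in the removed test output; the idx1/temp_tbl names are simply the ones those tests used, not a canonical example):

    CREATE INDEX idx1 ON TABLE temp_tbl(id) AS 'COMPACT' WITH DEFERRED REBUILD;
    ALTER INDEX idx1 ON temp_tbl REBUILD;
    SHOW FORMATTED INDEX ON temp_tbl;
    DROP INDEX idx1 ON temp_tbl;

After this change these statements are rejected by the parser, so the corresponding PREHOOK/POSTHOOK blocks and the derived index tables (e.g. db5__temp_tbl_idx1__) disappear from the expected outputs.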
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
index 1b81d7c..43b9399 100644
--- a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
+++ b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
@@ -36,13 +36,6 @@ POSTHOOK: Output: database:escape_comments_db
POSTHOOK: Output: escape_comments_db@escape_comments_view1
POSTHOOK: Lineage: escape_comments_view1.col1 SIMPLE [(escape_comments_tbl1)escape_comments_tbl1.FieldSchema(name:col1, type:string, comment:a
b';), ]
-PREHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: escape_comments_db@escape_comments_tbl1
-POSTHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
-POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
PREHOOK: query: describe database extended escape_comments_db
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:escape_comments_db
@@ -169,23 +162,15 @@ Sort Columns: [] NULL
View Original Text: select col1 from escape_comments_tbl1 NULL
View Expanded Text: SELECT `col1` AS `col1` FROM (select `escape_comments_tbl1`.`col1` from `escape_comments_db`.`escape_comments_tbl1`) `escape_comments_db.escape_comments_view1` NULL
View Rewrite Enabled: No NULL
-PREHOOK: query: show formatted index on escape_comments_tbl1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: show formatted index on escape_comments_tbl1
-POSTHOOK: type: SHOWINDEXES
-idx_name tab_name col_names idx_tab_name idx_type comment
-index2 escape_comments_tbl1 col1 escape_comments_db__escape_comments_tbl1_index2__ compact a\nb
PREHOOK: query: drop database escape_comments_db cascade
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:escape_comments_db
PREHOOK: Output: database:escape_comments_db
-PREHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
PREHOOK: Output: escape_comments_db@escape_comments_tbl1
PREHOOK: Output: escape_comments_db@escape_comments_view1
POSTHOOK: query: drop database escape_comments_db cascade
POSTHOOK: type: DROPDATABASE
POSTHOOK: Input: database:escape_comments_db
POSTHOOK: Output: database:escape_comments_db
-POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
POSTHOOK: Output: escape_comments_db@escape_comments_view1
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/database_drop.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/database_drop.q.out b/ql/src/test/results/clientpositive/database_drop.q.out
index 9059c38..dac2e97 100644
--- a/ql/src/test/results/clientpositive/database_drop.q.out
+++ b/ql/src/test/results/clientpositive/database_drop.q.out
@@ -45,24 +45,6 @@ POSTHOOK: Output: database:db5
POSTHOOK: Output: db5@temp_tbl_view
POSTHOOK: Lineage: temp_tbl_view.id SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:id, type:int, comment:null), ]
POSTHOOK: Lineage: temp_tbl_view.name SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: CREATE INDEX idx1 ON TABLE temp_tbl(id) AS 'COMPACT' with DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: db5@temp_tbl
-POSTHOOK: query: CREATE INDEX idx1 ON TABLE temp_tbl(id) AS 'COMPACT' with DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: db5@temp_tbl
-POSTHOOK: Output: db5@db5__temp_tbl_idx1__
-PREHOOK: query: ALTER INDEX idx1 ON temp_tbl REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@temp_tbl
-PREHOOK: Output: db5@db5__temp_tbl_idx1__
-POSTHOOK: query: ALTER INDEX idx1 ON temp_tbl REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@temp_tbl
-POSTHOOK: Output: db5@db5__temp_tbl_idx1__
-POSTHOOK: Lineage: db5__temp_tbl_idx1__._bucketname SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__temp_tbl_idx1__._offsets EXPRESSION [(temp_tbl)temp_tbl.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__temp_tbl_idx1__.id SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:id, type:int, comment:null), ]
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
#### A masked pattern was here ####
@@ -93,25 +75,6 @@ POSTHOOK: Output: database:db5
POSTHOOK: Output: db5@temp_tbl2_view
POSTHOOK: Lineage: temp_tbl2_view.id SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:id, type:int, comment:null), ]
POSTHOOK: Lineage: temp_tbl2_view.name SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:name, type:string, comment:null), ]
-#### A masked pattern was here ####
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: db5@temp_tbl2
-#### A masked pattern was here ####
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: db5@temp_tbl2
-POSTHOOK: Output: db5@db5__temp_tbl2_idx2__
-#### A masked pattern was here ####
-PREHOOK: query: ALTER INDEX idx2 ON temp_tbl2 REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@temp_tbl2
-PREHOOK: Output: db5@db5__temp_tbl2_idx2__
-POSTHOOK: query: ALTER INDEX idx2 ON temp_tbl2 REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@temp_tbl2
-POSTHOOK: Output: db5@db5__temp_tbl2_idx2__
-POSTHOOK: Lineage: db5__temp_tbl2_idx2__._bucketname SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__temp_tbl2_idx2__._offsets EXPRESSION [(temp_tbl2)temp_tbl2.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__temp_tbl2_idx2__.id SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:id, type:int, comment:null), ]
PREHOOK: query: CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:db5
@@ -138,39 +101,6 @@ POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: db5@part_tab
POSTHOOK: Output: db5@part_tab@ds=2009-04-09
-PREHOOK: query: CREATE INDEX idx3 ON TABLE part_tab(id) AS 'COMPACT' with DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: db5@part_tab
-POSTHOOK: query: CREATE INDEX idx3 ON TABLE part_tab(id) AS 'COMPACT' with DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: db5@part_tab
-POSTHOOK: Output: db5@db5__part_tab_idx3__
-PREHOOK: query: ALTER INDEX idx3 ON part_tab PARTITION (ds='2008-04-09') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@part_tab
-PREHOOK: Input: db5@part_tab@ds=2008-04-09
-PREHOOK: Output: db5@db5__part_tab_idx3__@ds=2008-04-09
-POSTHOOK: query: ALTER INDEX idx3 ON part_tab PARTITION (ds='2008-04-09') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@part_tab
-POSTHOOK: Input: db5@part_tab@ds=2008-04-09
-POSTHOOK: Output: db5@db5__part_tab_idx3__@ds=2008-04-09
-POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2008-04-09)._bucketname SIMPLE [(part_tab)part_tab.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2008-04-09)._offsets EXPRESSION [(part_tab)part_tab.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2008-04-09).id SIMPLE [(part_tab)part_tab.FieldSchema(name:id, type:int, comment:null), ]
-PREHOOK: query: ALTER INDEX idx3 ON part_tab PARTITION (ds='2009-04-09') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@part_tab
-PREHOOK: Input: db5@part_tab@ds=2009-04-09
-PREHOOK: Output: db5@db5__part_tab_idx3__@ds=2009-04-09
-POSTHOOK: query: ALTER INDEX idx3 ON part_tab PARTITION (ds='2009-04-09') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@part_tab
-POSTHOOK: Input: db5@part_tab@ds=2009-04-09
-POSTHOOK: Output: db5@db5__part_tab_idx3__@ds=2009-04-09
-POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2009-04-09)._bucketname SIMPLE [(part_tab)part_tab.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2009-04-09)._offsets EXPRESSION [(part_tab)part_tab.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2009-04-09).id SIMPLE [(part_tab)part_tab.FieldSchema(name:id, type:int, comment:null), ]
PREHOOK: query: CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string)
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
@@ -201,43 +131,6 @@ POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: db5@part_tab2
POSTHOOK: Output: db5@part_tab2@ds=2009-04-09
-PREHOOK: query: CREATE INDEX idx4 ON TABLE part_tab2(id) AS 'COMPACT' with DEFERRED REBUILD
-#### A masked pattern was here ####
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: db5@part_tab2
-#### A masked pattern was here ####
-POSTHOOK: query: CREATE INDEX idx4 ON TABLE part_tab2(id) AS 'COMPACT' with DEFERRED REBUILD
-#### A masked pattern was here ####
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: db5@part_tab2
-POSTHOOK: Output: db5@db5__part_tab2_idx4__
-#### A masked pattern was here ####
-PREHOOK: query: ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2008-04-09') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@part_tab2
-PREHOOK: Input: db5@part_tab2@ds=2008-04-09
-PREHOOK: Output: db5@db5__part_tab2_idx4__@ds=2008-04-09
-POSTHOOK: query: ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2008-04-09') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@part_tab2
-POSTHOOK: Input: db5@part_tab2@ds=2008-04-09
-POSTHOOK: Output: db5@db5__part_tab2_idx4__@ds=2008-04-09
-POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2008-04-09)._bucketname SIMPLE [(part_tab2)part_tab2.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2008-04-09)._offsets EXPRESSION [(part_tab2)part_tab2.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2008-04-09).id SIMPLE [(part_tab2)part_tab2.FieldSchema(name:id, type:int, comment:null), ]
-PREHOOK: query: ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2009-04-09') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@part_tab2
-PREHOOK: Input: db5@part_tab2@ds=2009-04-09
-PREHOOK: Output: db5@db5__part_tab2_idx4__@ds=2009-04-09
-POSTHOOK: query: ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2009-04-09') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@part_tab2
-POSTHOOK: Input: db5@part_tab2@ds=2009-04-09
-POSTHOOK: Output: db5@db5__part_tab2_idx4__@ds=2009-04-09
-POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2009-04-09)._bucketname SIMPLE [(part_tab2)part_tab2.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2009-04-09)._offsets EXPRESSION [(part_tab2)part_tab2.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2009-04-09).id SIMPLE [(part_tab2)part_tab2.FieldSchema(name:id, type:int, comment:null), ]
PREHOOK: query: CREATE TABLE part_tab3 (id INT, name STRING) PARTITIONED BY (ds string)
#### A masked pattern was here ####
PREHOOK: type: CREATETABLE
@@ -277,43 +170,6 @@ POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: db5@part_tab3
POSTHOOK: Output: db5@part_tab3@ds=2009-04-09
-PREHOOK: query: CREATE INDEX idx5 ON TABLE part_tab3(id) AS 'COMPACT' with DEFERRED REBUILD
-#### A masked pattern was here ####
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: db5@part_tab3
-#### A masked pattern was here ####
-POSTHOOK: query: CREATE INDEX idx5 ON TABLE part_tab3(id) AS 'COMPACT' with DEFERRED REBUILD
-#### A masked pattern was here ####
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: db5@part_tab3
-POSTHOOK: Output: db5@db5__part_tab3_idx5__
-#### A masked pattern was here ####
-PREHOOK: query: ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2008-04-09') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@part_tab3
-PREHOOK: Input: db5@part_tab3@ds=2008-04-09
-PREHOOK: Output: db5@db5__part_tab3_idx5__@ds=2008-04-09
-POSTHOOK: query: ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2008-04-09') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@part_tab3
-POSTHOOK: Input: db5@part_tab3@ds=2008-04-09
-POSTHOOK: Output: db5@db5__part_tab3_idx5__@ds=2008-04-09
-POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2008-04-09)._bucketname SIMPLE [(part_tab3)part_tab3.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2008-04-09)._offsets EXPRESSION [(part_tab3)part_tab3.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2008-04-09).id SIMPLE [(part_tab3)part_tab3.FieldSchema(name:id, type:int, comment:null), ]
-PREHOOK: query: ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2009-04-09') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@part_tab3
-PREHOOK: Input: db5@part_tab3@ds=2009-04-09
-PREHOOK: Output: db5@db5__part_tab3_idx5__@ds=2009-04-09
-POSTHOOK: query: ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2009-04-09') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@part_tab3
-POSTHOOK: Input: db5@part_tab3@ds=2009-04-09
-POSTHOOK: Output: db5@db5__part_tab3_idx5__@ds=2009-04-09
-POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2009-04-09)._bucketname SIMPLE [(part_tab3)part_tab3.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2009-04-09)._offsets EXPRESSION [(part_tab3)part_tab3.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2009-04-09).id SIMPLE [(part_tab3)part_tab3.FieldSchema(name:id, type:int, comment:null), ]
PREHOOK: query: CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT
DELIMITED FIELDS TERMINATED BY ''
LINES TERMINATED BY '\n'
@@ -348,33 +204,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table te
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: db5@temp_tbl3
-PREHOOK: query: CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: db5@temp_tbl3
-POSTHOOK: query: CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: db5@temp_tbl3
-POSTHOOK: Output: db5@temp_tbl3_idx_tbl
-PREHOOK: query: ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: db5@temp_tbl3
-PREHOOK: Output: db5@temp_tbl3_idx_tbl
-POSTHOOK: query: ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: db5@temp_tbl3
-POSTHOOK: Output: db5@temp_tbl3_idx_tbl
-POSTHOOK: Lineage: temp_tbl3_idx_tbl._bucketname SIMPLE [(temp_tbl3)temp_tbl3.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: temp_tbl3_idx_tbl._offsets EXPRESSION [(temp_tbl3)temp_tbl3.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: temp_tbl3_idx_tbl.id SIMPLE [(temp_tbl3)temp_tbl3.FieldSchema(name:id, type:int, comment:null), ]
PREHOOK: query: DROP DATABASE db5 CASCADE
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:db5
PREHOOK: Output: database:db5
-PREHOOK: Output: db5@db5__part_tab2_idx4__
-PREHOOK: Output: db5@db5__part_tab3_idx5__
-PREHOOK: Output: db5@db5__part_tab_idx3__
-PREHOOK: Output: db5@db5__temp_tbl2_idx2__
-PREHOOK: Output: db5@db5__temp_tbl_idx1__
PREHOOK: Output: db5@extab1
PREHOOK: Output: db5@part_tab
PREHOOK: Output: db5@part_tab2
@@ -383,17 +216,11 @@ PREHOOK: Output: db5@temp_tbl
PREHOOK: Output: db5@temp_tbl2
PREHOOK: Output: db5@temp_tbl2_view
PREHOOK: Output: db5@temp_tbl3
-PREHOOK: Output: db5@temp_tbl3_idx_tbl
PREHOOK: Output: db5@temp_tbl_view
POSTHOOK: query: DROP DATABASE db5 CASCADE
POSTHOOK: type: DROPDATABASE
POSTHOOK: Input: database:db5
POSTHOOK: Output: database:db5
-POSTHOOK: Output: db5@db5__part_tab2_idx4__
-POSTHOOK: Output: db5@db5__part_tab3_idx5__
-POSTHOOK: Output: db5@db5__part_tab_idx3__
-POSTHOOK: Output: db5@db5__temp_tbl2_idx2__
-POSTHOOK: Output: db5@db5__temp_tbl_idx1__
POSTHOOK: Output: db5@extab1
POSTHOOK: Output: db5@part_tab
POSTHOOK: Output: db5@part_tab2
@@ -402,6 +229,5 @@ POSTHOOK: Output: db5@temp_tbl
POSTHOOK: Output: db5@temp_tbl2
POSTHOOK: Output: db5@temp_tbl2_view
POSTHOOK: Output: db5@temp_tbl3
-POSTHOOK: Output: db5@temp_tbl3_idx_tbl
POSTHOOK: Output: db5@temp_tbl_view
#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/escape_comments.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/escape_comments.q.out b/ql/src/test/results/clientpositive/escape_comments.q.out
index 6c86e81..15e0bc3 100644
--- a/ql/src/test/results/clientpositive/escape_comments.q.out
+++ b/ql/src/test/results/clientpositive/escape_comments.q.out
@@ -36,13 +36,6 @@ POSTHOOK: Output: database:escape_comments_db
POSTHOOK: Output: escape_comments_db@escape_comments_view1
POSTHOOK: Lineage: escape_comments_view1.col1 SIMPLE [(escape_comments_tbl1)escape_comments_tbl1.FieldSchema(name:col1, type:string, comment:a
b';), ]
-PREHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: escape_comments_db@escape_comments_tbl1
-POSTHOOK: query: create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb'
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: escape_comments_db@escape_comments_tbl1
-POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
PREHOOK: query: describe database extended escape_comments_db
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:escape_comments_db
@@ -175,24 +168,15 @@ Sort Columns: []
View Original Text: select col1 from escape_comments_tbl1
View Expanded Text: SELECT `col1` AS `col1` FROM (select `escape_comments_tbl1`.`col1` from `escape_comments_db`.`escape_comments_tbl1`) `escape_comments_db.escape_comments_view1`
View Rewrite Enabled: No
-PREHOOK: query: show formatted index on escape_comments_tbl1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: show formatted index on escape_comments_tbl1
-POSTHOOK: type: SHOWINDEXES
-idx_name tab_name col_names idx_tab_name idx_type comment
-index2 escape_comments_tbl1 col1 escape_comments_db__escape_comments_tbl1_index2__ compact a
- b
PREHOOK: query: drop database escape_comments_db cascade
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:escape_comments_db
PREHOOK: Output: database:escape_comments_db
-PREHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
PREHOOK: Output: escape_comments_db@escape_comments_tbl1
PREHOOK: Output: escape_comments_db@escape_comments_view1
POSTHOOK: query: drop database escape_comments_db cascade
POSTHOOK: type: DROPDATABASE
POSTHOOK: Input: database:escape_comments_db
POSTHOOK: Output: database:escape_comments_db
-POSTHOOK: Output: escape_comments_db@escape_comments_db__escape_comments_tbl1_index2__
POSTHOOK: Output: escape_comments_db@escape_comments_tbl1
POSTHOOK: Output: escape_comments_db@escape_comments_view1
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auth.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auth.q.out b/ql/src/test/results/clientpositive/index_auth.q.out
deleted file mode 100644
index 385b639..0000000
--- a/ql/src/test/results/clientpositive/index_auth.q.out
+++ /dev/null
@@ -1,79 +0,0 @@
-PREHOOK: query: create table foobar(key int, value string) PARTITIONED BY (ds string, hr string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@foobar
-POSTHOOK: query: create table foobar(key int, value string) PARTITIONED BY (ds string, hr string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@foobar
-PREHOOK: query: alter table foobar add partition (ds='2008-04-08',hr='12')
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@foobar
-POSTHOOK: query: alter table foobar add partition (ds='2008-04-08',hr='12')
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@foobar
-POSTHOOK: Output: default@foobar@ds=2008-04-08/hr=12
-PREHOOK: query: CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@foobar
-POSTHOOK: query: CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@foobar
-POSTHOOK: Output: default@default__foobar_srcpart_auth_index__
-PREHOOK: query: SHOW INDEXES ON foobar
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: SHOW INDEXES ON foobar
-POSTHOOK: type: SHOWINDEXES
-srcpart_auth_index foobar key default__foobar_srcpart_auth_index__ bitmap
-PREHOOK: query: grant select on table foobar to user hive_test_user
-PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@foobar
-POSTHOOK: query: grant select on table foobar to user hive_test_user
-POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@foobar
-PREHOOK: query: grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user
-PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@default__foobar_srcpart_auth_index__
-POSTHOOK: query: grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user
-POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@default__foobar_srcpart_auth_index__
-PREHOOK: query: grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user
-PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@default__foobar_srcpart_auth_index__
-POSTHOOK: query: grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user
-POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@default__foobar_srcpart_auth_index__
-PREHOOK: query: grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user
-PREHOOK: type: GRANT_PRIVILEGE
-PREHOOK: Output: default@default__foobar_srcpart_auth_index__
-POSTHOOK: query: grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user
-POSTHOOK: type: GRANT_PRIVILEGE
-POSTHOOK: Output: default@default__foobar_srcpart_auth_index__
-PREHOOK: query: ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@foobar
-PREHOOK: Input: default@foobar@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__foobar_srcpart_auth_index__@ds=2008-04-08/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@foobar
-POSTHOOK: Input: default@foobar@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__foobar_srcpart_auth_index__@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(foobar)foobar.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(foobar)foobar.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(foobar)foobar.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__foobar_srcpart_auth_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(foobar)foobar.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: DROP INDEX srcpart_auth_index on foobar
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@foobar
-POSTHOOK: query: DROP INDEX srcpart_auth_index on foobar
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@foobar
-PREHOOK: query: DROP TABLE foobar
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@foobar
-PREHOOK: Output: default@foobar
-POSTHOOK: query: DROP TABLE foobar
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@foobar
-POSTHOOK: Output: default@foobar
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto.q.out b/ql/src/test/results/clientpositive/index_auto.q.out
deleted file mode 100644
index 654e419..0000000
--- a/ql/src/test/results/clientpositive/index_auto.q.out
+++ /dev/null
@@ -1,255 +0,0 @@
-PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_empty.q.out b/ql/src/test/results/clientpositive/index_auto_empty.q.out
deleted file mode 100644
index 0191339..0000000
--- a/ql/src/test/results/clientpositive/index_auto_empty.q.out
+++ /dev/null
@@ -1,101 +0,0 @@
-PREHOOK: query: CREATE DATABASE it
-PREHOOK: type: CREATEDATABASE
-PREHOOK: Output: database:it
-POSTHOOK: query: CREATE DATABASE it
-POSTHOOK: type: CREATEDATABASE
-POSTHOOK: Output: database:it
-PREHOOK: query: CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:it
-PREHOOK: Output: it@temp
-POSTHOOK: query: CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:it
-POSTHOOK: Output: it@temp
-PREHOOK: query: CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: it@temp
-POSTHOOK: query: CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: it@temp
-POSTHOOK: Output: it@it__temp_temp_index__
-PREHOOK: query: ALTER INDEX temp_index ON it.temp REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: it@temp
-PREHOOK: Output: it@it__temp_temp_index__
-POSTHOOK: query: ALTER INDEX temp_index ON it.temp REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: it@temp
-POSTHOOK: Output: it@it__temp_temp_index__
-POSTHOOK: Lineage: it__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: it__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: it__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: SELECT * FROM it.it__temp_temp_index__ WHERE key = 86
-PREHOOK: type: QUERY
-PREHOOK: Input: it@it__temp_temp_index__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM it.it__temp_temp_index__ WHERE key = 86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: it@it__temp_temp_index__
-#### A masked pattern was here ####
-PREHOOK: query: EXPLAIN SELECT * FROM it.temp WHERE key = 86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM it.temp WHERE key = 86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: temp
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: key (type: string), val (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM it.temp WHERE key = 86
-PREHOOK: type: QUERY
-PREHOOK: Input: it@temp
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM it.temp WHERE key = 86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: it@temp
-#### A masked pattern was here ####
-PREHOOK: query: DROP table it.temp
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: it@temp
-PREHOOK: Output: it@temp
-POSTHOOK: query: DROP table it.temp
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: it@temp
-POSTHOOK: Output: it@temp
-PREHOOK: query: DROP DATABASE it
-PREHOOK: type: DROPDATABASE
-PREHOOK: Input: database:it
-PREHOOK: Output: database:it
-POSTHOOK: query: DROP DATABASE it
-POSTHOOK: type: DROPDATABASE
-POSTHOOK: Input: database:it
-POSTHOOK: Output: database:it
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_file_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_file_format.q.out b/ql/src/test/results/clientpositive/index_auto_file_format.q.out
deleted file mode 100644
index 21c8085..0000000
--- a/ql/src/test/results/clientpositive/index_auto_file_format.q.out
+++ /dev/null
@@ -1,256 +0,0 @@
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key=86
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key=86
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
deleted file mode 100644
index d970b25..0000000
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
+++ /dev/null
@@ -1,438 +0,0 @@
-PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string)
- TableScan
- alias: b
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string)
- 1 _col0 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-82 val_82
-82 val_82
-82 val_82
-82 val_82
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-85 val_85
-85 val_85
-85 val_85
-85 val_85
-86 val_86
-86 val_86
-86 val_86
-86 val_86
-87 val_87
-87 val_87
-87 val_87
-87 val_87
-PREHOOK: query: CREATE INDEX src_index_bitmap ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index_bitmap ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_bitmap__
-PREHOOK: query: ALTER INDEX src_index_bitmap ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index_bitmap__
-POSTHOOK: query: ALTER INDEX src_index_bitmap ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_bitmap__
-POSTHOOK: Lineage: default__src_src_index_bitmap__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index_bitmap__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index_bitmap__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index_bitmap__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX srcpart_index_bitmap ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX srcpart_index_bitmap ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__
-PREHOOK: query: ALTER INDEX srcpart_index_bitmap ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_index_bitmap ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_bitmap__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-4 is a root stage
- Stage-3 depends on stages: Stage-4
- Stage-1 depends on stages: Stage-3, Stage-5
- Stage-6 is a root stage
- Stage-5 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index_bitmap__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint)
- outputColumnNames: _bucketname, _offset
- Group By Operator
- aggregations: collect_set(_offset)
- keys: _bucketname (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-3
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string)
- TableScan
- alias: b
- filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string)
- 1 _col0 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__srcpart_srcpart_index_bitmap__
- filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint)
- outputColumnNames: _bucketname, _offset
- Group By Operator
- aggregations: collect_set(_offset)
- keys: _bucketname (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index_bitmap__
-PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__
-PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index_bitmap__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_bitmap__@ds=2008-04-09/hr=12
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-82 val_82
-82 val_82
-82 val_82
-82 val_82
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-85 val_85
-85 val_85
-85 val_85
-85 val_85
-86 val_86
-86 val_86
-86 val_86
-86 val_86
-87 val_87
-87 val_87
-87 val_87
-87 val_87
-PREHOOK: query: DROP INDEX src_index_bitmap on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index_bitmap on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX srcpart_index_bitmap on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_bitmap on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
[08/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
deleted file mode 100644
index 65eb0de..0000000
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
+++ /dev/null
@@ -1,485 +0,0 @@
-PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string)
- TableScan
- alias: b
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string)
- 1 _col0 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-82 val_82
-82 val_82
-82 val_82
-82 val_82
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-85 val_85
-85 val_85
-85 val_85
-85 val_85
-86 val_86
-86 val_86
-86 val_86
-86 val_86
-87 val_87
-87 val_87
-87 val_87
-87 val_87
-PREHOOK: query: CREATE INDEX src_index_compact ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index_compact ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_compact__
-PREHOOK: query: ALTER INDEX src_index_compact ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index_compact__
-POSTHOOK: query: ALTER INDEX src_index_compact ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_compact__
-POSTHOOK: Lineage: default__src_src_index_compact__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index_compact__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index_compact__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX srcpart_index_compact ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX srcpart_index_compact ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__
-PREHOOK: query: ALTER INDEX srcpart_index_compact ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_index_compact ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-4 is a root stage
- Stage-9 depends on stages: Stage-4 , consists of Stage-6, Stage-5, Stage-7
- Stage-6
- Stage-3 depends on stages: Stage-6, Stage-5, Stage-8
- Stage-1 depends on stages: Stage-3, Stage-10
- Stage-5
- Stage-7
- Stage-8 depends on stages: Stage-7
- Stage-11 is a root stage
- Stage-16 depends on stages: Stage-11 , consists of Stage-13, Stage-12, Stage-14
- Stage-13
- Stage-10 depends on stages: Stage-13, Stage-12, Stage-15
- Stage-12
- Stage-14
- Stage-15 depends on stages: Stage-14
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index_compact__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-9
- Conditional Operator
-
- Stage: Stage-6
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-3
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: string)
- TableScan
- alias: b
- filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string)
- outputColumnNames: _col0
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string)
- 1 _col0 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-11
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__srcpart_srcpart_index_compact__
- filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-16
- Conditional Operator
-
- Stage: Stage-13
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-10
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-12
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-14
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-15
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index_compact__
-PREHOOK: Input: default@default__srcpart_srcpart_index_compact__
-PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index_compact__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_compact__@ds=2008-04-09/hr=12
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-82 val_82
-82 val_82
-82 val_82
-82 val_82
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-85 val_85
-85 val_85
-85 val_85
-85 val_85
-86 val_86
-86 val_86
-86 val_86
-86 val_86
-87 val_87
-87 val_87
-87 val_87
-87 val_87
-PREHOOK: query: DROP INDEX src_index_compact on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index_compact on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX srcpart_index_compact on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_compact on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_multiple.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_multiple.q.out b/ql/src/test/results/clientpositive/index_auto_multiple.q.out
deleted file mode 100644
index dfc2f34..0000000
--- a/ql/src/test/results/clientpositive/index_auto_multiple.q.out
+++ /dev/null
@@ -1,164 +0,0 @@
-PREHOOK: query: CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_key_index__
-PREHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_val_index__
-PREHOOK: query: ALTER INDEX src_key_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_key_index__
-POSTHOOK: query: ALTER INDEX src_key_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_key_index__
-POSTHOOK: Lineage: default__src_src_key_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_key_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_key_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src_val_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_val_index__
-POSTHOOK: query: ALTER INDEX src_val_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_val_index__
-POSTHOOK: Lineage: default__src_src_val_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_val_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_val_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key=86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_key_index__
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key=86
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_key_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_key_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-PREHOOK: query: DROP INDEX src_key_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_key_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src_val_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_val_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_partitioned.q.out b/ql/src/test/results/clientpositive/index_auto_partitioned.q.out
deleted file mode 100644
index 8c2d6e4..0000000
--- a/ql/src/test/results/clientpositive/index_auto_partitioned.q.out
+++ /dev/null
@@ -1,172 +0,0 @@
-PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_src_part_index__
-PREHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__srcpart_src_part_index__
- filterExpr: ((UDFToDouble(key) = 86.0) and (ds = '2008-04-09')) (type: boolean)
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: srcpart
- filterExpr: ((UDFToDouble(key) = 86.0) and (ds = '2008-04-09')) (type: boolean)
- Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_src_part_index__
-PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_src_part_index__
-POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-86 val_86
-86 val_86
-PREHOOK: query: DROP INDEX src_part_index ON srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX src_part_index ON srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/index_auto_self_join.q.out
deleted file mode 100644
index 08c851b..0000000
--- a/ql/src/test/results/clientpositive/index_auto_self_join.q.out
+++ /dev/null
@@ -1,295 +0,0 @@
-PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and value is not null) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string)
- sort order: +
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string)
- TableScan
- alias: b
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and value is not null) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string)
- sort order: +
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string)
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col1 (type: string)
- 1 _col1 (type: string)
- outputColumnNames: _col0, _col2
- Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col2 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 82
-83 83
-83 83
-83 83
-83 83
-84 84
-84 84
-84 84
-84 84
-85 85
-86 86
-87 87
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-4 is a root stage
- Stage-3 depends on stages: Stage-4
- Stage-1 depends on stages: Stage-3, Stage-5
- Stage-6 is a root stage
- Stage-5 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint)
- outputColumnNames: _bucketname, _offset
- Group By Operator
- aggregations: collect_set(_offset)
- keys: _bucketname (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-3
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: a
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and value is not null) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string)
- sort order: +
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string)
- TableScan
- alias: b
- filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and value is not null) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string)
- sort order: +
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string)
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col1 (type: string)
- 1 _col1 (type: string)
- outputColumnNames: _col0, _col2
- Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col2 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 70.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint)
- outputColumnNames: _bucketname, _offset
- Group By Operator
- aggregations: collect_set(_offset)
- keys: _bucketname (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 82
-83 83
-83 83
-83 83
-83 83
-84 84
-84 84
-84 84
-84 84
-85 85
-86 86
-87 87
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_unused.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_unused.q.out b/ql/src/test/results/clientpositive/index_auto_unused.q.out
deleted file mode 100644
index a960d96..0000000
--- a/ql/src/test/results/clientpositive/index_auto_unused.q.out
+++ /dev/null
@@ -1,388 +0,0 @@
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) < 10.0) or (UDFToDouble(key) > 480.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 10.0) or (UDFToDouble(key) > 480.0)) (type: boolean)
- Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM src WHERE key < 10 OR key > 480
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM src WHERE key < 10 OR key > 480
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-2 val_2
-4 val_4
-481 val_481
-482 val_482
-483 val_483
-484 val_484
-485 val_485
-487 val_487
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-490 val_490
-491 val_491
-492 val_492
-492 val_492
-493 val_493
-494 val_494
-495 val_495
-496 val_496
-497 val_497
-498 val_498
-498 val_498
-498 val_498
-5 val_5
-5 val_5
-5 val_5
-8 val_8
-9 val_9
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_val_index__
-PREHOOK: query: ALTER INDEX src_val_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_val_index__
-POSTHOOK: query: ALTER INDEX src_val_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_val_index__
-POSTHOOK: Lineage: default__src_src_val_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_val_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_val_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: DROP INDEX src_val_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_val_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_src_part_index__
-PREHOOK: query: ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11
-POSTHOOK: query: ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- TableScan
- alias: srcpart
- filterExpr: ((ds = '2008-04-09') and (12.0 = 12.0) and (UDFToDouble(key) < 10.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) < 10.0) (type: boolean)
- Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string), '2008-04-09' (type: string), hr (type: string)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
- ListSink
-
-PREHOOK: query: SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-0 val_0 2008-04-09 12
-0 val_0 2008-04-09 12
-0 val_0 2008-04-09 12
-2 val_2 2008-04-09 12
-4 val_4 2008-04-09 12
-5 val_5 2008-04-09 12
-5 val_5 2008-04-09 12
-5 val_5 2008-04-09 12
-8 val_8 2008-04-09 12
-9 val_9 2008-04-09 12
-PREHOOK: query: DROP INDEX src_part_index on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX src_part_index on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_auto_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_update.q.out b/ql/src/test/results/clientpositive/index_auto_update.q.out
deleted file mode 100644
index e48b657..0000000
--- a/ql/src/test/results/clientpositive/index_auto_update.q.out
+++ /dev/null
@@ -1,353 +0,0 @@
-PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp
-POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp
-POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-PREHOOK: query: ALTER INDEX temp_index ON temp REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@default__temp_temp_index__
-POSTHOOK: query: ALTER INDEX temp_index ON temp REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-6 is a root stage
- Stage-12 depends on stages: Stage-6 , consists of Stage-9, Stage-8, Stage-10
- Stage-9
- Stage-0 depends on stages: Stage-9, Stage-8, Stage-11
- Stage-2 depends on stages: Stage-0
- Stage-1 depends on stages: Stage-2
- Stage-3 depends on stages: Stage-1, Stage-4, Stage-5
- Stage-4 depends on stages: Stage-2
- Stage-7 depends on stages: Stage-0, Stage-4, Stage-5
- Stage-5 depends on stages: Stage-2
- Stage-8
- Stage-10
- Stage-11 depends on stages: Stage-10
-
-STAGE PLANS:
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.temp
- Select Operator
- expressions: _col0 (type: string), _col1 (type: string)
- outputColumnNames: key, val
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: compute_stats(key, 'hll'), compute_stats(val, 'hll')
- mode: hash
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 880 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-12
- Conditional Operator
-
- Stage: Stage-9
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Move Operator
- tables:
- replace: true
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.temp
-
- Stage: Stage-2
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: temp
- Select Operator
- expressions: key (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string)
- outputColumnNames: key, BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME
- Group By Operator
- aggregations: collect_set(BLOCK__OFFSET__INSIDE__FILE)
- keys: key (type: string), INPUT__FILE__NAME (type: string)
- mode: hash
- outputColumnNames: _col0, _col1, _col2
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: string)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col2 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string), KEY._col1 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1, _col2
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.default__temp_temp_index__
-
- Stage: Stage-1
- Move Operator
- tables:
- replace: true
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.default__temp_temp_index__
-
- Stage: Stage-3
- Stats Work
- Basic Stats Work:
-
- Stage: Stage-4
-
- Stage: Stage-7
- Stats Work
- Basic Stats Work:
- Column Stats Desc:
- Columns: key, val
- Column Types: string, string
- Table: default.temp
-
- Stage: Stage-5
-
- Stage: Stage-8
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.temp
-
- Stage: Stage-10
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- name: default.temp
-
- Stage: Stage-11
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
-POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__temp_temp_index__
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: temp
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), val (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM temp WHERE key = 86
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__temp_temp_index__
-PREHOOK: Input: default@temp
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM temp WHERE key = 86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__temp_temp_index__
-POSTHOOK: Input: default@temp
-#### A masked pattern was here ####
-86 val_86
-PREHOOK: query: drop index temp_index on temp
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: drop index temp_index on temp
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@temp
-PREHOOK: query: DROP table temp
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: DROP table temp
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
[07/15] hive git commit: HIVE-18448: Drop Support For Indexes From
Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
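For anyone skimming this part of the series: every file deleted above and below is golden output for Hive's old index DDL, which this commit removes. As a minimal recap, the statement shapes involved look like the following when entered in the Hive CLI; the table (src) and index (src_index) names are simply the ones used in the deleted tests, nothing new is introduced here:

    -- index DDL exercised by the deleted .q.out files (support dropped by this commit)
    CREATE INDEX src_index ON TABLE src(key) AS 'COMPACT' WITH DEFERRED REBUILD;
    ALTER INDEX src_index ON src REBUILD;
    DROP INDEX src_index ON src;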
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap.q.out b/ql/src/test/results/clientpositive/index_bitmap.q.out
deleted file mode 100644
index 5017027..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap.q.out
+++ /dev/null
@@ -1,291 +0,0 @@
-PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__
-PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__
-PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap1.q.out b/ql/src/test/results/clientpositive/index_bitmap1.q.out
deleted file mode 100644
index 8f3af66..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap1.q.out
+++ /dev/null
@@ -1,75 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT x.* FROM default__src_src_index__ x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__src_src_index__ x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__src_src_index__ WHERE NOT
-EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__src_src_index__ WHERE NOT
-EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM src WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap2.q.out b/ql/src/test/results/clientpositive/index_bitmap2.q.out
deleted file mode 100644
index 716e3c8..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap2.q.out
+++ /dev/null
@@ -1,138 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-PREHOOK: query: ALTER INDEX src1_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src1_index__
-POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src2_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src2_index__
-POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__src_src1_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM default__src_src1_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT * FROM default__src_src2_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM default__src_src2_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT t.bucketname as `_bucketname`, COLLECT_SET(t.offset) AS `_offsets` FROM
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset
- FROM default__src_src1_index__
- WHERE key = 0 AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`) UNION ALL
- SELECT `_bucketname` AS bucketname, `_offset` AS offset
- FROM default__src_src2_index__
- WHERE value = "val2" AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`)) t
-GROUP BY t.bucketname
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT t.bucketname as `_bucketname`, COLLECT_SET(t.offset) AS `_offsets` FROM
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset
- FROM default__src_src1_index__
- WHERE key = 0 AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`) UNION ALL
- SELECT `_bucketname` AS bucketname, `_offset` AS offset
- FROM default__src_src2_index__
- WHERE value = "val2" AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`)) t
-GROUP BY t.bucketname
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-2 val_2
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 OR value = "val_2"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-2 val_2
-PREHOOK: query: DROP INDEX src1_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src1_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src2_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src2_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
[12/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
deleted file mode 100644
index 658422c..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
+++ /dev/null
@@ -1,325 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.index;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-
-import org.apache.hadoop.hive.ql.optimizer.FieldNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.exec.ColumnInfo;
-import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.exec.GroupByOperator;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorUtils;
-import org.apache.hadoop.hive.ql.exec.RowSchema;
-import org.apache.hadoop.hive.ql.exec.SelectOperator;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.ColumnPrunerProcFactory;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.AggregationDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.GroupByDesc;
-import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-
-/**
- * RewriteQueryUsingAggregateIndexCtx class stores the
- * context for the {@link RewriteQueryUsingAggregateIndex}
- * used to rewrite operator plan with index table instead of base table.
- */
-
-public final class RewriteQueryUsingAggregateIndexCtx implements NodeProcessorCtx {
- private static final Logger LOG = LoggerFactory.getLogger(RewriteQueryUsingAggregateIndexCtx.class.getName());
- private RewriteQueryUsingAggregateIndexCtx(ParseContext parseContext, Hive hiveDb,
- RewriteCanApplyCtx canApplyCtx) {
- this.parseContext = parseContext;
- this.hiveDb = hiveDb;
- this.canApplyCtx = canApplyCtx;
- this.indexTableName = canApplyCtx.getIndexTableName();
- this.alias = canApplyCtx.getAlias();
- this.aggregateFunction = canApplyCtx.getAggFunction();
- this.indexKey = canApplyCtx.getIndexKey();
- }
-
- public static RewriteQueryUsingAggregateIndexCtx getInstance(ParseContext parseContext,
- Hive hiveDb, RewriteCanApplyCtx canApplyCtx) {
- return new RewriteQueryUsingAggregateIndexCtx(
- parseContext, hiveDb, canApplyCtx);
- }
-
- // Assumes one instance of this + single-threaded compilation for each query.
- private final Hive hiveDb;
- private final ParseContext parseContext;
- private final RewriteCanApplyCtx canApplyCtx;
- //We need the GenericUDAFEvaluator for GenericUDAF function "sum"
- private GenericUDAFEvaluator eval = null;
- private final String indexTableName;
- private final String alias;
- private final String aggregateFunction;
- private ExprNodeColumnDesc aggrExprNode = null;
- private String indexKey;
-
- public ParseContext getParseContext() {
- return parseContext;
- }
-
- public Hive getHiveDb() {
- return hiveDb;
- }
-
- public String getIndexName() {
- return indexTableName;
- }
-
- public GenericUDAFEvaluator getEval() {
- return eval;
- }
-
- public void setEval(GenericUDAFEvaluator eval) {
- this.eval = eval;
- }
-
- public void setAggrExprNode(ExprNodeColumnDesc aggrExprNode) {
- this.aggrExprNode = aggrExprNode;
- }
-
- public ExprNodeColumnDesc getAggrExprNode() {
- return aggrExprNode;
- }
-
- public String getAlias() {
- return alias;
- }
-
- public String getAggregateFunction() {
- return aggregateFunction;
- }
-
- public String getIndexKey() {
- return indexKey;
- }
-
- public void setIndexKey(String indexKey) {
- this.indexKey = indexKey;
- }
-
- public void invokeRewriteQueryProc() throws SemanticException {
- this.replaceTableScanProcess(canApplyCtx.getTableScanOperator());
- //We need aggrExprNode. Thus, replaceGroupByOperatorProcess should come before replaceSelectOperatorProcess
- for (int index = 0; index < canApplyCtx.getGroupByOperators().size(); index++) {
- this.replaceGroupByOperatorProcess(canApplyCtx.getGroupByOperators().get(index), index);
- }
- for (SelectOperator selectperator : canApplyCtx.getSelectOperators()) {
- this.replaceSelectOperatorProcess(selectperator);
- }
- }
-
- /**
- * This method replaces the original TableScanOperator with the new
- * TableScanOperator and metadata that scans over the index table rather than
- * scanning over the original table.
- *
- */
- private void replaceTableScanProcess(TableScanOperator scanOperator) throws SemanticException {
- RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = this;
- String alias = rewriteQueryCtx.getAlias();
-
- // Need to remove the original TableScanOperators from these data structures
- // and add new ones
- HashMap<String, TableScanOperator> topOps = rewriteQueryCtx.getParseContext()
- .getTopOps();
-
- // remove original TableScanOperator
- topOps.remove(alias);
-
- String indexTableName = rewriteQueryCtx.getIndexName();
- Table indexTableHandle = null;
- try {
- indexTableHandle = rewriteQueryCtx.getHiveDb().getTable(indexTableName);
- } catch (HiveException e) {
- LOG.error("Error while getting the table handle for index table.");
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
-
- // construct a new descriptor for the index table scan
- TableScanDesc indexTableScanDesc = new TableScanDesc(indexTableHandle);
- indexTableScanDesc.setGatherStats(false);
-
- String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(indexTableName) + Path.SEPARATOR;
- indexTableScanDesc.setStatsAggPrefix(k);
- scanOperator.setConf(indexTableScanDesc);
-
- // Construct the new RowResolver for the new TableScanOperator
- ArrayList<ColumnInfo> sigRS = new ArrayList<ColumnInfo>();
- try {
- StructObjectInspector rowObjectInspector = (StructObjectInspector) indexTableHandle
- .getDeserializer().getObjectInspector();
- StructField field = rowObjectInspector.getStructFieldRef(rewriteQueryCtx.getIndexKey());
- sigRS.add(new ColumnInfo(field.getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(
- field.getFieldObjectInspector()), indexTableName, false));
- } catch (SerDeException e) {
- LOG.error("Error while creating the RowResolver for new TableScanOperator.");
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
- RowSchema rs = new RowSchema(sigRS);
-
- // Set row resolver for new table
- String newAlias = indexTableName;
- int index = alias.lastIndexOf(":");
- if (index >= 0) {
- newAlias = alias.substring(0, index) + ":" + indexTableName;
- }
-
- // Scan operator now points to other table
- scanOperator.getConf().setAlias(newAlias);
- scanOperator.setAlias(indexTableName);
- topOps.put(newAlias, scanOperator);
- rewriteQueryCtx.getParseContext().setTopOps(topOps);
-
- ColumnPrunerProcFactory.setupNeededColumns(scanOperator, rs,
- Arrays.asList(new FieldNode(rewriteQueryCtx.getIndexKey())));
- }
-
- /**
- * This method replaces the original SelectOperator with the new
- * SelectOperator with a new column indexed_key_column.
- */
- private void replaceSelectOperatorProcess(SelectOperator operator) throws SemanticException {
- RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = this;
- // we need to set the colList, outputColumnNames, colExprMap,
- // rowSchema for only that SelectOperator which precedes the GroupByOperator
- // count(indexed_key_column) needs to be replaced by
- // sum(`_count_of_indexed_key_column`)
- List<ExprNodeDesc> selColList = operator.getConf().getColList();
- selColList.add(rewriteQueryCtx.getAggrExprNode());
-
- List<String> selOutputColNames = operator.getConf().getOutputColumnNames();
- selOutputColNames.add(rewriteQueryCtx.getAggrExprNode().getColumn());
-
- operator.getColumnExprMap().put(rewriteQueryCtx.getAggrExprNode().getColumn(),
- rewriteQueryCtx.getAggrExprNode());
-
- RowSchema selRS = operator.getSchema();
- List<ColumnInfo> selRSSignature = selRS.getSignature();
- // Need to create a new type for Column[_count_of_indexed_key_column] node
- PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo("bigint");
- pti.setTypeName("bigint");
- ColumnInfo newCI = new ColumnInfo(rewriteQueryCtx.getAggregateFunction(), pti, "", false);
- selRSSignature.add(newCI);
- selRS.setSignature((ArrayList<ColumnInfo>) selRSSignature);
- operator.setSchema(selRS);
- }
-
- /**
- * We need to replace the count(indexed_column_key) GenericUDAF aggregation
- * function for group-by construct to "sum" GenericUDAF. This method creates a
- * new operator tree for a sample query that creates a GroupByOperator with
- * sum aggregation function and uses that GroupByOperator information to
- * replace the original GroupByOperator aggregation information. It replaces
- * the AggregationDesc (aggregation descriptor) of the old GroupByOperator
- * with the new Aggregation Desc of the new GroupByOperator.
- * @return
- */
- private void replaceGroupByOperatorProcess(GroupByOperator operator, int index)
- throws SemanticException {
- RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx = this;
-
- // We need to replace the GroupByOperator which is before RS
- if (index == 0) {
- // the query contains the sum aggregation GenericUDAF
- String selReplacementCommand = "select sum(`" + rewriteQueryCtx.getAggregateFunction() + "`)"
- + " from `" + rewriteQueryCtx.getIndexName() + "` group by "
- + rewriteQueryCtx.getIndexKey() + " ";
- // retrieve the operator tree for the query, and the required GroupByOperator from it
- Operator<?> newOperatorTree = RewriteParseContextGenerator.generateOperatorTree(
- rewriteQueryCtx.getParseContext().getQueryState(),
- selReplacementCommand);
-
- // we get our new GroupByOperator here
- GroupByOperator newGbyOperator = OperatorUtils.findLastOperatorUpstream(
- newOperatorTree, GroupByOperator.class);
- if (newGbyOperator == null) {
- throw new SemanticException("Error replacing GroupBy operator.");
- }
-
- // we need this information to set the correct colList, outputColumnNames
- // in SelectOperator
- ExprNodeColumnDesc aggrExprNode = null;
-
- // Construct the new AggregationDesc to get rid of the current
- // internal names and replace them with new internal names
- // as required by the operator tree
- GroupByDesc newConf = newGbyOperator.getConf();
- List<AggregationDesc> newAggrList = newConf.getAggregators();
- if (newAggrList != null && newAggrList.size() > 0) {
- for (AggregationDesc aggregationDesc : newAggrList) {
- rewriteQueryCtx.setEval(aggregationDesc.getGenericUDAFEvaluator());
- aggrExprNode = (ExprNodeColumnDesc) aggregationDesc.getParameters().get(0);
- rewriteQueryCtx.setAggrExprNode(aggrExprNode);
- }
- }
-
- // Now the GroupByOperator has the new AggregationList;
- // sum(`_count_of_indexed_key`)
- // instead of count(indexed_key)
- GroupByDesc oldConf = operator.getConf();
- oldConf.setAggregators((ArrayList<AggregationDesc>) newAggrList);
- operator.setConf(oldConf);
-
- } else {
- // we just need to reset the GenericUDAFEvaluator and its name for this
- // GroupByOperator whose parent is the ReduceSinkOperator
- GroupByDesc childConf = operator.getConf();
- List<AggregationDesc> childAggrList = childConf.getAggregators();
- if (childAggrList != null && childAggrList.size() > 0) {
- for (AggregationDesc aggregationDesc : childAggrList) {
- List<ExprNodeDesc> paraList = aggregationDesc.getParameters();
- List<ObjectInspector> parametersOIList = new ArrayList<ObjectInspector>();
- for (ExprNodeDesc expr : paraList) {
- parametersOIList.add(expr.getWritableObjectInspector());
- }
- GenericUDAFEvaluator evaluator = FunctionRegistry.getGenericUDAFEvaluator("sum",
- parametersOIList, false, false);
- aggregationDesc.setGenericUDAFEvaluator(evaluator);
- aggregationDesc.setGenericUDAFName("sum");
- }
- }
- }
- }
-}
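For context -- a minimal, self-contained sketch (not part of this patch) of the rewrite the class deleted above implemented: COUNT over a table's indexed key column was turned into SUM over the aggregate index table's precomputed count column by generating a small helper query and harvesting its GroupByOperator. The snippet below only mirrors how the deleted replaceGroupByOperatorProcess() assembled that helper query as a string; the table and column names are hypothetical.

// Illustrative only -- not part of this patch.
public class AggregateIndexRewriteSketch {

  // Mirrors the replacement command built by the deleted code:
  // count(indexed_key) on the base table becomes sum(`_count_of_indexed_key`)
  // on the aggregate index table, grouped by the same key.
  static String replacementQuery(String countColumn, String indexTableName, String indexKey) {
    return "select sum(`" + countColumn + "`) from `" + indexTableName
        + "` group by " + indexKey;
  }

  public static void main(String[] args) {
    // e.g. for a query of the form: SELECT key, COUNT(key) FROM src GROUP BY key
    System.out.println(replacementQuery("_count_of_key", "default__src_src_key_idx__", "key"));
    // prints: select sum(`_count_of_key`) from `default__src_src_key_idx__` group by key
  }
}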
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java
deleted file mode 100644
index d204fe8..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/IndexWhereResolver.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical;
-
-import java.util.ArrayList;
-
-import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.optimizer.physical.index.IndexWhereTaskDispatcher;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-
-public class IndexWhereResolver implements PhysicalPlanResolver {
-
- @Override
- public PhysicalContext resolve(PhysicalContext physicalContext) throws SemanticException {
- Dispatcher dispatcher = new IndexWhereTaskDispatcher(physicalContext);
- GraphWalker opGraphWalker = new DefaultGraphWalker(dispatcher);
- ArrayList<Node> topNodes = new ArrayList<Node>();
- topNodes.addAll(physicalContext.getRootTasks());
- opGraphWalker.startWalking(topNodes, null);
-
- return physicalContext;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
index a64a498..0f3c5f2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
@@ -60,9 +60,6 @@ public class PhysicalOptimizer {
}
}
- if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER)) {
- resolvers.add(new IndexWhereResolver());
- }
resolvers.add(new MapJoinResolver());
if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) {
resolvers.add(new MetadataOnlyOptimizer());
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java
deleted file mode 100644
index 179d4c2..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcCtx.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical.index;
-
-import java.io.Serializable;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-
-public class IndexWhereProcCtx implements NodeProcessorCtx {
-
- private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcCtx.class.getName());
-
- private final Task<? extends Serializable> currentTask;
- private final ParseContext parseCtx;
-
- public IndexWhereProcCtx(Task<? extends Serializable> task, ParseContext parseCtx) {
- this.currentTask = task;
- this.parseCtx = parseCtx;
- }
-
- public ParseContext getParseContext() {
- return parseCtx;
- }
-
- public Task<? extends Serializable> getCurrentTask() {
- return currentTask;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java
deleted file mode 100644
index b284afa..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereProcessor.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical.index;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
-import org.apache.hadoop.hive.ql.index.HiveIndexQueryContext;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.MapWork;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.plan.TableScanDesc;
-
-/**
-*
-* IndexWhereProcessor.
-* Processes Operator Nodes to look for WHERE queries with a predicate column
-* on which we have an index. Creates an index subquery Task for these
-* WHERE queries to use the index automatically.
-*/
-public class IndexWhereProcessor implements NodeProcessor {
-
- private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcessor.class.getName());
- private final Map<TableScanOperator, List<Index>> tsToIndices;
-
- public IndexWhereProcessor(Map<TableScanOperator, List<Index>> tsToIndices) {
- super();
- this.tsToIndices = tsToIndices;
- }
-
- @Override
- /**
- * Process a node of the operator tree. This matches on the rule in IndexWhereTaskDispatcher
- */
- public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
- Object... nodeOutputs) throws SemanticException {
-
- TableScanOperator operator = (TableScanOperator) nd;
- List<Node> opChildren = operator.getChildren();
- TableScanDesc operatorDesc = operator.getConf();
- if (operatorDesc == null || !tsToIndices.containsKey(operator)) {
- return null;
- }
- List<Index> indexes = tsToIndices.get(operator);
-
- ExprNodeDesc predicate = operatorDesc.getFilterExpr();
-
- IndexWhereProcCtx context = (IndexWhereProcCtx) procCtx;
- ParseContext pctx = context.getParseContext();
- LOG.info("Processing predicate for index optimization");
-
- if (predicate == null) {
- LOG.info("null predicate pushed down");
- return null;
- }
- LOG.info(predicate.getExprString());
-
- // check if we have tsToIndices on all partitions in this table scan
- Set<Partition> queryPartitions;
- try {
- queryPartitions = IndexUtils.checkPartitionsCoveredByIndex(operator, pctx, indexes);
- if (queryPartitions == null) { // partitions not covered
- return null;
- }
- } catch (HiveException e) {
- LOG.error("Fatal Error: problem accessing metastore", e);
- throw new SemanticException(e);
- }
-
- // we can only process MapReduce tasks to check input size
- if (!context.getCurrentTask().isMapRedTask()) {
- return null;
- }
- MapRedTask currentTask = (MapRedTask) context.getCurrentTask();
-
- // get potential reentrant index queries from each index
- Map<Index, HiveIndexQueryContext> queryContexts = new HashMap<Index, HiveIndexQueryContext>();
- // make sure we have an index on the table being scanned
- TableDesc tblDesc = operator.getTableDescSkewJoin();
-
- Map<String, List<Index>> indexesByType = new HashMap<String, List<Index>>();
- for (Index indexOnTable : indexes) {
- if (indexesByType.get(indexOnTable.getIndexHandlerClass()) == null) {
- List<Index> newType = new ArrayList<Index>();
- newType.add(indexOnTable);
- indexesByType.put(indexOnTable.getIndexHandlerClass(), newType);
- } else {
- indexesByType.get(indexOnTable.getIndexHandlerClass()).add(indexOnTable);
- }
- }
-
- // choose index type with most tsToIndices of the same type on the table
- // TODO HIVE-2130 This would be a good place for some sort of cost based choice?
- List<Index> bestIndexes = indexesByType.values().iterator().next();
- for (List<Index> indexTypes : indexesByType.values()) {
- if (bestIndexes.size() < indexTypes.size()) {
- bestIndexes = indexTypes;
- }
- }
-
- // rewrite index queries for the chosen index type
- HiveIndexQueryContext tmpQueryContext = new HiveIndexQueryContext();
- tmpQueryContext.setQueryPartitions(queryPartitions);
- rewriteForIndexes(predicate, bestIndexes, pctx, currentTask, tmpQueryContext);
- List<Task<?>> indexTasks = tmpQueryContext.getQueryTasks();
-
- if (indexTasks != null && indexTasks.size() > 0) {
- queryContexts.put(bestIndexes.get(0), tmpQueryContext);
- }
- // choose an index rewrite to use
- if (queryContexts.size() > 0) {
- // TODO HIVE-2130 This would be a good place for some sort of cost based choice?
- Index chosenIndex = queryContexts.keySet().iterator().next();
-
- // modify the parse context to use indexing
- // we need to delay this until we choose one index so that we don't attempt to modify pctx multiple times
- HiveIndexQueryContext queryContext = queryContexts.get(chosenIndex);
-
- // prepare the map reduce job to use indexing
- MapWork work = currentTask.getWork().getMapWork();
- work.setInputformat(queryContext.getIndexInputFormat());
- work.addIndexIntermediateFile(queryContext.getIndexIntermediateFile());
- // modify inputs based on index query
- Set<ReadEntity> inputs = pctx.getSemanticInputs();
- inputs.addAll(queryContext.getAdditionalSemanticInputs());
- List<Task<?>> chosenRewrite = queryContext.getQueryTasks();
-
- // add dependencies so index query runs first
- insertIndexQuery(pctx, context, chosenRewrite);
- }
-
- return null;
- }
-
- /**
- * Get a list of Tasks to activate use of tsToIndices.
- * Generate the tasks for the index query (where we store results of
- * querying the index in a tmp file) inside the IndexHandler
- * @param predicate Predicate of query to rewrite
- * @param index Index to use for rewrite
- * @param pctx
- * @param task original task before rewrite
- * @param queryContext stores return values
- */
- private void rewriteForIndexes(ExprNodeDesc predicate, List<Index> indexes,
- ParseContext pctx, Task<MapredWork> task,
- HiveIndexQueryContext queryContext)
- throws SemanticException {
- HiveIndexHandler indexHandler;
- // All tsToIndices in the list are of the same type, and therefore can use the
- // same handler to generate the index query tasks
- Index index = indexes.get(0);
- try {
- indexHandler = HiveUtils.getIndexHandler(pctx.getConf(), index.getIndexHandlerClass());
- } catch (HiveException e) {
- LOG.error("Exception while loading IndexHandler: " + index.getIndexHandlerClass(), e);
- throw new SemanticException("Failed to load indexHandler: " + index.getIndexHandlerClass(), e);
- }
-
- // check the size
- try {
- ContentSummary inputSummary = Utilities.getInputSummary(pctx.getContext(), task.getWork().getMapWork(), null);
- long inputSize = inputSummary.getLength();
- if (!indexHandler.checkQuerySize(inputSize, pctx.getConf())) {
- queryContext.setQueryTasks(null);
- return;
- }
- } catch (IOException e) {
- throw new SemanticException("Failed to get task size", e);
- }
-
- // use the IndexHandler to generate the index query
- indexHandler.generateIndexQuery(indexes, predicate, pctx, queryContext);
- // TODO HIVE-2115 use queryContext.residualPredicate to process residual predicate
-
- return;
- }
-
-
- /**
- * Insert the rewrite tasks at the head of the pctx task tree
- * @param pctx
- * @param context
- * @param chosenRewrite
- */
- private void insertIndexQuery(ParseContext pctx, IndexWhereProcCtx context, List<Task<?>> chosenRewrite) {
- Task<?> wholeTableScan = context.getCurrentTask();
- LinkedHashSet<Task<?>> rewriteLeaves = new LinkedHashSet<Task<?>>();
- findLeaves(chosenRewrite, rewriteLeaves);
-
- for (Task<?> leaf : rewriteLeaves) {
- leaf.addDependentTask(wholeTableScan); // add full scan task as child for every index query task
- }
-
- // replace the original with the index sub-query as a root task
- pctx.replaceRootTask(wholeTableScan, chosenRewrite);
- }
-
- /**
- * Find the leaves of the task tree
- */
- private void findLeaves(List<Task<?>> tasks, Set<Task<?>> leaves) {
- for (Task<?> t : tasks) {
- if (t.getDependentTasks() == null) {
- leaves.add(t);
- } else {
- findLeaves(t.getDependentTasks(), leaves);
- }
- }
- }
-
-}
-
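For context -- a minimal, self-contained sketch (not part of this patch) of the task wiring the deleted IndexWhereProcessor performed once a rewrite was chosen: the leaves of the generated index-query task tree are located, and the original full-scan task is added as a dependent of each leaf, so the index query always completes first. The Task type below is a stand-in, not Hive's.

// Illustrative only -- not part of this patch.
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

class SketchTask {
  final String name;
  final List<SketchTask> dependents = new ArrayList<>();
  SketchTask(String name) { this.name = name; }
  void addDependentTask(SketchTask t) { dependents.add(t); }
}

public class IndexQueryWiringSketch {

  // Collect tasks with no dependents, i.e. the leaves of the rewrite task tree.
  static void findLeaves(List<SketchTask> tasks, Set<SketchTask> leaves) {
    for (SketchTask t : tasks) {
      if (t.dependents.isEmpty()) {
        leaves.add(t);
      } else {
        findLeaves(t.dependents, leaves);
      }
    }
  }

  public static void main(String[] args) {
    SketchTask indexScan = new SketchTask("scan index table");
    SketchTask collectOffsets = new SketchTask("collect matching offsets");
    indexScan.addDependentTask(collectOffsets);

    SketchTask fullScan = new SketchTask("original table scan");

    Set<SketchTask> leaves = new LinkedHashSet<>();
    findLeaves(List.of(indexScan), leaves);
    for (SketchTask leaf : leaves) {
      leaf.addDependentTask(fullScan); // the full scan now waits on every index-query leaf
    }
    System.out.println(leaves.size() + " leaf task(s) gate the original scan");
  }
}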
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
deleted file mode 100644
index c9dae8f..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/index/IndexWhereTaskDispatcher.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.physical.index;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Stack;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.cache.CacheUtils;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
-import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
-import org.apache.hadoop.hive.ql.lib.RuleRegExp;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.optimizer.physical.PhysicalContext;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hive.common.util.HiveStringUtils;
-
-/**
- *
- * IndexWhereTaskDispatcher. Walks a Task tree, and for the right kind of Task,
- * walks the operator tree to create an index subquery. Then attaches the
- * subquery task to the task tree.
- *
- */
-public class IndexWhereTaskDispatcher implements Dispatcher {
-
- private final PhysicalContext physicalContext;
- // To store table to index mapping
- private final Map<String, List<Index>> indexMap;
- private final List<String> supportedIndexes;
-
- public IndexWhereTaskDispatcher(PhysicalContext context) {
- super();
- physicalContext = context;
- indexMap = Maps.newHashMap();
- supportedIndexes = new ArrayList<String>();
- supportedIndexes.add(CompactIndexHandler.class.getName());
- supportedIndexes.add(BitmapIndexHandler.class.getName());
- }
-
- @Override
- public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
- throws SemanticException {
-
- Task<? extends Serializable> task = (Task<? extends Serializable>) nd;
-
- ParseContext pctx = physicalContext.getParseContext();
-
- // create the regex's so the walker can recognize our WHERE queries
- Map<Rule, NodeProcessor> operatorRules = createOperatorRules(pctx);
-
- // check for no indexes on any table
- if (operatorRules == null) {
- return null;
- }
-
- // create context so the walker can carry the current task with it.
- IndexWhereProcCtx indexWhereOptimizeCtx = new IndexWhereProcCtx(task, pctx);
-
- // create the dispatcher, which fires the processor according to the rule that
- // best matches
- Dispatcher dispatcher = new DefaultRuleDispatcher(getDefaultProcessor(),
- operatorRules,
- indexWhereOptimizeCtx);
-
- // walk the mapper operator(not task) tree for each specific task
- GraphWalker ogw = new DefaultGraphWalker(dispatcher);
- ArrayList<Node> topNodes = new ArrayList<Node>();
- if (task.getWork() instanceof MapredWork) {
- topNodes.addAll(((MapredWork)task.getWork()).getMapWork().getAliasToWork().values());
- } else {
- return null;
- }
- ogw.startWalking(topNodes, null);
-
- return null;
- }
-
- private List<Index> getIndex(Table table) throws SemanticException {
- String indexCacheKey = CacheUtils.buildKey(
- HiveStringUtils.normalizeIdentifier(table.getDbName()),
- HiveStringUtils.normalizeIdentifier(table.getTableName()));
- List<Index>indexList = indexMap.get(indexCacheKey);
- if (indexList == null) {
- indexList = IndexUtils.getIndexes(table, supportedIndexes);
- if (indexList == null) {
- indexList = Collections.emptyList();
- }
- indexMap.put(indexCacheKey, indexList);
- }
- return indexList;
- }
-
- /**
- * Create a set of rules that only matches WHERE predicates on columns we have
- * an index on.
- * @return
- */
- private Map<Rule, NodeProcessor> createOperatorRules(ParseContext pctx) throws SemanticException {
- Map<Rule, NodeProcessor> operatorRules = new LinkedHashMap<Rule, NodeProcessor>();
-
- // query the metastore to know what columns we have indexed
- Map<TableScanOperator, List<Index>> indexes = new HashMap<TableScanOperator, List<Index>>();
- for (Operator<? extends OperatorDesc> op : pctx.getTopOps().values()) {
- if (op instanceof TableScanOperator) {
- List<Index> tblIndexes = getIndex(((TableScanOperator) op).getConf().getTableMetadata());
- if (tblIndexes.size() > 0) {
- indexes.put((TableScanOperator) op, tblIndexes);
- }
- }
- }
-
- // quit if our tables don't have any indexes
- if (indexes.size() == 0) {
- return null;
- }
-
- // We set the pushed predicate from the WHERE clause as the filter expr on
- // all table scan operators, so we look for table scan operators(TS%)
- operatorRules.put(new RuleRegExp("RULEWhere", TableScanOperator.getOperatorName() + "%"),
- new IndexWhereProcessor(indexes));
-
- return operatorRules;
- }
-
-
- private NodeProcessor getDefaultProcessor() {
- return new NodeProcessor() {
- @Override
- public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
- Object... nodeOutputs) throws SemanticException {
- return null;
- }
- };
- }
-
-}
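[Editor's note: the deleted IndexWhereTaskDispatcher above is a plain instance of Hive's generic rule/graph-walker framework in org.apache.hadoop.hive.ql.lib. The sketch below shows only how that wiring fits together, reusing the classes and constructor signatures that appear in the removed code itself; the class name RuleWalkSketch, the walk method, and the empty processor bodies are illustrative placeholders, not part of the commit.]

// Minimal sketch of the rule/dispatcher/walker pattern used by the removed class.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.SemanticException;

public class RuleWalkSketch {
  public static void walk(ArrayList<Node> topNodes, NodeProcessorCtx ctx) throws SemanticException {
    // Map each rule (a regex over operator names) to the processor that should fire for it.
    Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
    rules.put(new RuleRegExp("RULEWhere", TableScanOperator.getOperatorName() + "%"),
        new NodeProcessor() {
          @Override
          public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
              Object... nodeOutputs) throws SemanticException {
            // A real processor would inspect the matched TableScanOperator here.
            return null;
          }
        });

    // Fallback processor for nodes that no rule matches.
    NodeProcessor defaultProc = new NodeProcessor() {
      @Override
      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
          Object... nodeOutputs) throws SemanticException {
        return null;
      }
    };

    // The dispatcher picks the best-matching rule; the walker drives it over the operator DAG.
    Dispatcher disp = new DefaultRuleDispatcher(defaultProc, rules, ctx);
    GraphWalker walker = new DefaultGraphWalker(disp);
    walker.startWalking(topNodes, null);
  }
}
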
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 4338fa6..41d878f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -43,7 +43,6 @@ import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.Tree;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -52,9 +51,7 @@ import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
@@ -82,9 +79,6 @@ import org.apache.hadoop.hive.ql.hooks.Entity.Type;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
-import org.apache.hadoop.hive.ql.index.HiveIndex;
-import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
-import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
@@ -106,8 +100,6 @@ import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc.OnePartitionDesc;
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
-import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes;
import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc;
import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc.AlterMaterializedViewTypes;
import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc;
@@ -121,7 +113,6 @@ import org.apache.hadoop.hive.ql.plan.BasicStatsWork;
import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
@@ -132,7 +123,6 @@ import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
import org.apache.hadoop.hive.ql.plan.DescTableDesc;
import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
@@ -162,7 +152,6 @@ import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
-import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc;
@@ -176,7 +165,6 @@ import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.TruncateTableDesc;
import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde.serdeConstants;
@@ -373,12 +361,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
case HiveParser.TOK_TRUNCATETABLE:
analyzeTruncateTable(ast);
break;
- case HiveParser.TOK_CREATEINDEX:
- analyzeCreateIndex(ast);
- break;
- case HiveParser.TOK_DROPINDEX:
- analyzeDropIndex(ast);
- break;
case HiveParser.TOK_DESCTABLE:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeDescribeTable(ast);
@@ -485,12 +467,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
}
break;
}
- case HiveParser.TOK_ALTERINDEX_REBUILD:
- analyzeAlterIndexRebuild(ast);
- break;
- case HiveParser.TOK_ALTERINDEX_PROPERTIES:
- analyzeAlterIndexProps(ast);
- break;
case HiveParser.TOK_SHOWPARTITIONS:
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowPartitions(ast);
@@ -503,10 +479,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
ctx.setResFile(ctx.getLocalTmpPath());
analyzeShowCreateTable(ast);
break;
- case HiveParser.TOK_SHOWINDEXES:
- ctx.setResFile(ctx.getLocalTmpPath());
- analyzeShowIndexes(ast);
- break;
case HiveParser.TOK_LOCKTABLE:
analyzeLockTable(ast);
break;
@@ -1486,11 +1458,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
try {
columnNames = getColumnNames((ASTNode)ast.getChild(1));
- // Throw an error if the table is indexed
- List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
- if (indexes != null && indexes.size() > 0) {
- throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
- }
// It would be possible to support this, but this is such a pointless command.
if (AcidUtils.isInsertOnlyTable(table.getParameters())) {
throw new SemanticException("Truncating MM table columns not presently supported");
@@ -1634,235 +1601,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
return true;
}
- private void analyzeCreateIndex(ASTNode ast) throws SemanticException {
- String indexName = unescapeIdentifier(ast.getChild(0).getText());
- String typeName = unescapeSQLString(ast.getChild(1).getText());
- String[] qTabName = getQualifiedTableName((ASTNode) ast.getChild(2));
- List<String> indexedCols = getColumnNames((ASTNode) ast.getChild(3));
-
- IndexType indexType = HiveIndex.getIndexType(typeName);
- if (indexType != null) {
- typeName = indexType.getHandlerClsName();
- } else {
- try {
- JavaUtils.loadClass(typeName);
- } catch (Exception e) {
- throw new SemanticException("class name provided for index handler not found.", e);
- }
- }
-
- String indexTableName = null;
- boolean deferredRebuild = false;
- String location = null;
- Map<String, String> tblProps = null;
- Map<String, String> idxProps = null;
- String indexComment = null;
-
- RowFormatParams rowFormatParams = new RowFormatParams();
- StorageFormat storageFormat = new StorageFormat(conf);
-
- for (int idx = 4; idx < ast.getChildCount(); idx++) {
- ASTNode child = (ASTNode) ast.getChild(idx);
- if (storageFormat.fillStorageFormat(child)) {
- continue;
- }
- switch (child.getToken().getType()) {
- case HiveParser.TOK_TABLEROWFORMAT:
- rowFormatParams.analyzeRowFormat(child);
- break;
- case HiveParser.TOK_CREATEINDEX_INDEXTBLNAME:
- ASTNode ch = (ASTNode) child.getChild(0);
- indexTableName = getUnescapedName(ch);
- break;
- case HiveParser.TOK_DEFERRED_REBUILDINDEX:
- deferredRebuild = true;
- break;
- case HiveParser.TOK_TABLELOCATION:
- location = unescapeSQLString(child.getChild(0).getText());
- addLocationToOutputs(location);
- break;
- case HiveParser.TOK_TABLEPROPERTIES:
- tblProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0));
- break;
- case HiveParser.TOK_INDEXPROPERTIES:
- idxProps = DDLSemanticAnalyzer.getProps((ASTNode) child.getChild(0));
- break;
- case HiveParser.TOK_TABLESERIALIZER:
- child = (ASTNode) child.getChild(0);
- storageFormat.setSerde(unescapeSQLString(child.getChild(0).getText()));
- if (child.getChildCount() == 2) {
- readProps((ASTNode) (child.getChild(1).getChild(0)),
- storageFormat.getSerdeProps());
- }
- break;
- case HiveParser.TOK_INDEXCOMMENT:
- child = (ASTNode) child.getChild(0);
- indexComment = unescapeSQLString(child.getText());
- }
- }
-
- storageFormat.fillDefaultStorageFormat(false, false);
- if (indexTableName == null) {
- indexTableName = MetaStoreUtils.getIndexTableName(qTabName[0], qTabName[1], indexName);
- indexTableName = qTabName[0] + "." + indexTableName; // on same database with base table
- } else {
- indexTableName = getDotName(Utilities.getDbTableName(indexTableName));
- }
- inputs.add(new ReadEntity(getTable(qTabName)));
-
- CreateIndexDesc crtIndexDesc = new CreateIndexDesc(getDotName(qTabName), indexName,
- indexedCols, indexTableName, deferredRebuild, storageFormat.getInputFormat(),
- storageFormat.getOutputFormat(),
- storageFormat.getStorageHandler(), typeName, location, idxProps, tblProps,
- storageFormat.getSerde(), storageFormat.getSerdeProps(), rowFormatParams.collItemDelim,
- rowFormatParams.fieldDelim, rowFormatParams.fieldEscape,
- rowFormatParams.lineDelim, rowFormatParams.mapKeyDelim, indexComment);
- Task<?> createIndex =
- TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtIndexDesc), conf);
- rootTasks.add(createIndex);
- }
-
- private void analyzeDropIndex(ASTNode ast) throws SemanticException {
- String indexName = unescapeIdentifier(ast.getChild(0).getText());
- String tableName = getUnescapedName((ASTNode) ast.getChild(1));
- boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null);
- // we want to signal an error if the index doesn't exist and we're
- // configured not to ignore this
- boolean throwException =
- !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
- Table tbl = getTable(tableName, false);
- if (throwException && tbl == null) {
- throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
- }
- try {
- Index idx = db.getIndex(tableName, indexName);
- } catch (HiveException e) {
- if (!(e.getCause() instanceof NoSuchObjectException)) {
- throw new SemanticException(ErrorMsg.CANNOT_DROP_INDEX.getMsg("dropping index"), e);
- }
- if (throwException) {
- throw new SemanticException(ErrorMsg.INVALID_INDEX.getMsg(indexName));
- }
- }
- if (tbl != null) {
- inputs.add(new ReadEntity(tbl));
- }
-
- DropIndexDesc dropIdxDesc = new DropIndexDesc(indexName, tableName, throwException);
- rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
- dropIdxDesc), conf));
- }
-
- private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException {
- String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
- String indexName = unescapeIdentifier(ast.getChild(1).getText());
- HashMap<String, String> partSpec = null;
- Tree part = ast.getChild(2);
- if (part != null) {
- partSpec = getValidatedPartSpec(getTable(qualified), (ASTNode)part, conf, false);
- }
- List<Task<?>> indexBuilder = getIndexBuilderMapRed(qualified, indexName, partSpec);
- rootTasks.addAll(indexBuilder);
-
- // Handle updating index timestamps
- AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.UPDATETIMESTAMP);
- alterIdxDesc.setIndexName(indexName);
- alterIdxDesc.setBaseTableName(getDotName(qualified));
- alterIdxDesc.setSpec(partSpec);
-
- Task<?> tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf);
- for (Task<?> t : indexBuilder) {
- t.addDependentTask(tsTask);
- }
- }
-
- private void analyzeAlterIndexProps(ASTNode ast)
- throws SemanticException {
-
- String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
- String indexName = unescapeIdentifier(ast.getChild(1).getText());
- HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(2))
- .getChild(0));
-
- AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.ADDPROPS);
- alterIdxDesc.setProps(mapProp);
- alterIdxDesc.setIndexName(indexName);
- alterIdxDesc.setBaseTableName(getDotName(qualified));
-
- rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf));
- }
-
- private List<Task<?>> getIndexBuilderMapRed(String[] names, String indexName,
- HashMap<String, String> partSpec) throws SemanticException {
- try {
- Index index = db.getIndex(names[0], names[1], indexName);
- Table indexTbl = null;
- String indexTableName = index.getIndexTableName();
- if (indexTableName != null) {
- indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName));
- }
- Table baseTbl = getTable(new String[] {index.getDbName(), index.getOrigTableName()});
-
- String handlerCls = index.getIndexHandlerClass();
- HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls);
-
- List<Partition> indexTblPartitions = null;
- List<Partition> baseTblPartitions = null;
- if (indexTbl != null) {
- indexTblPartitions = new ArrayList<Partition>();
- baseTblPartitions = preparePartitions(baseTbl, partSpec,
- indexTbl, db, indexTblPartitions);
- }
-
- LineageState lineageState = queryState.getLineageState();
- List<Task<?>> ret = handler.generateIndexBuildTaskList(baseTbl,
- index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs(),
- lineageState);
- return ret;
- } catch (Exception e) {
- throw new SemanticException(e);
- }
- }
-
- private List<Partition> preparePartitions(
- org.apache.hadoop.hive.ql.metadata.Table baseTbl,
- HashMap<String, String> partSpec,
- org.apache.hadoop.hive.ql.metadata.Table indexTbl, Hive db,
- List<Partition> indexTblPartitions)
- throws HiveException, MetaException {
- List<Partition> baseTblPartitions = new ArrayList<Partition>();
- if (partSpec != null) {
- // if partspec is specified, then only producing index for that
- // partition
- Partition part = db.getPartition(baseTbl, partSpec, false);
- if (part == null) {
- throw new HiveException("Partition "
- + Warehouse.makePartName(partSpec, false)
- + " does not exist in table "
- + baseTbl.getTableName());
- }
- baseTblPartitions.add(part);
- Partition indexPart = db.getPartition(indexTbl, partSpec, false);
- if (indexPart == null) {
- indexPart = db.createPartition(indexTbl, partSpec);
- }
- indexTblPartitions.add(indexPart);
- } else if (baseTbl.isPartitioned()) {
- // if no partition is specified, create indexes for all partitions one
- // by one.
- baseTblPartitions = db.getPartitions(baseTbl);
- for (Partition basePart : baseTblPartitions) {
- HashMap<String, String> pSpec = basePart.getSpec();
- Partition indexPart = db.getPartition(indexTbl, pSpec, false);
- if (indexPart == null) {
- indexPart = db.createPartition(indexTbl, pSpec);
- }
- indexTblPartitions.add(indexPart);
- }
- }
- return baseTblPartitions;
- }
-
private void validateAlterTableType(Table tbl, AlterTableTypes op) throws SemanticException {
validateAlterTableType(tbl, op, false);
}
@@ -2190,17 +1928,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
List<String> bucketCols = null;
Class<? extends InputFormat> inputFormatClass = null;
boolean isArchived = false;
- boolean checkIndex = HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX);
- if (checkIndex) {
- List<Index> indexes = db.getIndexes(tblObj.getDbName(), tblObj.getTableName(),
- Short.MAX_VALUE);
- if (indexes != null && indexes.size() > 0) {
- throw new SemanticException("can not do merge because source table "
- + tableName + " is indexed.");
- }
- }
-
if (tblObj.isPartitioned()) {
if (partSpec == null) {
throw new SemanticException("source table " + tableName
@@ -2762,6 +2489,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
setFetchTask(createFetchTask(showCreateDbDesc.getSchema()));
}
+
private void analyzeShowCreateTable(ASTNode ast) throws SemanticException {
ShowCreateTableDesc showCreateTblDesc;
String tableName = getUnescapedName((ASTNode)ast.getChild(0));
@@ -2896,21 +2624,6 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema()));
}
- private void analyzeShowIndexes(ASTNode ast) throws SemanticException {
- ShowIndexesDesc showIndexesDesc;
- String tableName = getUnescapedName((ASTNode) ast.getChild(0));
- showIndexesDesc = new ShowIndexesDesc(tableName, ctx.getResFile());
-
- if (ast.getChildCount() == 2) {
- int descOptions = ast.getChild(1).getType();
- showIndexesDesc.setFormatted(descOptions == HiveParser.KW_FORMATTED);
- }
-
- rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
- showIndexesDesc), conf));
- setFetchTask(createFetchTask(showIndexesDesc.getSchema()));
- }
-
/**
* Add the task according to the parsed command tree. This is used for the CLI
* command "SHOW FUNCTIONS;".
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 9073623..3e84fd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -151,10 +151,6 @@ TOK_COLTYPELIST;
TOK_CREATEDATABASE;
TOK_CREATETABLE;
TOK_TRUNCATETABLE;
-TOK_CREATEINDEX;
-TOK_CREATEINDEX_INDEXTBLNAME;
-TOK_DEFERRED_REBUILDINDEX;
-TOK_DROPINDEX;
TOK_LIKETABLE;
TOK_DESCTABLE;
TOK_DESCFUNCTION;
@@ -189,8 +185,6 @@ TOK_ALTERTABLE_CLUSTER_SORT;
TOK_ALTERTABLE_COMPACT;
TOK_ALTERTABLE_DROPCONSTRAINT;
TOK_ALTERTABLE_ADDCONSTRAINT;
-TOK_ALTERINDEX_REBUILD;
-TOK_ALTERINDEX_PROPERTIES;
TOK_MSCK;
TOK_SHOWDATABASES;
TOK_SHOWTABLES;
@@ -271,8 +265,6 @@ TOK_EXPLAIN_SQ_REWRITE;
TOK_TABLESERIALIZER;
TOK_TABLEPROPERTIES;
TOK_TABLEPROPLIST;
-TOK_INDEXPROPERTIES;
-TOK_INDEXPROPLIST;
TOK_TABTYPE;
TOK_LIMIT;
TOK_OFFSET;
@@ -310,7 +302,6 @@ TOK_PRIV_ALTER_METADATA;
TOK_PRIV_ALTER_DATA;
TOK_PRIV_DELETE;
TOK_PRIV_DROP;
-TOK_PRIV_INDEX;
TOK_PRIV_INSERT;
TOK_PRIV_LOCK;
TOK_PRIV_SELECT;
@@ -324,9 +315,7 @@ TOK_SHOW_ROLE_GRANT;
TOK_SHOW_ROLES;
TOK_SHOW_SET_ROLE;
TOK_SHOW_ROLE_PRINCIPALS;
-TOK_SHOWINDEXES;
TOK_SHOWDBLOCKS;
-TOK_INDEXCOMMENT;
TOK_DESCDATABASE;
TOK_DATABASEPROPERTIES;
TOK_DATABASELOCATION;
@@ -929,8 +918,6 @@ ddlStatement
| dropMaterializedViewStatement
| createFunctionStatement
| createMacroStatement
- | createIndexStatement
- | dropIndexStatement
| dropFunctionStatement
| reloadFunctionStatement
| dropMacroStatement
@@ -1102,80 +1089,6 @@ truncateTableStatement
@after { popMsg(state); }
: KW_TRUNCATE KW_TABLE tablePartitionPrefix (KW_COLUMNS LPAREN columnNameList RPAREN)? -> ^(TOK_TRUNCATETABLE tablePartitionPrefix columnNameList?);
-createIndexStatement
-@init { pushMsg("create index statement", state);}
-@after {popMsg(state);}
- : KW_CREATE KW_INDEX indexName=identifier
- KW_ON KW_TABLE tab=tableName LPAREN indexedCols=columnNameList RPAREN
- KW_AS typeName=StringLiteral
- autoRebuild?
- indexPropertiesPrefixed?
- indexTblName?
- tableRowFormat?
- tableFileFormat?
- tableLocation?
- tablePropertiesPrefixed?
- indexComment?
- ->^(TOK_CREATEINDEX $indexName $typeName $tab $indexedCols
- autoRebuild?
- indexPropertiesPrefixed?
- indexTblName?
- tableRowFormat?
- tableFileFormat?
- tableLocation?
- tablePropertiesPrefixed?
- indexComment?)
- ;
-
-indexComment
-@init { pushMsg("comment on an index", state);}
-@after {popMsg(state);}
- :
- KW_COMMENT comment=StringLiteral -> ^(TOK_INDEXCOMMENT $comment)
- ;
-
-autoRebuild
-@init { pushMsg("auto rebuild index", state);}
-@after {popMsg(state);}
- : KW_WITH KW_DEFERRED KW_REBUILD
- ->^(TOK_DEFERRED_REBUILDINDEX)
- ;
-
-indexTblName
-@init { pushMsg("index table name", state);}
-@after {popMsg(state);}
- : KW_IN KW_TABLE indexTbl=tableName
- ->^(TOK_CREATEINDEX_INDEXTBLNAME $indexTbl)
- ;
-
-indexPropertiesPrefixed
-@init { pushMsg("table properties with prefix", state); }
-@after { popMsg(state); }
- :
- KW_IDXPROPERTIES! indexProperties
- ;
-
-indexProperties
-@init { pushMsg("index properties", state); }
-@after { popMsg(state); }
- :
- LPAREN indexPropertiesList RPAREN -> ^(TOK_INDEXPROPERTIES indexPropertiesList)
- ;
-
-indexPropertiesList
-@init { pushMsg("index properties list", state); }
-@after { popMsg(state); }
- :
- keyValueProperty (COMMA keyValueProperty)* -> ^(TOK_INDEXPROPLIST keyValueProperty+)
- ;
-
-dropIndexStatement
-@init { pushMsg("drop index statement", state);}
-@after {popMsg(state);}
- : KW_DROP KW_INDEX ifExists? indexName=identifier KW_ON tab=tableName
- ->^(TOK_DROPINDEX $indexName $tab ifExists?)
- ;
-
dropTableStatement
@init { pushMsg("drop statement", state); }
@after { popMsg(state); }
@@ -1190,7 +1103,6 @@ alterStatement
| KW_ALTER KW_VIEW tableName KW_AS? alterViewStatementSuffix -> ^(TOK_ALTERVIEW tableName alterViewStatementSuffix)
| KW_ALTER KW_MATERIALIZED KW_VIEW tableName alterMaterializedViewStatementSuffix
-> ^(TOK_ALTER_MATERIALIZED_VIEW tableName alterMaterializedViewStatementSuffix)
- | KW_ALTER KW_INDEX alterIndexStatementSuffix -> alterIndexStatementSuffix
| KW_ALTER (KW_DATABASE|KW_SCHEMA) alterDatabaseStatementSuffix -> alterDatabaseStatementSuffix
;
@@ -1254,20 +1166,6 @@ alterMaterializedViewStatementSuffix
| alterMaterializedViewSuffixRebuild
;
-alterIndexStatementSuffix
-@init { pushMsg("alter index statement", state); }
-@after { popMsg(state); }
- : indexName=identifier KW_ON tableName partitionSpec?
- (
- KW_REBUILD
- ->^(TOK_ALTERINDEX_REBUILD tableName $indexName partitionSpec?)
- |
- KW_SET KW_IDXPROPERTIES
- indexProperties
- ->^(TOK_ALTERINDEX_PROPERTIES tableName $indexName indexProperties)
- )
- ;
-
alterDatabaseStatementSuffix
@init { pushMsg("alter database statement", state); }
@after { popMsg(state); }
@@ -1652,8 +1550,6 @@ showStatement
|
(parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
)
- | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
- -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?)
| KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS)
| KW_SHOW KW_TRANSACTIONS -> ^(TOK_SHOW_TRANSACTIONS)
| KW_SHOW KW_CONF StringLiteral -> ^(TOK_SHOWCONF StringLiteral)
@@ -1839,7 +1735,6 @@ privilegeType
| KW_UPDATE -> ^(TOK_PRIV_ALTER_DATA)
| KW_CREATE -> ^(TOK_PRIV_CREATE)
| KW_DROP -> ^(TOK_PRIV_DROP)
- | KW_INDEX -> ^(TOK_PRIV_INDEX)
| KW_LOCK -> ^(TOK_PRIV_LOCK)
| KW_SELECT -> ^(TOK_PRIV_SELECT)
| KW_SHOW_DATABASE -> ^(TOK_PRIV_SHOW_DATABASE)
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
deleted file mode 100644
index 22b6697..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.parse;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-import java.io.Serializable;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-public class IndexUpdater {
- private List<LoadTableDesc> loadTableWork;
- private HiveConf conf;
- // Assumes one instance of this + single-threaded compilation for each query.
- private Hive hive;
- private List<Task<? extends Serializable>> tasks;
- private Set<ReadEntity> inputs;
- private LineageState lineageState;
-
- public IndexUpdater(List<LoadTableDesc> loadTableWork, Set<ReadEntity> inputs, Configuration conf,
- LineageState lineageState) {
- this.loadTableWork = loadTableWork;
- this.inputs = inputs;
- this.conf = new HiveConf(conf, IndexUpdater.class);
- this.lineageState = lineageState;
- this.tasks = new LinkedList<Task<? extends Serializable>>();
- }
-
- public IndexUpdater(LoadTableDesc loadTableWork, Set<ReadEntity> inputs,
- Configuration conf) {
- this.loadTableWork = new LinkedList<LoadTableDesc>();
- this.loadTableWork.add(loadTableWork);
- this.conf = new HiveConf(conf, IndexUpdater.class);
- this.tasks = new LinkedList<Task<? extends Serializable>>();
- this.inputs = inputs;
- }
-
- public List<Task<? extends Serializable>> generateUpdateTasks() throws
- HiveException {
- hive = Hive.get(this.conf);
- for (LoadTableDesc ltd : loadTableWork) {
- TableDesc td = ltd.getTable();
- Table srcTable = hive.getTable(td.getTableName());
- List<Index> tblIndexes = IndexUtils.getAllIndexes(srcTable, (short)-1);
- Map<String, String> partSpec = ltd.getPartitionSpec();
- if (partSpec == null || partSpec.size() == 0) {
- //unpartitioned table, update whole index
- doIndexUpdate(tblIndexes);
- } else {
- doIndexUpdate(tblIndexes, partSpec);
- }
- }
- return tasks;
- }
-
- private void doIndexUpdate(List<Index> tblIndexes) throws HiveException {
- for (Index idx : tblIndexes) {
- StringBuilder sb = new StringBuilder();
- sb.append("ALTER INDEX ");
- sb.append(idx.getIndexName());
- sb.append(" ON ");
- sb.append(idx.getDbName()).append('.');
- sb.append(idx.getOrigTableName());
- sb.append(" REBUILD");
- compileRebuild(sb.toString());
- }
- }
-
- private void doIndexUpdate(List<Index> tblIndexes, Map<String, String>
- partSpec) throws HiveException {
- for (Index index : tblIndexes) {
- if (containsPartition(index, partSpec)) {
- doIndexUpdate(index, partSpec);
- }
- }
- }
-
- private void doIndexUpdate(Index index, Map<String, String> partSpec) {
- StringBuilder ps = new StringBuilder();
- boolean first = true;
- ps.append("(");
- for (String key : partSpec.keySet()) {
- if (!first) {
- ps.append(", ");
- } else {
- first = false;
- }
- ps.append(key);
- ps.append("=");
- ps.append(partSpec.get(key));
- }
- ps.append(")");
- StringBuilder sb = new StringBuilder();
- sb.append("ALTER INDEX ");
- sb.append(index.getIndexName());
- sb.append(" ON ");
- sb.append(index.getDbName()).append('.');
- sb.append(index.getOrigTableName());
- sb.append(" PARTITION ");
- sb.append(ps.toString());
- sb.append(" REBUILD");
- compileRebuild(sb.toString());
- }
-
- private void compileRebuild(String query) {
- Driver driver = new Driver(this.conf, lineageState);
- driver.compile(query, false);
- tasks.addAll(driver.getPlan().getRootTasks());
- inputs.addAll(driver.getPlan().getInputs());
- }
-
-
- private boolean containsPartition(Index index,
- Map<String, String> partSpec) throws HiveException {
- String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
- Table indexTable = hive.getTable(qualified[0], qualified[1]);
- List<Partition> parts = hive.getPartitions(indexTable, partSpec);
- return (parts == null || parts.size() == 0);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 3619763..cc66936 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -360,25 +360,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
statTask = TaskFactory.get(columnStatsWork, conf);
}
- // HIVE-3334 has been filed for load file with index auto update
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
- IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, getInputs(), conf);
- try {
- List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater.generateUpdateTasks();
-
- for (Task<? extends Serializable> updateTask : indexUpdateTasks) {
- //LOAD DATA will either have a copy & move or just a move,
- // we always want the update to be dependent on the move
- childTask.addDependentTask(updateTask);
- if (statTask != null) {
- updateTask.addDependentTask(statTask);
- }
- }
- } catch (HiveException e) {
- console.printInfo("WARNING: could not auto-update stale indexes, indexes are not out of sync");
- }
- }
- else if (statTask != null) {
+ if (statTask != null) {
childTask.addDependentTask(statTask);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index 2e1f50e..34963ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -74,7 +74,6 @@ public final class SemanticAnalyzerFactory {
commandType.put(HiveParser.TOK_SHOW_CREATEDATABASE, HiveOperation.SHOW_CREATEDATABASE);
commandType.put(HiveParser.TOK_SHOW_CREATETABLE, HiveOperation.SHOW_CREATETABLE);
commandType.put(HiveParser.TOK_SHOWFUNCTIONS, HiveOperation.SHOWFUNCTIONS);
- commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES);
commandType.put(HiveParser.TOK_SHOWPARTITIONS, HiveOperation.SHOWPARTITIONS);
commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS);
commandType.put(HiveParser.TOK_SHOWDBLOCKS, HiveOperation.SHOWLOCKS);
@@ -90,10 +89,6 @@ public final class SemanticAnalyzerFactory {
commandType.put(HiveParser.TOK_CREATE_MATERIALIZED_VIEW, HiveOperation.CREATE_MATERIALIZED_VIEW);
commandType.put(HiveParser.TOK_DROPVIEW, HiveOperation.DROPVIEW);
commandType.put(HiveParser.TOK_DROP_MATERIALIZED_VIEW, HiveOperation.DROP_MATERIALIZED_VIEW);
- commandType.put(HiveParser.TOK_CREATEINDEX, HiveOperation.CREATEINDEX);
- commandType.put(HiveParser.TOK_DROPINDEX, HiveOperation.DROPINDEX);
- commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD);
- commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, HiveOperation.ALTERINDEX_PROPS);
commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES);
commandType.put(HiveParser.TOK_ALTERVIEW_DROPPROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES);
commandType.put(HiveParser.TOK_ALTERVIEW_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS);
@@ -299,8 +294,6 @@ public final class SemanticAnalyzerFactory {
case HiveParser.TOK_DESCTABLE:
case HiveParser.TOK_DESCFUNCTION:
case HiveParser.TOK_MSCK:
- case HiveParser.TOK_ALTERINDEX_REBUILD:
- case HiveParser.TOK_ALTERINDEX_PROPERTIES:
case HiveParser.TOK_SHOWDATABASES:
case HiveParser.TOK_SHOWTABLES:
case HiveParser.TOK_SHOWCOLUMNS:
@@ -310,7 +303,6 @@ public final class SemanticAnalyzerFactory {
case HiveParser.TOK_SHOW_CREATETABLE:
case HiveParser.TOK_SHOWFUNCTIONS:
case HiveParser.TOK_SHOWPARTITIONS:
- case HiveParser.TOK_SHOWINDEXES:
case HiveParser.TOK_SHOWLOCKS:
case HiveParser.TOK_SHOWDBLOCKS:
case HiveParser.TOK_SHOW_COMPACTIONS:
@@ -319,8 +311,6 @@ public final class SemanticAnalyzerFactory {
case HiveParser.TOK_SHOWCONF:
case HiveParser.TOK_SHOWVIEWS:
case HiveParser.TOK_SHOWMATERIALIZEDVIEWS:
- case HiveParser.TOK_CREATEINDEX:
- case HiveParser.TOK_DROPINDEX:
case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:
case HiveParser.TOK_LOCKTABLE:
case HiveParser.TOK_UNLOCKTABLE:
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 92d29e3..3122db8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -220,21 +220,6 @@ public abstract class TaskCompiler {
.get(new MoveWork(null, null, ltd, null, false),
conf);
mvTask.add(tsk);
- // Check to see if we are stale'ing any indexes and auto-update them if we want
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
- IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf,
- queryState.getLineageState());
- try {
- List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater
- .generateUpdateTasks();
- for (Task<? extends Serializable> updateTask : indexUpdateTasks) {
- tsk.addDependentTask(updateTask);
- }
- } catch (HiveException e) {
- console
- .printInfo("WARNING: could not auto-update stale indexes, which are not in sync");
- }
- }
}
boolean oneLoadFileForCtas = true;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java
deleted file mode 100644
index a335495..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-import java.util.Map;
-
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-/**
- * AlterIndexDesc.
- *
- */
-@Explain(displayName = "Alter Index", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterIndexDesc extends DDLDesc implements Serializable {
- private static final long serialVersionUID = 1L;
- private String indexName;
- private String baseTable;
- private Map<String, String> partSpec; // partition specification of partitions touched
- private Map<String, String> props;
-
- /**
- * alterIndexTypes.
- *
- */
- public static enum AlterIndexTypes {
- UPDATETIMESTAMP,
- ADDPROPS};
-
- AlterIndexTypes op;
-
- public AlterIndexDesc() {
- }
-
- public AlterIndexDesc(AlterIndexTypes type) {
- this.op = type;
- }
-
- /**
- * @return the name of the index
- */
- @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getIndexName() {
- return indexName;
- }
-
- /**
- * @param indexName
- * the indexName to set
- */
- public void setIndexName(String indexName) {
- this.indexName = indexName;
- }
-
- /**
- * @return the baseTable
- */
- @Explain(displayName = "new name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public String getBaseTableName() {
- return baseTable;
- }
-
- /**
- * @param baseTable
- * the baseTable to set
- */
- public void setBaseTableName(String baseTable) {
- this.baseTable = baseTable;
- }
-
- /**
- * @return the partition spec
- */
- public Map<String, String> getSpec() {
- return partSpec;
- }
-
- /**
- * @param partSpec
- * the partition spec to set
- */
- public void setSpec(Map<String, String> partSpec) {
- this.partSpec = partSpec;
- }
-
- /**
- * @return the op
- */
- public AlterIndexTypes getOp() {
- return op;
- }
-
- /**
- * @param op
- * the op to set
- */
- public void setOp(AlterIndexTypes op) {
- this.op = op;
- }
-
- /**
- * @return the props
- */
- @Explain(displayName = "properties")
- public Map<String, String> getProps() {
- return props;
- }
-
- /**
- * @param props
- * the props to set
- */
- public void setProps(Map<String, String> props) {
- this.props = props;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java
deleted file mode 100644
index c003ee5..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateIndexDesc.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-import java.util.List;
-import java.util.Map;
-
-/**
- * create index descriptor
- */
-public class CreateIndexDesc extends DDLDesc implements Serializable {
-
- private static final long serialVersionUID = 1L;
- String tableName;
- String indexName;
- List<String> indexedCols;
- String indexTableName;
- boolean deferredRebuild;
- String inputFormat;
- String outputFormat;
- String serde;
- String storageHandler;
- String indexTypeHandlerClass;
- String location;
- Map<String, String> idxProps;
- Map<String, String> tblProps;
- Map<String, String> serdeProps;
- String collItemDelim;
- String fieldDelim;
- String fieldEscape;
- String lineDelim;
- String mapKeyDelim;
-
- String indexComment;
-
- public CreateIndexDesc() {
- super();
- }
-
- public CreateIndexDesc(String tableName, String indexName,
- List<String> indexedCols, String indexTableName, boolean deferredRebuild,
- String inputFormat, String outputFormat, String storageHandler,
- String typeName, String location, Map<String, String> idxProps, Map<String, String> tblProps,
- String serde, Map<String, String> serdeProps, String collItemDelim,
- String fieldDelim, String fieldEscape, String lineDelim,
- String mapKeyDelim, String indexComment) {
- super();
- this.tableName = tableName;
- this.indexName = indexName;
- this.indexedCols = indexedCols;
- this.indexTableName = indexTableName;
- this.deferredRebuild = deferredRebuild;
- this.inputFormat = inputFormat;
- this.outputFormat = outputFormat;
- this.serde = serde;
- this.storageHandler = storageHandler;
- this.indexTypeHandlerClass = typeName;
- this.location = location;
- this.idxProps = idxProps;
- this.tblProps = tblProps;
- this.serde = serde;
- this.serdeProps = serdeProps;
- this.collItemDelim = collItemDelim;
- this.fieldDelim = fieldDelim;
- this.fieldEscape = fieldEscape;
- this.lineDelim = lineDelim;
- this.mapKeyDelim = mapKeyDelim;
- this.indexComment = indexComment;
- }
-
- public String getTableName() {
- return tableName;
- }
-
- public void setTableName(String tableName) {
- this.tableName = tableName;
- }
-
- public String getIndexName() {
- return indexName;
- }
-
- public void setIndexName(String indexName) {
- this.indexName = indexName;
- }
-
- public List<String> getIndexedCols() {
- return indexedCols;
- }
-
- public void setIndexedCols(List<String> indexedCols) {
- this.indexedCols = indexedCols;
- }
-
- public String getIndexTableName() {
- return indexTableName;
- }
-
- public void setIndexTableName(String indexTableName) {
- this.indexTableName = indexTableName;
- }
-
- public boolean isDeferredRebuild() {
- return deferredRebuild;
- }
-
- public boolean getDeferredRebuild() {
- return deferredRebuild;
- }
-
- public void setDeferredRebuild(boolean deferredRebuild) {
- this.deferredRebuild = deferredRebuild;
- }
-
- public String getInputFormat() {
- return inputFormat;
- }
-
- public void setInputFormat(String inputFormat) {
- this.inputFormat = inputFormat;
- }
-
- public String getOutputFormat() {
- return outputFormat;
- }
-
- public void setOutputFormat(String outputFormat) {
- this.outputFormat = outputFormat;
- }
-
- public String getSerde() {
- return serde;
- }
-
- public void setSerde(String serde) {
- this.serde = serde;
- }
-
- public String getStorageHandler() {
- return storageHandler;
- }
-
- public void setStorageHandler(String storageHandler) {
- this.storageHandler = storageHandler;
- }
-
- public String getLocation() {
- return location;
- }
-
- public void setLocation(String location) {
- this.location = location;
- }
-
- public Map<String, String> getIdxProps() {
- return idxProps;
- }
-
- public void setIdxProps(Map<String, String> idxProps) {
- this.idxProps = idxProps;
- }
-
- public Map<String, String> getTblProps() {
- return tblProps;
- }
-
- public void setTblProps(Map<String, String> tblProps) {
- this.tblProps = tblProps;
- }
-
- public Map<String, String> getSerdeProps() {
- return serdeProps;
- }
-
- public void setSerdeProps(Map<String, String> serdeProps) {
- this.serdeProps = serdeProps;
- }
-
- public String getCollItemDelim() {
- return collItemDelim;
- }
-
- public void setCollItemDelim(String collItemDelim) {
- this.collItemDelim = collItemDelim;
- }
-
- public String getFieldDelim() {
- return fieldDelim;
- }
-
- public void setFieldDelim(String fieldDelim) {
- this.fieldDelim = fieldDelim;
- }
-
- public String getFieldEscape() {
- return fieldEscape;
- }
-
- public void setFieldEscape(String fieldEscape) {
- this.fieldEscape = fieldEscape;
- }
-
- public String getLineDelim() {
- return lineDelim;
- }
-
- public void setLineDelim(String lineDelim) {
- this.lineDelim = lineDelim;
- }
-
- public String getMapKeyDelim() {
- return mapKeyDelim;
- }
-
- public void setMapKeyDelim(String mapKeyDelim) {
- this.mapKeyDelim = mapKeyDelim;
- }
-
- public String getIndexTypeHandlerClass() {
- return indexTypeHandlerClass;
- }
-
- public void setIndexTypeHandlerClass(String indexTypeHandlerClass) {
- this.indexTypeHandlerClass = indexTypeHandlerClass;
- }
-
- public String getIndexComment() {
- return indexComment;
- }
-
- public void setIndexComment(String indexComment) {
- this.indexComment = indexComment;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
deleted file mode 100644
index b5bddec..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexedInputFormat.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Iterator;
-import java.util.Set;
-import java.util.Arrays;
-import java.util.Collection;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.IOPrepareCache;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-
-/**
- * Input format for doing queries that use indexes.
- * Uses a blockfilter file to specify the blocks to query.
- */
-public class HiveIndexedInputFormat extends HiveInputFormat {
- public static final Logger l4j = LoggerFactory.getLogger("HiveIndexInputFormat");
- private final String indexFile;
-
- public HiveIndexedInputFormat() {
- super();
- indexFile = "hive.index.blockfilter.file";
- }
-
- public HiveIndexedInputFormat(String indexFileName) {
- indexFile = indexFileName;
- }
-
- public InputSplit[] doGetSplits(JobConf job, int numSplits) throws IOException {
-
- super.init(job);
-
- Path[] dirs = FileInputFormat.getInputPaths(job);
- if (dirs.length == 0) {
- throw new IOException("No input paths specified in job");
- }
- JobConf newjob = new JobConf(job);
- ArrayList<InputSplit> result = new ArrayList<InputSplit>();
-
- // for each dir, get the InputFormat, and do getSplits.
- PartitionDesc part;
- for (Path dir : dirs) {
- part = HiveFileFormatUtils
- .getFromPathRecursively(pathToPartitionInfo, dir,
- IOPrepareCache.get().allocatePartitionDescMap(), true);
- // create a new InputFormat instance if this is the first time to see this
- // class
- Class inputFormatClass = part.getInputFileFormatClass();
- InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
-
- try {
- Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), newjob);
- } catch (HiveException e) {
- throw new IOException(e);
- }
-
- FileInputFormat.setInputPaths(newjob, dir);
- newjob.setInputFormat(inputFormat.getClass());
- InputSplit[] iss = inputFormat.getSplits(newjob, numSplits / dirs.length);
- for (InputSplit is : iss) {
- result.add(new HiveInputSplit(is, inputFormatClass.getName()));
- }
- }
- return result.toArray(new HiveInputSplit[result.size()]);
- }
-
- public static List<String> getIndexFiles(String indexFileStr) {
- // tokenize and store string of form (path,)+
- if (indexFileStr == null) {
- return null;
- }
- String[] chunks = indexFileStr.split(",");
- return Arrays.asList(chunks);
- }
-
- @Override
- public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
- String indexFileStr = job.get(indexFile);
- l4j.info("index_file is " + indexFileStr);
- List<String> indexFiles = getIndexFiles(indexFileStr);
-
- HiveIndexResult hiveIndexResult = null;
- if (indexFiles != null) {
- boolean first = true;
- StringBuilder newInputPaths = new StringBuilder();
- try {
- hiveIndexResult = new HiveIndexResult(indexFiles, job);
- } catch (HiveException e) {
- l4j.error("Unable to read index..");
- throw new IOException(e);
- }
-
- Set<String> inputFiles = hiveIndexResult.buckets.keySet();
- if (inputFiles == null || inputFiles.size() <= 0) {
- // return empty splits if index results were empty
- return new InputSplit[0];
- }
- Iterator<String> iter = inputFiles.iterator();
- while(iter.hasNext()) {
- String path = iter.next();
- if (path.trim().equalsIgnoreCase("")) {
- continue;
- }
- if (!first) {
- newInputPaths.append(",");
- } else {
- first = false;
- }
- newInputPaths.append(path);
- }
- FileInputFormat.setInputPaths(job, newInputPaths.toString());
- } else {
- return super.getSplits(job, numSplits);
- }
-
- HiveInputSplit[] splits = (HiveInputSplit[]) this.doGetSplits(job, numSplits);
-
- long maxInputSize = HiveConf.getLongVar(job, ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_SIZE);
- if (maxInputSize < 0) {
- maxInputSize=Long.MAX_VALUE;
- }
-
- SplitFilter filter = new SplitFilter(hiveIndexResult, maxInputSize);
- Collection<HiveInputSplit> newSplits = filter.filter(splits);
-
- return newSplits.toArray(new FileSplit[newSplits.size()]);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
deleted file mode 100644
index 9e714e4..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.api.StageType;
-
-public class IndexMetadataChangeTask extends Task<IndexMetadataChangeWork>{
-
- private static final long serialVersionUID = 1L;
-
- @Override
- protected int execute(DriverContext driverContext) {
-
- try {
- Hive db = Hive.get(conf);
- IndexMetadataChangeWork work = this.getWork();
- String tblName = work.getIndexTbl();
- Table tbl = db.getTable(work.getDbName(), tblName);
- if (tbl == null ) {
- console.printError("Index table can not be null.");
- return 1;
- }
-
- if (!tbl.getTableType().equals(TableType.INDEX_TABLE)) {
- console.printError("Table " + tbl.getTableName() + " not specified.");
- return 1;
- }
-
- if (tbl.isPartitioned() && work.getPartSpec() == null) {
- console.printError("Index table is partitioned, but no partition specified.");
- return 1;
- }
-
- if (work.getPartSpec() != null) {
- Partition part = db.getPartition(tbl, work.getPartSpec(), false);
- if (part == null) {
- console.printError("Partition " +
- Warehouse.makePartName(work.getPartSpec(), false).toString()
- + " does not exist.");
- return 1;
- }
-
- Path path = part.getDataLocation();
- FileSystem fs = path.getFileSystem(conf);
- FileStatus fstat = fs.getFileStatus(path);
-
- part.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
- db.alterPartition(tbl.getTableName(), part, null);
- } else {
- Path url = new Path(tbl.getPath().toString());
- FileSystem fs = url.getFileSystem(conf);
- FileStatus fstat = fs.getFileStatus(url);
- tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime()));
- db.alterTable(tbl, null);
- }
- } catch (Exception e) {
- e.printStackTrace();
- console.printError("Error changing index table/partition metadata "
- + e.getMessage());
- return 1;
- }
- return 0;
- }
-
- @Override
- public String getName() {
- return IndexMetadataChangeTask.class.getSimpleName();
- }
-
- @Override
- public StageType getType() {
- return StageType.DDL;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java
deleted file mode 100644
index 6d77ea4..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeWork.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.Serializable;
-import java.util.HashMap;
-
-public class IndexMetadataChangeWork implements Serializable {
-
- private static final long serialVersionUID = 1L;
-
- private HashMap<String, String> partSpec;
- private String indexTbl;
- private String dbName;
-
- public IndexMetadataChangeWork() {
- }
-
- public IndexMetadataChangeWork(HashMap<String, String> partSpec,
- String indexTbl, String dbName) {
- super();
- this.partSpec = partSpec;
- this.indexTbl = indexTbl;
- this.dbName = dbName;
- }
-
- public HashMap<String, String> getPartSpec() {
- return partSpec;
- }
-
- public void setPartSpec(HashMap<String, String> partSpec) {
- this.partSpec = partSpec;
- }
-
- public String getIndexTbl() {
- return indexTbl;
- }
-
- public void setIndexTbl(String indexTbl) {
- this.indexTbl = indexTbl;
- }
-
- public String getDbName() {
- return dbName;
- }
-
- public void setDbName(String dbName) {
- this.dbName = dbName;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
index 7476036..6a3f3b4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
@@ -61,7 +61,10 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
* Currently, it only supports pure conjunctions over binary expressions
* comparing a column reference with a constant value. It is assumed
* that all column aliases encountered refer to the same table.
+ *
+ * @deprecated kept only because some storage handlers still use it internally
*/
+@Deprecated
public class IndexPredicateAnalyzer {
private final Set<String> udfNames;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java
deleted file mode 100644
index e8f2daf..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexResult.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.mapred.FileSplit;
-
-public interface IndexResult {
- boolean contains(FileSplit split) throws HiveException;
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
index 15cb1f7..3985246 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
@@ -25,7 +25,9 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
* IndexSearchCondition represents an individual search condition
* found by {@link IndexPredicateAnalyzer}.
*
+ * @deprecated kept only because some storage handlers still use it internally
*/
+@Deprecated
public class IndexSearchCondition
{
private ExprNodeColumnDesc columnDesc;
@@ -56,7 +58,7 @@ public class IndexSearchCondition
* @param constantDesc constant value to search for
*
* @param indexExpr the comparison expression for the index
- *
+ *
* @param originalExpr the original comparison expression
*/
public IndexSearchCondition(
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java
deleted file mode 100644
index c51dec6..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/SplitFilter.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.List;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.mapred.FileSplit;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class SplitFilter {
- public static final Logger LOG = LoggerFactory.getLogger(SplitFilter.class);
-
- private final IndexResult indexResult;
- private final long maxInputSize;
-
- public SplitFilter(IndexResult indexResult, long maxInputSize) {
- this.indexResult = indexResult;
- this.maxInputSize = maxInputSize;
- }
-
- public List<HiveInputSplit> filter(HiveInputSplit[] splits) throws IOException {
- long sumSplitLengths = 0;
- List<HiveInputSplit> newSplits = new ArrayList<>();
-
- Arrays.sort(splits, new HiveInputSplitComparator());
-
- for (HiveInputSplit split : splits) {
- LOG.info("split start : " + split.getStart());
- LOG.info("split end : " + (split.getStart() + split.getLength()));
-
- try {
- if (indexResult.contains(split)) {
- HiveInputSplit newSplit = split;
- if (isAdjustmentRequired(newSplits, split)) {
- newSplit = adjustSplit(split);
- }
- sumSplitLengths += newSplit.getLength();
- if (sumSplitLengths > maxInputSize) {
- String messageTemplate = "Size of data to read during a compact-index-based query " +
- "exceeded the maximum of %d set in %s";
- throw new IOException(String.format(messageTemplate, maxInputSize,
- HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_SIZE.varname));
- }
- newSplits.add(newSplit);
- }
- } catch (HiveException e) {
- throw new RuntimeException("Unable to get metadata for input table split " +
- split.getPath(), e);
- }
- }
- LOG.info("Number of input splits: {}, new input splits: {}, sum of split lengths: {}",
- splits.length, newSplits.size(), sumSplitLengths);
- return newSplits;
- }
-
- private boolean isAdjustmentRequired(List<HiveInputSplit> newSplits, HiveInputSplit split) {
- return (split.inputFormatClassName().contains("RCFile") ||
- split.inputFormatClassName().contains("SequenceFile")) && split.getStart() > 0 &&
- !doesOverlap(newSplits, split.getPath(), adjustStart(split.getStart()));
- }
-
- private boolean doesOverlap(List<HiveInputSplit> newSplits, Path path, long start) {
- if (newSplits.isEmpty()) {
- return false;
- }
- HiveInputSplit lastSplit = Iterables.getLast(newSplits);
- if (lastSplit.getPath().equals(path)) {
- return lastSplit.getStart() + lastSplit.getLength() > start;
- }
- return false;
- }
-
- private long adjustStart(long start) {
- return start > SequenceFile.SYNC_INTERVAL ? start - SequenceFile.SYNC_INTERVAL : 0;
- }
-
- private HiveInputSplit adjustSplit(HiveInputSplit split) throws IOException {
- long adjustedStart = adjustStart(split.getStart());
- return new HiveInputSplit(new FileSplit(split.getPath(), adjustedStart,
- split.getStart() - adjustedStart + split.getLength(), split.getLocations()),
- split.inputFormatClassName());
- }
-
- @VisibleForTesting
- static final class HiveInputSplitComparator implements Comparator<HiveInputSplit> {
- @Override
- public int compare(HiveInputSplit o1, HiveInputSplit o2) {
- int pathCompare = comparePath(o1.getPath(), o2.getPath());
- if (pathCompare != 0) {
- return pathCompare;
- }
- return Long.compare(o1.getStart(), o2.getStart());
- }
-
- private int comparePath(Path p1, Path p2) {
- return p1.compareTo(p2);
- }
- }
-}
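For reference, the removed SplitFilter was driven with an IndexResult (also removed in this commit) that answered, per bucket file split, whether the index selected it. A minimal sketch of that wiring, which compiles only against a pre-removal tree; using a plain set of bucket file names in place of a real index-query result is an illustrative assumption:

import java.util.List;
import java.util.Set;
import org.apache.hadoop.hive.ql.index.IndexResult;
import org.apache.hadoop.hive.ql.index.SplitFilter;
import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;

public class SplitFilterSketch {
  // Keep only the splits whose bucket files were named by the index query result,
  // failing once the retained bytes exceed maxInputSize (see SplitFilter.filter above).
  public static List<HiveInputSplit> keepIndexedBuckets(HiveInputSplit[] splits,
      Set<String> bucketsFromIndex, long maxInputSize) throws Exception {
    IndexResult fromIndex = split -> bucketsFromIndex.contains(split.getPath().toString());
    return new SplitFilter(fromIndex, maxInputSize).filter(splits);
  }
}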
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
deleted file mode 100644
index d861522..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Set;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-/**
- * Index handler for indexes that use tables to store indexes.
- */
-public abstract class TableBasedIndexHandler extends AbstractIndexHandler {
- protected Configuration configuration;
-
- @Override
- public List<Task<?>> generateIndexBuildTaskList(
- org.apache.hadoop.hive.ql.metadata.Table baseTbl,
- org.apache.hadoop.hive.metastore.api.Index index,
- List<Partition> indexTblPartitions, List<Partition> baseTblPartitions,
- org.apache.hadoop.hive.ql.metadata.Table indexTbl,
- Set<ReadEntity> inputs, Set<WriteEntity> outputs,
- LineageState lineageState) throws HiveException {
- try {
-
- TableDesc desc = Utilities.getTableDesc(indexTbl);
-
- List<Partition> newBaseTblPartitions = new ArrayList<Partition>();
-
- List<Task<?>> indexBuilderTasks = new ArrayList<Task<?>>();
-
- if (!baseTbl.isPartitioned()) {
- // the table does not have any partitions, so create the index for the
- // whole table
- Task<?> indexBuilder = getIndexBuilderMapRedTask(inputs, outputs, index, false,
- new PartitionDesc(desc, null), indexTbl.getTableName(),
- new PartitionDesc(Utilities.getTableDesc(baseTbl), null),
- baseTbl.getTableName(), indexTbl.getDbName(), lineageState);
- indexBuilderTasks.add(indexBuilder);
- } else {
-
- // check whether the index table partitions still exist in the base
- // table
- for (int i = 0; i < indexTblPartitions.size(); i++) {
- Partition indexPart = indexTblPartitions.get(i);
- Partition basePart = null;
- for (int j = 0; j < baseTblPartitions.size(); j++) {
- if (baseTblPartitions.get(j).getName().equals(indexPart.getName())) {
- basePart = baseTblPartitions.get(j);
- newBaseTblPartitions.add(baseTblPartitions.get(j));
- break;
- }
- }
- if (basePart == null) {
- throw new RuntimeException(
- "Partitions of base table and index table are inconsistent.");
- }
- // for each partition, spawn a map reduce task.
- Task<?> indexBuilder = getIndexBuilderMapRedTask(inputs, outputs, index, true,
- new PartitionDesc(indexPart), indexTbl.getTableName(),
- new PartitionDesc(basePart), baseTbl.getTableName(), indexTbl.getDbName(),
- lineageState);
- indexBuilderTasks.add(indexBuilder);
- }
- }
- return indexBuilderTasks;
- } catch (Exception e) {
- throw new SemanticException(e);
- }
- }
-
- protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
- Index index, boolean partitioned,
- PartitionDesc indexTblPartDesc, String indexTableName,
- PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
- LineageState lineageState) throws HiveException {
- return getIndexBuilderMapRedTask(inputs, outputs, index.getSd().getCols(),
- partitioned, indexTblPartDesc, indexTableName, baseTablePartDesc, baseTableName, dbName,
- lineageState);
- }
-
- protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
- List<FieldSchema> indexField, boolean partitioned,
- PartitionDesc indexTblPartDesc, String indexTableName,
- PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
- LineageState lineageState) throws HiveException {
- return null;
- }
-
- protected List<String> getPartKVPairStringArray(
- LinkedHashMap<String, String> partSpec) {
- List<String> ret = new ArrayList<String>(partSpec.size());
- Iterator<Entry<String, String>> iter = partSpec.entrySet().iterator();
- while (iter.hasNext()) {
- StringBuilder sb = new StringBuilder();
- Entry<String, String> p = iter.next();
- sb.append(HiveUtils.unparseIdentifier(p.getKey()));
- sb.append(" = ");
- sb.append("'");
- sb.append(HiveUtils.escapeString(p.getValue()));
- sb.append("'");
- ret.add(sb.toString());
- }
- return ret;
- }
-
- @Override
- public boolean usesIndexTable() {
- return true;
- }
-
- @Override
- public Configuration getConf() {
- return configuration;
- }
-
- @Override
- public void setConf(Configuration conf) {
- this.configuration = conf;
- }
-
-}
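The partitioned branch above leans on getPartKVPairStringArray(), which renders a partition spec as `key` = 'value' fragments that the concrete handlers later join with commas inside PARTITION ( ... ) or with AND in the WHERE clause over the base table. A stand-alone rendering of the same formatting; the comma join here is purely for illustration:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;

public class PartSpecFormatSketch {
  // Mirrors getPartKVPairStringArray(): quote each key as an identifier and each
  // value as an escaped string literal.
  public static String render(LinkedHashMap<String, String> partSpec) {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> e : partSpec.entrySet()) {
      if (sb.length() > 0) {
        sb.append(", ");
      }
      sb.append(HiveUtils.unparseIdentifier(e.getKey()))
        .append(" = '")
        .append(HiveUtils.escapeString(e.getValue()))
        .append("'");
    }
    // e.g. {ds=2008-04-08, hr=12}  ->  `ds` = '2008-04-08', `hr` = '12'
    return sb.toString();
  }
}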
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
deleted file mode 100644
index 62db4db..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.bitmap;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.index.HiveIndexQueryContext;
-import org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
-import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
-import org.apache.hadoop.hive.ql.index.TableBasedIndexHandler;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-import org.apache.hadoop.hive.ql.stats.StatsUtils;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
-
-/**
- * Index handler for the bitmap index. Bitmap index uses an EWAH-compressed
- * bitmap to represent the values in a table.
- */
-public class BitmapIndexHandler extends TableBasedIndexHandler {
-
- private Configuration configuration;
- private static final Logger LOG = LoggerFactory.getLogger(BitmapIndexHandler.class.getName());
-
- @Override
- public void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
- ParseContext pctx, HiveIndexQueryContext queryContext) {
-
- Map<Index, ExprNodeDesc> indexPredicates = decomposePredicate(
- predicate,
- indexes,
- queryContext);
-
- if (indexPredicates == null) {
- LOG.info("No decomposed predicate found");
- queryContext.setQueryTasks(null);
- return; // abort if we couldn't pull out anything from the predicate
- }
-
- List<BitmapInnerQuery> iqs = new ArrayList<BitmapInnerQuery>(indexes.size());
- int i = 0;
- for (Index index : indexes) {
- ExprNodeDesc indexPredicate = indexPredicates.get(index);
- if (indexPredicate != null) {
- iqs.add(new BitmapInnerQuery(
- index.getIndexTableName(),
- indexPredicate,
- "ind" + i++));
- }
- }
- // setup TableScanOperator to change input format for original query
- queryContext.setIndexInputFormat(HiveIndexedInputFormat.class.getName());
-
- // Build reentrant QL for index query
- StringBuilder qlCommand = new StringBuilder("INSERT OVERWRITE DIRECTORY ");
-
- String tmpFile = pctx.getContext().getMRTmpPath().toUri().toString();
- qlCommand.append( "\"" + tmpFile + "\" "); // QL includes " around file name
- qlCommand.append("SELECT bucketname AS `_bucketname` , COLLECT_SET(offset) AS `_offsets` FROM ");
- qlCommand.append("(SELECT `_bucketname` AS bucketname , `_offset` AS offset FROM ");
-
-
- BitmapQuery head = iqs.get(0);
- for ( i = 1; i < iqs.size(); i++) {
- head = new BitmapOuterQuery("oind"+i, head, iqs.get(i));
- }
- qlCommand.append(head.toString());
- qlCommand.append(" WHERE NOT EWAH_BITMAP_EMPTY(" + head.getAlias() + ".`_bitmaps`) ) tmp_index GROUP BY bucketname");
-
- // generate tasks from index query string
- LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString());
- HiveConf queryConf = new HiveConf(pctx.getConf(), BitmapIndexHandler.class);
- HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false);
- Driver driver = new Driver(queryConf, pctx.getQueryState().getLineageState());
- driver.compile(qlCommand.toString(), false);
-
- queryContext.setIndexIntermediateFile(tmpFile);
- queryContext.addAdditionalSemanticInputs(driver.getPlan().getInputs());
- queryContext.setQueryTasks(driver.getPlan().getRootTasks());
- }
-
- /**
- * Split the predicate into the piece we can deal with (pushed), and the one we can't (residual)
- * @param predicate
- * @param index
- * @return
- */
- private Map<Index, ExprNodeDesc> decomposePredicate(ExprNodeDesc predicate, List<Index> indexes,
- HiveIndexQueryContext queryContext) {
-
- Map<Index, ExprNodeDesc> indexPredicates = new HashMap<Index, ExprNodeDesc>();
- // compute overall residual
- IndexPredicateAnalyzer analyzer = getIndexPredicateAnalyzer(indexes, queryContext.getQueryPartitions());
- List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
- ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicate, searchConditions);
- // pass residual predicate back out for further processing
- queryContext.setResidualPredicate(residualPredicate);
-
- if (searchConditions.size() == 0) {
- return null;
- }
-
- for (Index index : indexes) {
- ArrayList<Index> in = new ArrayList<Index>(1);
- in.add(index);
- analyzer = getIndexPredicateAnalyzer(in, queryContext.getQueryPartitions());
- searchConditions = new ArrayList<IndexSearchCondition>();
- // split predicate into pushed (what we can handle), and residual (what we can't handle)
- // pushed predicate from translateSearchConditions is stored for the current index
- // This ensures that we apply all possible predicates to each index
- analyzer.analyzePredicate(predicate, searchConditions);
- if (searchConditions.size() == 0) {
- indexPredicates.put(index, null);
- } else {
- indexPredicates.put(index, analyzer.translateSearchConditions(searchConditions));
- }
- }
-
- return indexPredicates;
- }
-
- /**
- * Instantiate a new predicate analyzer suitable for determining
- * whether we can use an index, based on rules for indexes in
- * WHERE clauses that we support
- *
- * @return preconfigured predicate analyzer for WHERE queries
- */
- private IndexPredicateAnalyzer getIndexPredicateAnalyzer(List<Index> indexes, Set<Partition> queryPartitions) {
- IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
-
- analyzer.addComparisonOp(GenericUDFOPEqual.class.getName());
- analyzer.addComparisonOp(GenericUDFOPLessThan.class.getName());
- analyzer.addComparisonOp(GenericUDFOPEqualOrLessThan.class.getName());
- analyzer.addComparisonOp(GenericUDFOPGreaterThan.class.getName());
- analyzer.addComparisonOp(GenericUDFOPEqualOrGreaterThan.class.getName());
-
- // only return results for columns in the list of indexes
- for (Index index : indexes) {
- List<FieldSchema> columnSchemas = index.getSd().getCols();
- for (FieldSchema column : columnSchemas) {
- analyzer.allowColumnName(column.getName());
- }
- }
-
- // partitioned columns are treated as if they have indexes so that the partitions
- // are used during the index query generation
- for (Partition part : queryPartitions) {
- if (part.getSpec().isEmpty()) {
- continue; // empty partitions are from whole tables, so we don't want to add them in
- }
- for (String column : part.getSpec().keySet()) {
- analyzer.allowColumnName(column);
- }
- }
-
- return analyzer;
- }
-
- @Override
- public void analyzeIndexDefinition(Table baseTable, Index index,
- Table indexTable) throws HiveException {
- StorageDescriptor storageDesc = index.getSd();
- if (this.usesIndexTable() && indexTable != null) {
- StorageDescriptor indexTableSd = storageDesc.deepCopy();
- List<FieldSchema> indexTblCols = indexTableSd.getCols();
- FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
- indexTblCols.add(bucketFileName);
- FieldSchema offSets = new FieldSchema("_offset", "bigint", "");
- indexTblCols.add(offSets);
- FieldSchema bitmaps = new FieldSchema("_bitmaps", "array<bigint>", "");
- indexTblCols.add(bitmaps);
- indexTable.setSd(indexTableSd);
- }
- }
-
- @Override
- protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
- List<FieldSchema> indexField, boolean partitioned,
- PartitionDesc indexTblPartDesc, String indexTableName,
- PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
- LineageState lineageState) throws HiveException {
-
- HiveConf builderConf = new HiveConf(getConf(), BitmapIndexHandler.class);
- HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEROWOFFSET, true);
-
- String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField);
-
- //form a new insert overwrite query.
- StringBuilder command= new StringBuilder();
- LinkedHashMap<String, String> partSpec = indexTblPartDesc.getPartSpec();
-
- String fullIndexTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName),
- HiveUtils.unparseIdentifier(indexTableName));
- command.append("INSERT OVERWRITE TABLE " + fullIndexTableName);
- if (partitioned && indexTblPartDesc != null) {
- command.append(" PARTITION ( ");
- List<String> ret = getPartKVPairStringArray(partSpec);
- for (int i = 0; i < ret.size(); i++) {
- String partKV = ret.get(i);
- command.append(partKV);
- if (i < ret.size() - 1) {
- command.append(",");
- }
- }
- command.append(" ) ");
- }
-
- String fullBaseTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName),
- HiveUtils.unparseIdentifier(baseTableName));
- command.append(" SELECT ");
- command.append(indexCols);
- command.append(",");
- command.append(VirtualColumn.FILENAME.getName());
- command.append(",");
- command.append(VirtualColumn.BLOCKOFFSET.getName());
- command.append(",");
- command.append("EWAH_BITMAP(");
- command.append(VirtualColumn.ROWOFFSET.getName());
- command.append(")");
- command.append(" FROM " + fullBaseTableName);
- LinkedHashMap<String, String> basePartSpec = baseTablePartDesc.getPartSpec();
- if(basePartSpec != null) {
- command.append(" WHERE ");
- List<String> pkv = getPartKVPairStringArray(basePartSpec);
- for (int i = 0; i < pkv.size(); i++) {
- String partKV = pkv.get(i);
- command.append(partKV);
- if (i < pkv.size() - 1) {
- command.append(" AND ");
- }
- }
- }
- command.append(" GROUP BY ");
- command.append(VirtualColumn.FILENAME.getName());
- command.append(",");
- command.append(VirtualColumn.BLOCKOFFSET.getName());
- for (FieldSchema fieldSchema : indexField) {
- command.append(",");
- command.append(HiveUtils.unparseIdentifier(fieldSchema.getName()));
- }
-
- // Require clusterby ROWOFFSET if map-side aggregation is off.
- // TODO: Make this work without map side aggregation
- if (!builderConf.get("hive.map.aggr", null).equals("true")) {
- throw new HiveException("Cannot construct index without map-side aggregation");
- }
-
- Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
- command, partSpec, indexTableName, dbName, lineageState);
- return rootTask;
- }
-
- @Override
- /**
- * No lower bound on bitmap index query size, so this will always return true
- */
- public boolean checkQuerySize(long querySize, HiveConf hiveConf) {
- return true;
- }
-
- @Override
- public boolean usesIndexTable() {
- return true;
- }
-
-}
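To make the generated re-entrant QL easier to picture, the string the handler assembles has roughly the shape sketched below. The temporary-directory placeholder and the elided inner pieces are illustrative; the inner SELECTs and the EWAH_BITMAP_AND join are supplied by BitmapInnerQuery and BitmapOuterQuery, shown further down:

public class BitmapIndexQuerySketch {
  // Illustrative shape of the query built in generateIndexQuery(): the nested bitmap
  // queries survive only where the AND of the index bitmaps is non-empty, and the
  // outer SELECT collapses the remaining rows into one _offsets array per bucket file.
  static final String EXAMPLE =
      "INSERT OVERWRITE DIRECTORY \"<mr-tmp-path>\" "
      + "SELECT bucketname AS `_bucketname`, COLLECT_SET(offset) AS `_offsets` FROM "
      + "(SELECT `_bucketname` AS bucketname, `_offset` AS offset FROM "
      + "   <inner/outer bitmap queries from BitmapInnerQuery/BitmapOuterQuery> "
      + " WHERE NOT EWAH_BITMAP_EMPTY(<last alias>.`_bitmaps`)) tmp_index "
      + "GROUP BY bucketname";
}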
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java
deleted file mode 100644
index c7500a5..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapInnerQuery.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.bitmap;
-
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapQuery;
-
-/**
- * Representation of inner bitmap index SELECT query that scans bitmap index
- * tables for a pushed predicate
- */
-public class BitmapInnerQuery implements BitmapQuery {
- private String tableName;
- private ExprNodeDesc predicate;
- private String alias;
- private String queryStr;
-
- public BitmapInnerQuery(String tableName, ExprNodeDesc predicate, String alias) {
- this.tableName = tableName;
- this.predicate = predicate;
- this.alias = alias;
- constructQueryStr();
- }
-
- /**
- * Return a string representation of the query string for compilation
- */
- public String toString() {
- return queryStr;
- }
-
- /**
- * Construct a string representation of the query to be compiled
- */
- private void constructQueryStr() {
- StringBuilder sb = new StringBuilder();
- sb.append("(SELECT * FROM ");
- sb.append(HiveUtils.unparseIdentifier(tableName));
- sb.append(" WHERE ");
- sb.append(predicate.getExprString());
- sb.append(") ");
- sb.append(alias);
- queryStr = sb.toString();
- }
-
- /**
- * Return the assigned alias of the SELECT statement
- */
- public String getAlias() {
- return alias;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java
deleted file mode 100644
index 0f312a3..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectInput.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.bitmap;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.lazy.LazyLong;
-
-/**
- * An ObjectInput that allows for conversion from a List of LongWritable
- * to an EWAH-compressed bitmap.
- */
-public class BitmapObjectInput implements ObjectInput {
- Iterator<LongWritable> bufferIter;
- List<LongWritable> buffer;
-
- public BitmapObjectInput() {
- buffer = new ArrayList<LongWritable>();
- bufferIter = buffer.iterator();
- }
-
- public BitmapObjectInput(List<LongWritable> l) {
- readFromList(l);
- }
-
- public void readFromList(List<LongWritable> l) {
- buffer = l;
- bufferIter = buffer.iterator();
- }
-
- @Override
- public int available() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void close() throws IOException {
- throw new UnsupportedOperationException();
-
- }
-
- @Override
- public int read() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int read(byte[] arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int read(byte[] arg0, int arg1, int arg2) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Object readObject() throws ClassNotFoundException, IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long skip(long arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public boolean readBoolean() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public byte readByte() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public char readChar() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public double readDouble() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public float readFloat() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void readFully(byte[] arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void readFully(byte[] arg0, int arg1, int arg2) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int readInt() throws IOException {
- if (bufferIter.hasNext()) {
- LongObjectInspector loi = PrimitiveObjectInspectorFactory.writableLongObjectInspector;
- Long l = PrimitiveObjectInspectorUtils.getLong(bufferIter.next(), loi);
- return l.intValue();
- //return bufferIter.next().intValue();
- }
- else {
- throw new IOException();
- }
- }
-
- @Override
- public String readLine() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long readLong() throws IOException {
- //LongObjectInspector loi = PrimitiveObjectInspectorFactory.writableLongObjectInspector;
- if (bufferIter.hasNext()) {
- LongObjectInspector loi = PrimitiveObjectInspectorFactory.writableLongObjectInspector;
- return PrimitiveObjectInspectorUtils.getLong(bufferIter.next(), loi);
- //return bufferIter.next();
- }
- else {
- throw new IOException();
- }
- }
-
- @Override
- public short readShort() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String readUTF() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int readUnsignedByte() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int readUnsignedShort() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int skipBytes(int n) throws IOException {
- throw new UnsupportedOperationException();
- }
-
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java
deleted file mode 100644
index e9d959d..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapObjectOutput.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.bitmap;
-
-import java.io.IOException;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-
-/**
- * An ObjectOutput that allows conversion from an EWAH-compressed bitmap
- * to a List of LongWritable.
- */
-public class BitmapObjectOutput implements ObjectOutput {
- ArrayList<LongWritable> buffer = new ArrayList<LongWritable>();
-
- public List<LongWritable> list() {
- return buffer;
- }
-
- @Override
- public void close() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void flush() throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void write(int arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void write(byte[] arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void write(byte[] arg0, int arg1, int arg2) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeObject(Object arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeBoolean(boolean arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeByte(int arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeBytes(String arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeChar(int arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeChars(String arg0) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeDouble(double v) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeFloat(float v) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeInt(int v) throws IOException {
- buffer.add(new LongWritable(v));
- }
-
- @Override
- public void writeLong(long v) throws IOException {
- buffer.add(new LongWritable(v));
- }
-
- @Override
- public void writeShort(int v) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void writeUTF(String s) throws IOException {
- throw new UnsupportedOperationException();
- }
-
-}
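Taken together, the two removed adapters let an EWAH-compressed bitmap, which implements Externalizable, be flattened into the List of LongWritable stored in the index table's array<bigint> column and later rebuilt from it. A round-trip sketch, assuming the JavaEWAH EWAHCompressedBitmap class that Hive's EWAH_* functions build on:

import java.util.List;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput;
import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectOutput;
import com.googlecode.javaewah.EWAHCompressedBitmap;

public class BitmapRoundTripSketch {
  public static EWAHCompressedBitmap roundTrip(EWAHCompressedBitmap bitmap) throws Exception {
    // Serialize: the bitmap writes its ints/longs, which the adapter buffers as
    // LongWritables, i.e. the representation kept in the index table's array<bigint>.
    BitmapObjectOutput out = new BitmapObjectOutput();
    bitmap.writeExternal(out);
    List<LongWritable> asLongs = out.list();

    // Deserialize: feed the same list back through the ObjectInput adapter.
    EWAHCompressedBitmap copy = new EWAHCompressedBitmap();
    copy.readExternal(new BitmapObjectInput(asLongs));
    return copy;
  }
}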
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java
deleted file mode 100644
index 135b1ed..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapOuterQuery.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.bitmap;
-
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapQuery;
-
-/**
- * Representation of the outer query on bitmap indexes that JOINs the result of
- * inner SELECT scans on bitmap indexes (represented in BitmapQuery objects)
- * using EWAH_* bitwise operations
- */
-public class BitmapOuterQuery implements BitmapQuery {
- private String alias;
- private BitmapQuery lhs;
- private BitmapQuery rhs;
- private String queryStr;
-
- public BitmapOuterQuery(String alias, BitmapQuery lhs, BitmapQuery rhs) {
- this.alias = alias;
- this.lhs = lhs;
- this.rhs = rhs;
- constructQueryStr();
- }
-
- public String getAlias() {
- return alias;
- }
-
- /**
- * Return a string representation of the query for compilation
- */
- public String toString() {
- return queryStr;
- }
-
- /**
- * Construct a string representation of the query to be compiled
- */
- private void constructQueryStr() {
- StringBuilder sb = new StringBuilder();
- sb.append("(SELECT ");
- sb.append(lhs.getAlias());
- sb.append(".`_bucketname`, ");
- sb.append(rhs.getAlias());
- sb.append(".`_offset`, ");
- sb.append("EWAH_BITMAP_AND(");
- sb.append(lhs.getAlias());
- sb.append(".`_bitmaps`, ");
- sb.append(rhs.getAlias());
- sb.append(".`_bitmaps`) AS `_bitmaps` FROM ");
- sb.append(lhs.toString());
- sb.append(" JOIN ");
- sb.append(rhs.toString());
- sb.append(" ON ");
- sb.append(lhs.getAlias());
- sb.append(".`_bucketname` = ");
- sb.append(rhs.getAlias());
- sb.append(".`_bucketname` AND ");
- sb.append(lhs.getAlias());
- sb.append(".`_offset` = ");
- sb.append(rhs.getAlias());
- sb.append(".`_offset`) ");
- sb.append(this.alias);
- queryStr = sb.toString();
- }
-
-}
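For a concrete picture of what constructQueryStr() emits, here is an illustrative result for two inner scans aliased ind0 and ind1; the index table names and the pushed predicates are hypothetical:

public class BitmapOuterQueryShapeSketch {
  // Illustrative output of BitmapOuterQuery for lhs=ind0, rhs=ind1, alias=oind1.
  static final String EXAMPLE =
      "(SELECT ind0.`_bucketname`, ind1.`_offset`, "
      + "EWAH_BITMAP_AND(ind0.`_bitmaps`, ind1.`_bitmaps`) AS `_bitmaps` FROM "
      + "(SELECT * FROM `default__t_key_idx__` WHERE (key = 42)) ind0 JOIN "
      + "(SELECT * FROM `default__t_val_idx__` WHERE (value = 7)) ind1 "
      + "ON ind0.`_bucketname` = ind1.`_bucketname` AND ind0.`_offset` = ind1.`_offset`) oind1";
}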
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java
deleted file mode 100644
index 4b1ff46..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.bitmap;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-
-/**
- * Generic interface to representations of queries on bitmap indexes
- */
-public interface BitmapQuery {
- public String getAlias();
-
- public String toString();
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
deleted file mode 100644
index c4d02ee..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.compact;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.exec.FilterOperator;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.index.HiveIndexQueryContext;
-import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer;
-import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
-import org.apache.hadoop.hive.ql.index.TableBasedIndexHandler;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.MapWork;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-import org.apache.hadoop.hive.ql.stats.StatsUtils;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
-
-public class CompactIndexHandler extends TableBasedIndexHandler {
-
- // The names of the partition columns
- private Set<String> partitionCols;
- // Whether or not the conditions have been met to use the fact the index is sorted
- private boolean useSorted;
- private static final Logger LOG = LoggerFactory.getLogger(CompactIndexHandler.class.getName());
-
-
- @Override
- public void analyzeIndexDefinition(Table baseTable, Index index,
- Table indexTable) throws HiveException {
- StorageDescriptor storageDesc = index.getSd();
- if (this.usesIndexTable() && indexTable != null) {
- StorageDescriptor indexTableSd = storageDesc.deepCopy();
- List<FieldSchema> indexTblCols = indexTableSd.getCols();
- FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
- indexTblCols.add(bucketFileName);
- FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
- indexTblCols.add(offSets);
- indexTable.setSd(indexTableSd);
- }
- }
-
- @Override
- protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
- List<FieldSchema> indexField, boolean partitioned,
- PartitionDesc indexTblPartDesc, String indexTableName,
- PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
- LineageState lineageState) throws HiveException {
-
- String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField);
-
- //form a new insert overwrite query.
- StringBuilder command= new StringBuilder();
- LinkedHashMap<String, String> partSpec = indexTblPartDesc.getPartSpec();
-
- String fullIndexTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName),
- HiveUtils.unparseIdentifier(indexTableName));
- command.append("INSERT OVERWRITE TABLE " + fullIndexTableName);
- if (partitioned && indexTblPartDesc != null) {
- command.append(" PARTITION ( ");
- List<String> ret = getPartKVPairStringArray(partSpec);
- for (int i = 0; i < ret.size(); i++) {
- String partKV = ret.get(i);
- command.append(partKV);
- if (i < ret.size() - 1) {
- command.append(",");
- }
- }
- command.append(" ) ");
- }
-
- String fullBaseTableName = StatsUtils.getFullyQualifiedTableName(HiveUtils.unparseIdentifier(dbName),
- HiveUtils.unparseIdentifier(baseTableName));
- command.append(" SELECT ");
- command.append(indexCols);
- command.append(",");
-
- command.append(VirtualColumn.FILENAME.getName());
- command.append(",");
- command.append(" collect_set (");
- command.append(VirtualColumn.BLOCKOFFSET.getName());
- command.append(") ");
- command.append(" FROM " + fullBaseTableName);
- LinkedHashMap<String, String> basePartSpec = baseTablePartDesc.getPartSpec();
- if(basePartSpec != null) {
- command.append(" WHERE ");
- List<String> pkv = getPartKVPairStringArray(basePartSpec);
- for (int i = 0; i < pkv.size(); i++) {
- String partKV = pkv.get(i);
- command.append(partKV);
- if (i < pkv.size() - 1) {
- command.append(" AND ");
- }
- }
- }
- command.append(" GROUP BY ");
- command.append(indexCols + ", " + VirtualColumn.FILENAME.getName());
-
- HiveConf builderConf = new HiveConf(getConf(), CompactIndexHandler.class);
- builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false);
- builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false);
- builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
- Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
- command, partSpec, indexTableName, dbName, lineageState);
- return rootTask;
- }
-
- @Override
- public void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
- ParseContext pctx, HiveIndexQueryContext queryContext) {
-
- Index index = indexes.get(0);
- DecomposedPredicate decomposedPredicate = decomposePredicate(predicate, index,
- queryContext.getQueryPartitions());
-
- if (decomposedPredicate == null) {
- queryContext.setQueryTasks(null);
- return; // abort if we couldn't pull out anything from the predicate
- }
-
- // pass residual predicate back out for further processing
- queryContext.setResidualPredicate(decomposedPredicate.residualPredicate);
- // setup TableScanOperator to change input format for original query
- queryContext.setIndexInputFormat(HiveCompactIndexInputFormat.class.getName());
-
- // Build reentrant QL for index query
- StringBuilder qlCommand = new StringBuilder("INSERT OVERWRITE DIRECTORY ");
-
- String tmpFile = pctx.getContext().getMRTmpPath().toUri().toString();
- queryContext.setIndexIntermediateFile(tmpFile);
- qlCommand.append( "\"" + tmpFile + "\" "); // QL includes " around file name
- qlCommand.append("SELECT `_bucketname` , `_offsets` FROM ");
- qlCommand.append(HiveUtils.unparseIdentifier(index.getIndexTableName()));
- qlCommand.append(" WHERE ");
-
- String predicateString = decomposedPredicate.pushedPredicate.getExprString();
- qlCommand.append(predicateString);
-
- // generate tasks from index query string
- LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString());
- HiveConf queryConf = new HiveConf(pctx.getConf(), CompactIndexHandler.class);
- HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false);
- Driver driver = new Driver(queryConf, pctx.getQueryState().getLineageState());
- driver.compile(qlCommand.toString(), false);
-
- if (pctx.getConf().getBoolVar(ConfVars.HIVE_INDEX_COMPACT_BINARY_SEARCH) && useSorted) {
- // For now, only works if the predicate is a single condition
- MapWork work = null;
- String originalInputFormat = null;
- for (Task task : driver.getPlan().getRootTasks()) {
- // The index query should have one and only one map reduce task in the root tasks
- // Otherwise something is wrong, log the problem and continue using the default format
- if (task.getWork() instanceof MapredWork) {
- if (work != null) {
- LOG.error("Tried to use a binary search on a compact index but there were an " +
- "unexpected number (>1) of root level map reduce tasks in the " +
- "reentrant query plan.");
- work.setInputformat(null);
- work.setInputFormatSorted(false);
- break;
- }
- if (task.getWork() != null) {
- work = ((MapredWork)task.getWork()).getMapWork();
- }
- String inputFormat = work.getInputformat();
- originalInputFormat = inputFormat;
- if (inputFormat == null) {
- inputFormat = HiveConf.getVar(pctx.getConf(), HiveConf.ConfVars.HIVEINPUTFORMAT);
- }
-
- // We can only perform a binary search with HiveInputFormat and CombineHiveInputFormat
- // and BucketizedHiveInputFormat
- try {
- if (!HiveInputFormat.class.isAssignableFrom(JavaUtils.loadClass(inputFormat))) {
- work = null;
- break;
- }
- } catch (ClassNotFoundException e) {
- LOG.error("Map reduce work's input format class: " + inputFormat + " was not found. " +
- "Cannot use the fact the compact index is sorted.");
- work = null;
- break;
- }
-
- work.setInputFormatSorted(true);
- }
- }
-
- if (work != null) {
- // Find the filter operator and expr node which act on the index column and mark them
- if (!findIndexColumnFilter(work.getAliasToWork().values())) {
- LOG.error("Could not locate the index column's filter operator and expr node. Cannot " +
- "use the fact the compact index is sorted.");
- work.setInputformat(originalInputFormat);
- work.setInputFormatSorted(false);
- }
- }
- }
-
-
- queryContext.addAdditionalSemanticInputs(driver.getPlan().getInputs());
- queryContext.setQueryTasks(driver.getPlan().getRootTasks());
- return;
- }
-
- /**
- * Does a depth-first search on the operator tree, looking for a filter operator whose predicate
- * has a child that is a column not among the partition columns
- * @param operators
- * @return whether or not it has found its target
- */
- private boolean findIndexColumnFilter(
- Collection<Operator<? extends OperatorDesc>> operators) {
- for (Operator<? extends OperatorDesc> op : operators) {
- if (op instanceof FilterOperator &&
- ((FilterOperator)op).getConf().getPredicate().getChildren() != null) {
- // Is this the target
- if (findIndexColumnExprNodeDesc(((FilterOperator)op).getConf().getPredicate())) {
- ((FilterOperator)op).getConf().setSortedFilter(true);
- return true;
- }
- }
-
- // If the target has been found, no need to continue
- if (findIndexColumnFilter(op.getChildOperators())) {
- return true;
- }
- }
- return false;
- }
-
- private boolean findIndexColumnExprNodeDesc(ExprNodeDesc expression) {
- if (expression.getChildren() == null) {
- return false;
- }
-
- if (expression.getChildren().size() == 2) {
- ExprNodeColumnDesc columnDesc = null;
- if (expression.getChildren().get(0) instanceof ExprNodeColumnDesc) {
- columnDesc = (ExprNodeColumnDesc)expression.getChildren().get(0);
- } else if (expression.getChildren().get(1) instanceof ExprNodeColumnDesc) {
- columnDesc = (ExprNodeColumnDesc)expression.getChildren().get(1);
- }
-
- // Is this the target
- if (columnDesc != null && !partitionCols.contains(columnDesc.getColumn())) {
- assert expression instanceof ExprNodeGenericFuncDesc :
- "Expression containing index column is does not support sorting, should not try" +
- "and sort";
- ((ExprNodeGenericFuncDesc)expression).setSortedExpr(true);
- return true;
- }
- }
-
- for (ExprNodeDesc child : expression.getChildren()) {
- // If the target has been found, no need to continue
- if (findIndexColumnExprNodeDesc(child)) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * Split the predicate into the piece we can deal with (pushed), and the one we can't (residual)
- * @param predicate
- * @param index
- * @return
- */
- private DecomposedPredicate decomposePredicate(ExprNodeDesc predicate, Index index,
- Set<Partition> queryPartitions) {
- IndexPredicateAnalyzer analyzer = getIndexPredicateAnalyzer(index, queryPartitions);
- List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
- // split predicate into pushed (what we can handle), and residual (what we can't handle)
- ExprNodeGenericFuncDesc residualPredicate = (ExprNodeGenericFuncDesc)analyzer.
- analyzePredicate(predicate, searchConditions);
-
- if (searchConditions.size() == 0) {
- return null;
- }
-
- int numIndexCols = 0;
- for (IndexSearchCondition searchCondition : searchConditions) {
- if (!partitionCols.contains(searchCondition.getColumnDesc().getColumn())) {
- numIndexCols++;
- }
- }
-
- // For now, only works if the predicate has a single condition on an index column
- if (numIndexCols == 1) {
- useSorted = true;
- } else {
- useSorted = false;
- }
-
- DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
- decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions);
- decomposedPredicate.residualPredicate = residualPredicate;
-
- return decomposedPredicate;
- }
-
- /**
- * Instantiate a new predicate analyzer suitable for determining
- * whether we can use an index, based on rules for indexes in
- * WHERE clauses that we support
- *
- * @return preconfigured predicate analyzer for WHERE queries
- */
- private IndexPredicateAnalyzer getIndexPredicateAnalyzer(Index index, Set<Partition> queryPartitions) {
- IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
-
- analyzer.addComparisonOp(GenericUDFOPEqual.class.getName());
- analyzer.addComparisonOp(GenericUDFOPLessThan.class.getName());
- analyzer.addComparisonOp(GenericUDFOPEqualOrLessThan.class.getName());
- analyzer.addComparisonOp(GenericUDFOPGreaterThan.class.getName());
- analyzer.addComparisonOp(GenericUDFOPEqualOrGreaterThan.class.getName());
-
- // only return results for columns in this index
- List<FieldSchema> columnSchemas = index.getSd().getCols();
- for (FieldSchema column : columnSchemas) {
- analyzer.allowColumnName(column.getName());
- }
-
- // partitioned columns are treated as if they have indexes so that the partitions
- // are used during the index query generation
- partitionCols = new HashSet<String>();
- for (Partition part : queryPartitions) {
- if (part.getSpec().isEmpty()) {
- continue; // empty partitions are from whole tables, so we don't want to add them in
- }
- for (String column : part.getSpec().keySet()) {
- analyzer.allowColumnName(column);
- partitionCols.add(column);
- }
- }
-
- return analyzer;
- }
-
-
- @Override
- public boolean checkQuerySize(long querySize, HiveConf hiveConf) {
- long minSize = hiveConf.getLongVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER_COMPACT_MINSIZE);
- long maxSize = hiveConf.getLongVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER_COMPACT_MAXSIZE);
- if (maxSize < 0) {
- maxSize = Long.MAX_VALUE;
- }
- return (querySize > minSize & querySize < maxSize);
- }
-
- @Override
- public boolean usesIndexTable() {
- return true;
- }
-
-}
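
For reference, the handler deleted above answered a filtered query by first running a
re-entrant query against the compact index table: it selected the `_bucketname` and
`_offsets` columns, pushed down the part of the predicate the index could evaluate, and
wrote the result to a scratch directory for the index-aware input format to consume.
A minimal sketch of that string construction follows; the scratch path, index table name
and predicate are illustrative placeholders, not values taken from this patch.

    public class CompactIndexQuerySketch {
      public static void main(String[] args) {
        String tmpFile = "/tmp/hive-index-scratch";           // assumed scratch path
        String indexTable = "`default__src_src_index_2__`";   // assumed index table name
        String pushedPredicate = "(key > 6)";                 // assumed pushed-down predicate

        StringBuilder ql = new StringBuilder("INSERT OVERWRITE DIRECTORY ");
        ql.append("\"").append(tmpFile).append("\" ");        // QL expects quotes around the path
        ql.append("SELECT `_bucketname`, `_offsets` FROM ");
        ql.append(indexTable);
        ql.append(" WHERE ");
        ql.append(pushedPredicate);

        // The real handler compiled this string with a nested Driver; here we only print it.
        System.out.println(ql);
      }
    }
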
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java
deleted file mode 100644
index 6d9c968..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/HiveCompactIndexInputFormat.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index.compact;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-
-public class HiveCompactIndexInputFormat extends HiveIndexedInputFormat {
-
- public static final Logger l4j =
- LoggerFactory.getLogger(HiveCompactIndexInputFormat.class.getSimpleName());
-
- public HiveCompactIndexInputFormat() {
- super("hive.index.compact.file");
- }
-}
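
The removed input format carried no logic of its own beyond naming the job property
("hive.index.compact.file") through which the re-entrant index query's output was handed
to it. A rough wiring sketch against the pre-patch tree, under the assumption that
HiveIndexedInputFormat resolves that property from the job configuration to locate the
index result file; the path below is a placeholder.

    import org.apache.hadoop.mapred.JobConf;

    public class CompactIndexJobSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // Assumed handoff: the property names the file written by the index query.
        job.set("hive.index.compact.file", "/tmp/hive-index-scratch");
        job.setInputFormat(org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat.class);
      }
    }
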
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 5cd30cb..912eb10 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -23,6 +23,7 @@ import java.io.DataOutput;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
+import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hive.common.util.Ref;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
@@ -105,6 +107,22 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
protected Map<Path, PartitionDesc> pathToPartitionInfo;
protected MapWork mrwork;
+ public static final class HiveInputSplitComparator implements Comparator<HiveInputSplit> {
+ @Override
+ public int compare(HiveInputSplit o1, HiveInputSplit o2) {
+ int pathCompare = comparePath(o1.getPath(), o2.getPath());
+ if (pathCompare != 0) {
+ return pathCompare;
+ }
+ return Long.compare(o1.getStart(), o2.getStart());
+ }
+
+ private int comparePath(Path p1, Path p2) {
+ return p1.compareTo(p2);
+ }
+ }
+
+
/**
* HiveInputSplit encapsulates an InputSplit with its corresponding
* inputFormatClass. The reason that it derives from FileSplit is to make sure
@@ -113,6 +131,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
public static class HiveInputSplit extends FileSplit implements InputSplit,
Configurable {
+
InputSplit inputSplit;
String inputFormatClassName;
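
The comparator added above gives HiveInputSplit a deterministic ordering: splits are
compared by path first and, within the same file, by start offset. A small usage sketch
(the wrapper class and method name are illustrative, not part of the patch):

    import java.util.Arrays;

    import org.apache.hadoop.hive.ql.io.HiveInputFormat;
    import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;

    public class SplitSortSketch {
      // Sorts an array of splits by (path, start) using the new comparator.
      static void sortSplits(HiveInputSplit[] splits) {
        Arrays.sort(splits, new HiveInputFormat.HiveInputSplitComparator());
      }
    }
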
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
index 2930a46..bb75ebf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
@@ -161,7 +161,9 @@ public class ExternalCache implements FooterCache {
private boolean processBbResult(
ByteBuffer bb, int ix, HdfsFileStatusWithId file, OrcTail[] result) throws IOException {
- if (bb == null) return true;
+ if (bb == null) {
+ return true;
+ }
result[ix] = createOrcTailFromMs(file, bb);
if (result[ix] == null) {
return false;
@@ -173,7 +175,10 @@ public class ExternalCache implements FooterCache {
private void processPpdResult(MetadataPpdResult mpr, HdfsFileStatusWithId file,
int ix, OrcTail[] result, ByteBuffer[] ppdResult) throws IOException {
- if (mpr == null) return; // This file is unknown to metastore.
+ if (mpr == null) {
+ // This file is unknown to metastore.
+ return;
+ }
ppdResult[ix] = mpr.isSetIncludeBitset() ? mpr.bufferForIncludeBitset() : NO_SPLIT_AFTER_PPD;
if (mpr.isSetMetadata()) {
@@ -187,7 +192,9 @@ public class ExternalCache implements FooterCache {
private List<Long> determineFileIdsToQuery(
List<HdfsFileStatusWithId> files, OrcTail[] result, HashMap<Long, Integer> posMap) {
for (int i = 0; i < result.length; ++i) {
- if (result[i] != null) continue;
+ if (result[i] != null) {
+ continue;
+ }
HdfsFileStatusWithId file = files.get(i);
final FileStatus fs = file.getFileStatus();
Long fileId = file.getFileId();
@@ -224,9 +231,13 @@ public class ExternalCache implements FooterCache {
}
private ByteBuffer getSerializedSargForMetastore(boolean isOriginal) {
- if (sarg == null) return null;
+ if (sarg == null) {
+ return null;
+ }
ByteBuffer serializedSarg = isOriginal ? sargIsOriginal : sargNotIsOriginal;
- if (serializedSarg != null) return serializedSarg;
+ if (serializedSarg != null) {
+ return serializedSarg;
+ }
SearchArgument sarg2 = sarg;
Kryo kryo = SerializationUtilities.borrowKryo();
try {
@@ -292,7 +303,9 @@ public class ExternalCache implements FooterCache {
private static OrcTail createOrcTailFromMs(
HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
- if (bb == null) return null;
+ if (bb == null) {
+ return null;
+ }
FileStatus fs = file.getFileStatus();
ByteBuffer copy = bb.duplicate();
try {
[03/15] hive git commit: HIVE-18448: Drop Support For Indexes From
Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_creation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_creation.q.out b/ql/src/test/results/clientpositive/index_creation.q.out
deleted file mode 100644
index 49b3f11..0000000
--- a/ql/src/test/results/clientpositive/index_creation.q.out
+++ /dev/null
@@ -1,321 +0,0 @@
-PREHOOK: query: drop index src_index_2 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_2 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_3 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_3 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_4 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_4 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_5 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_5 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_6 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_6 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_7 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_7 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_8 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_8 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_9 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_9 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop table `_t`
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table `_t`
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_2__
-PREHOOK: query: desc extended default__src_src_index_2__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_2__
-POSTHOOK: query: desc extended default__src_src_index_2__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_2__
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_idx_src_index_3
-PREHOOK: query: desc extended src_idx_src_index_3
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_idx_src_index_3
-POSTHOOK: query: desc extended src_idx_src_index_3
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_idx_src_index_3
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_4__
-PREHOOK: query: desc extended default__src_src_index_4__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_4__
-POSTHOOK: query: desc extended default__src_src_index_4__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_4__
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\'
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\'
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_5__
-PREHOOK: query: desc extended default__src_src_index_5__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_5__
-POSTHOOK: query: desc extended default__src_src_index_5__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_5__
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_6__
-PREHOOK: query: desc extended default__src_src_index_6__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_6__
-POSTHOOK: query: desc extended default__src_src_index_6__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_6__
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_idx_src_index_7
-PREHOOK: query: desc extended src_idx_src_index_7
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@src_idx_src_index_7
-POSTHOOK: query: desc extended src_idx_src_index_7
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@src_idx_src_index_7
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_8__
-PREHOOK: query: desc extended default__src_src_index_8__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_8__
-POSTHOOK: query: desc extended default__src_src_index_8__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_8__
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2")
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2")
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index_9__
-PREHOOK: query: desc extended default__src_src_index_9__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__src_src_index_9__
-POSTHOOK: query: desc extended default__src_src_index_9__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__src_src_index_9__
-key string default
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: create table `_t`(`_i` int, `_j` int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@_t
-POSTHOOK: query: create table `_t`(`_i` int, `_j` int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@_t
-PREHOOK: query: create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@_t
-POSTHOOK: query: create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@_t
-POSTHOOK: Output: default@default___t_x__
-PREHOOK: query: alter index x on `_t` rebuild
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@_t
-PREHOOK: Output: default@default___t_x__
-POSTHOOK: query: alter index x on `_t` rebuild
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@_t
-POSTHOOK: Output: default@default___t_x__
-POSTHOOK: Lineage: default___t_x__._bucketname SIMPLE [(_t)_t.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default___t_x__._j SIMPLE [(_t)_t.FieldSchema(name:_j, type:int, comment:null), ]
-POSTHOOK: Lineage: default___t_x__._offsets EXPRESSION [(_t)_t.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-PREHOOK: query: create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED
-REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@_t
-POSTHOOK: query: create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED
-REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@_t
-POSTHOOK: Output: default@default___t_x2__
-PREHOOK: query: alter index x2 on `_t` rebuild
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@_t
-PREHOOK: Output: default@default___t_x2__
-POSTHOOK: query: alter index x2 on `_t` rebuild
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@_t
-POSTHOOK: Output: default@default___t_x2__
-POSTHOOK: Lineage: default___t_x2__._bucketname SIMPLE [(_t)_t.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default___t_x2__._i SIMPLE [(_t)_t.FieldSchema(name:_i, type:int, comment:null), ]
-POSTHOOK: Lineage: default___t_x2__._j SIMPLE [(_t)_t.FieldSchema(name:_j, type:int, comment:null), ]
-POSTHOOK: Lineage: default___t_x2__._offsets EXPRESSION [(_t)_t.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-PREHOOK: query: drop index src_index_2 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_2 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_3 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_3 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_4 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_4 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_5 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_5 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_6 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_6 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_7 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_7 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_8 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_8 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop index src_index_9 on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index_9 on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: drop table `_t`
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@_t
-PREHOOK: Output: default@_t
-POSTHOOK: query: drop table `_t`
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@_t
-POSTHOOK: Output: default@_t
-PREHOOK: query: show tables
-PREHOOK: type: SHOWTABLES
-PREHOOK: Input: database:default
-POSTHOOK: query: show tables
-POSTHOOK: type: SHOWTABLES
-POSTHOOK: Input: database:default
-alltypesorc
-alltypesparquet
-cbo_t1
-cbo_t2
-cbo_t3
-lineitem
-part
-src
-src1
-src_cbo
-src_json
-src_sequencefile
-src_thrift
-srcbucket
-srcbucket2
-srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_in_db.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_in_db.q.out b/ql/src/test/results/clientpositive/index_in_db.q.out
deleted file mode 100644
index 6d7b0c3..0000000
--- a/ql/src/test/results/clientpositive/index_in_db.q.out
+++ /dev/null
@@ -1,57 +0,0 @@
-PREHOOK: query: drop database if exists index_test_db cascade
-PREHOOK: type: DROPDATABASE
-POSTHOOK: query: drop database if exists index_test_db cascade
-POSTHOOK: type: DROPDATABASE
-PREHOOK: query: create database index_test_db
-PREHOOK: type: CREATEDATABASE
-PREHOOK: Output: database:index_test_db
-POSTHOOK: query: create database index_test_db
-POSTHOOK: type: CREATEDATABASE
-POSTHOOK: Output: database:index_test_db
-PREHOOK: query: use index_test_db
-PREHOOK: type: SWITCHDATABASE
-PREHOOK: Input: database:index_test_db
-POSTHOOK: query: use index_test_db
-POSTHOOK: type: SWITCHDATABASE
-POSTHOOK: Input: database:index_test_db
-PREHOOK: query: create table testtb (id int, name string)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:index_test_db
-PREHOOK: Output: index_test_db@testtb
-POSTHOOK: query: create table testtb (id int, name string)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:index_test_db
-POSTHOOK: Output: index_test_db@testtb
-PREHOOK: query: create index id_index on table testtb (id) as 'COMPACT' WITH DEFERRED REBUILD in table testdb_id_idx_tb
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: index_test_db@testtb
-POSTHOOK: query: create index id_index on table testtb (id) as 'COMPACT' WITH DEFERRED REBUILD in table testdb_id_idx_tb
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: index_test_db@testtb
-POSTHOOK: Output: index_test_db@testdb_id_idx_tb
-PREHOOK: query: use default
-PREHOOK: type: SWITCHDATABASE
-PREHOOK: Input: database:default
-POSTHOOK: query: use default
-POSTHOOK: type: SWITCHDATABASE
-POSTHOOK: Input: database:default
-PREHOOK: query: select * from index_test_db.testtb where id>2
-PREHOOK: type: QUERY
-PREHOOK: Input: index_test_db@testtb
-#### A masked pattern was here ####
-POSTHOOK: query: select * from index_test_db.testtb where id>2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: index_test_db@testtb
-#### A masked pattern was here ####
-PREHOOK: query: use index_test_db
-PREHOOK: type: SWITCHDATABASE
-PREHOOK: Input: database:index_test_db
-POSTHOOK: query: use index_test_db
-POSTHOOK: type: SWITCHDATABASE
-POSTHOOK: Input: database:index_test_db
-PREHOOK: query: drop index id_index on testtb
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: index_test_db@testtb
-POSTHOOK: query: drop index id_index on testtb
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: index_test_db@testtb
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_serde.q.out b/ql/src/test/results/clientpositive/index_serde.q.out
deleted file mode 100644
index b5c81e9..0000000
--- a/ql/src/test/results/clientpositive/index_serde.q.out
+++ /dev/null
@@ -1,242 +0,0 @@
-PREHOOK: query: CREATE TABLE doctors
-ROW FORMAT
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-STORED AS
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-TBLPROPERTIES ('avro.schema.literal'='{
- "namespace": "testing.hive.avro.serde",
- "name": "doctors",
- "type": "record",
- "fields": [
- {
- "name":"number",
- "type":"int",
- "doc":"Order of playing the role"
- },
- {
- "name":"first_name",
- "type":"string",
- "doc":"first name of actor playing role"
- },
- {
- "name":"last_name",
- "type":"string",
- "doc":"last name of actor playing role"
- }
- ]
-}')
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@doctors
-POSTHOOK: query: CREATE TABLE doctors
-ROW FORMAT
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-STORED AS
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-TBLPROPERTIES ('avro.schema.literal'='{
- "namespace": "testing.hive.avro.serde",
- "name": "doctors",
- "type": "record",
- "fields": [
- {
- "name":"number",
- "type":"int",
- "doc":"Order of playing the role"
- },
- {
- "name":"first_name",
- "type":"string",
- "doc":"first name of actor playing role"
- },
- {
- "name":"last_name",
- "type":"string",
- "doc":"last name of actor playing role"
- }
- ]
-}')
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@doctors
-PREHOOK: query: DESCRIBE doctors
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@doctors
-POSTHOOK: query: DESCRIBE doctors
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@doctors
-number int Order of playing the role
-first_name string first name of actor playing role
-last_name string last name of actor playing role
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@doctors
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@doctors
-PREHOOK: query: CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@doctors
-POSTHOOK: query: CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@doctors
-POSTHOOK: Output: default@default__doctors_doctors_index__
-PREHOOK: query: DESCRIBE EXTENDED default__doctors_doctors_index__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__doctors_doctors_index__
-POSTHOOK: query: DESCRIBE EXTENDED default__doctors_doctors_index__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__doctors_doctors_index__
-number int Order of playing the role
-_bucketname string
-_offsets array<bigint>
-
-#### A masked pattern was here ####
-PREHOOK: query: ALTER INDEX doctors_index ON doctors REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@doctors
-PREHOOK: Output: default@default__doctors_doctors_index__
-POSTHOOK: query: ALTER INDEX doctors_index ON doctors REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@doctors
-POSTHOOK: Output: default@default__doctors_doctors_index__
-POSTHOOK: Lineage: default__doctors_doctors_index__._bucketname SIMPLE [(doctors)doctors.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__doctors_doctors_index__._offsets EXPRESSION [(doctors)doctors.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__doctors_doctors_index__.number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:Order of playing the role), ]
-PREHOOK: query: EXPLAIN SELECT * FROM doctors WHERE number > 6
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM doctors WHERE number > 6
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__doctors_doctors_index__
- filterExpr: (number > 6) (type: boolean)
- Filter Operator
- predicate: (number > 6) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: doctors
- filterExpr: (number > 6) (type: boolean)
- Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (number > 6) (type: boolean)
- Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: number (type: int), first_name (type: string), last_name (type: string)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 5210 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM doctors WHERE number > 6
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__doctors_doctors_index__
-PREHOOK: Input: default@doctors
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM doctors WHERE number > 6
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__doctors_doctors_index__
-POSTHOOK: Input: default@doctors
-#### A masked pattern was here ####
-10 David Tennant
-11 Matt Smith
-7 Sylvester McCoy
-8 Paul McGann
-9 Christopher Eccleston
-PREHOOK: query: DROP INDEX doctors_index ON doctors
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@doctors
-POSTHOOK: query: DROP INDEX doctors_index ON doctors
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@doctors
-PREHOOK: query: DROP TABLE doctors
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@doctors
-PREHOOK: Output: default@doctors
-POSTHOOK: query: DROP TABLE doctors
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@doctors
-POSTHOOK: Output: default@doctors
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_skewtable.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_skewtable.q.out b/ql/src/test/results/clientpositive/index_skewtable.q.out
deleted file mode 100644
index c513a2a..0000000
--- a/ql/src/test/results/clientpositive/index_skewtable.q.out
+++ /dev/null
@@ -1,204 +0,0 @@
-PREHOOK: query: CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@kv
-POSTHOOK: query: CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@kv
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@kv
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@kv
-PREHOOK: query: CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@kv
-POSTHOOK: query: CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@kv
-POSTHOOK: Output: default@default__kv_kv_index__
-PREHOOK: query: DESCRIBE FORMATTED default__kv_kv_index__
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@default__kv_kv_index__
-POSTHOOK: query: DESCRIBE FORMATTED default__kv_kv_index__
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@default__kv_kv_index__
-# col_name data_type comment
-value string
-_bucketname string
-_offsets array<bigint>
-
-# Detailed Table Information
-Database: default
-#### A masked pattern was here ####
-Retention: 0
-#### A masked pattern was here ####
-Table Type: INDEX_TABLE
-Table Parameters:
-#### A masked pattern was here ####
-
-# Storage Information
-SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-InputFormat: org.apache.hadoop.mapred.TextInputFormat
-OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
-Compressed: No
-Num Buckets: -1
-Bucket Columns: []
-Sort Columns: [Order(col:value, order:1)]
-PREHOOK: query: ALTER INDEX kv_index ON kv REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@kv
-PREHOOK: Output: default@default__kv_kv_index__
-POSTHOOK: query: ALTER INDEX kv_index ON kv REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@kv
-POSTHOOK: Output: default@default__kv_kv_index__
-POSTHOOK: Lineage: default__kv_kv_index__._bucketname SIMPLE [(kv)kv.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__kv_kv_index__._offsets EXPRESSION [(kv)kv.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__kv_kv_index__.value SIMPLE [(kv)kv.FieldSchema(name:value, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__kv_kv_index__
- filterExpr: (value > '15') (type: boolean)
- Filter Operator
- predicate: (value > '15') (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: kv
- filterExpr: (value > '15') (type: boolean)
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (value > '15') (type: boolean)
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string)
- sort order: +
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col0 (type: string)
- Reduce Operator Tree:
- Select Operator
- expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM kv WHERE value > '15' ORDER BY value
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__kv_kv_index__
-PREHOOK: Input: default@kv
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM kv WHERE value > '15' ORDER BY value
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__kv_kv_index__
-POSTHOOK: Input: default@kv
-#### A masked pattern was here ####
-8 18
-8 18
-2 22
-PREHOOK: query: DROP INDEX kv_index ON kv
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@kv
-POSTHOOK: query: DROP INDEX kv_index ON kv
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@kv
-PREHOOK: query: DROP TABLE kv
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@kv
-PREHOOK: Output: default@kv
-POSTHOOK: query: DROP TABLE kv
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@kv
-POSTHOOK: Output: default@kv
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_stale.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_stale.q.out b/ql/src/test/results/clientpositive/index_stale.q.out
deleted file mode 100644
index 7883fcc..0000000
--- a/ql/src/test/results/clientpositive/index_stale.q.out
+++ /dev/null
@@ -1,106 +0,0 @@
-PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp
-POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp
-POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-PREHOOK: query: ALTER INDEX temp_index ON temp REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@default__temp_temp_index__
-POSTHOOK: query: ALTER INDEX temp_index ON temp REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp
-POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp
-POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: temp
- filterExpr: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), val (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT * FROM temp WHERE key = 86
-PREHOOK: type: QUERY
-PREHOOK: Input: default@temp
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM temp WHERE key = 86
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@temp
-#### A masked pattern was here ####
-86 val_86
-PREHOOK: query: DROP index temp_index on temp
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: DROP index temp_index on temp
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@temp
-PREHOOK: query: DROP table temp
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: DROP table temp
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_stale_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_stale_partitioned.q.out b/ql/src/test/results/clientpositive/index_stale_partitioned.q.out
deleted file mode 100644
index 2138c33..0000000
--- a/ql/src/test/results/clientpositive/index_stale_partitioned.q.out
+++ /dev/null
@@ -1,115 +0,0 @@
-PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@temp
-POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@temp
-PREHOOK: query: ALTER TABLE temp ADD PARTITION (foo = 'bar')
-PREHOOK: type: ALTERTABLE_ADDPARTS
-PREHOOK: Output: default@temp
-POSTHOOK: query: ALTER TABLE temp ADD PARTITION (foo = 'bar')
-POSTHOOK: type: ALTERTABLE_ADDPARTS
-POSTHOOK: Output: default@temp
-POSTHOOK: Output: default@temp@foo=bar
-PREHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src WHERE key < 50
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp@foo=bar
-POSTHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src WHERE key < 50
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp@foo=bar
-POSTHOOK: Lineage: temp PARTITION(foo=bar).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp PARTITION(foo=bar).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@default__temp_temp_index__
-PREHOOK: query: ALTER INDEX temp_index ON temp PARTITION (foo = 'bar') REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@temp
-PREHOOK: Input: default@temp@foo=bar
-PREHOOK: Output: default@default__temp_temp_index__@foo=bar
-POSTHOOK: query: ALTER INDEX temp_index ON temp PARTITION (foo = 'bar') REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@temp
-POSTHOOK: Input: default@temp@foo=bar
-POSTHOOK: Output: default@default__temp_temp_index__@foo=bar
-POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar)._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar)._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar).key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ]
-PREHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@temp@foo=bar
-POSTHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@temp@foo=bar
-POSTHOOK: Lineage: temp PARTITION(foo=bar).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: temp PARTITION(foo=bar).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__temp_temp_index__
-PREHOOK: Input: default@default__temp_temp_index__@foo=bar
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__temp_temp_index__
-POSTHOOK: Input: default@default__temp_temp_index__@foo=bar
-#### A masked pattern was here ####
-PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- TableScan
- alias: temp
- filterExpr: ((UDFToDouble(key) = 86.0) and (foo = 'bar')) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), val (type: string), 'bar' (type: string)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
- ListSink
-
-PREHOOK: query: SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@temp
-PREHOOK: Input: default@temp@foo=bar
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM temp WHERE key = 86 AND foo = 'bar'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@temp
-POSTHOOK: Input: default@temp@foo=bar
-#### A masked pattern was here ####
-86 val_86 bar
-PREHOOK: query: DROP index temp_index on temp
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@temp
-POSTHOOK: query: DROP index temp_index on temp
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@temp
-PREHOOK: query: DROP table temp
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@temp
-PREHOOK: Output: default@temp
-POSTHOOK: query: DROP table temp
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@temp
-POSTHOOK: Output: default@temp
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/show_functions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_functions.q.out b/ql/src/test/results/clientpositive/show_functions.q.out
index fcbf2c5..4e8ceaf 100644
--- a/ql/src/test/results/clientpositive/show_functions.q.out
+++ b/ql/src/test/results/clientpositive/show_functions.q.out
@@ -82,10 +82,6 @@ e
elt
encode
enforce_constraint
-ewah_bitmap
-ewah_bitmap_and
-ewah_bitmap_empty
-ewah_bitmap_or
exp
explode
extract_union
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/spark/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_view.q.out b/ql/src/test/results/clientpositive/spark/union_view.q.out
index 1b73ddb..d960a30 100644
--- a/ql/src/test/results/clientpositive/spark/union_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_view.q.out
@@ -6,13 +6,6 @@ POSTHOOK: query: CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_union_1
-PREHOOK: query: CREATE INDEX src_union_1_key_idx ON TABLE src_union_1(key) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_union_1
-POSTHOOK: query: CREATE INDEX src_union_1_key_idx ON TABLE src_union_1(key) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_union_1
-POSTHOOK: Output: default@default__src_union_1_src_union_1_key_idx__
PREHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -21,13 +14,6 @@ POSTHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_union_2
-PREHOOK: query: CREATE INDEX src_union_2_key_idx ON TABLE src_union_2(key) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_union_2
-POSTHOOK: query: CREATE INDEX src_union_2_key_idx ON TABLE src_union_2(key) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_union_2
-POSTHOOK: Output: default@default__src_union_2_src_union_2_key_idx__
PREHOOK: query: CREATE TABLE src_union_3(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -36,13 +22,6 @@ POSTHOOK: query: CREATE TABLE src_union_3(key int, value string) PARTITIONED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_union_3
-PREHOOK: query: CREATE INDEX src_union_3_key_idx ON TABLE src_union_3(key) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_union_3
-POSTHOOK: query: CREATE INDEX src_union_3_key_idx ON TABLE src_union_3(key) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_union_3
-POSTHOOK: Output: default@default__src_union_3_src_union_3_key_idx__
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -494,9 +473,9 @@ STAGE PLANS:
86 val_86 3
86 val_86 3
86 val_86 3
-86 val_86 1
86 val_86 2
86 val_86 2
+86 val_86 1
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
index 8f463e8..55db575 100644
--- a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
+++ b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out
@@ -63,31 +63,6 @@ POSTHOOK: Input: default@s/c
97 val_97
98 val_98
98 val_98
-PREHOOK: query: CREATE INDEX src_index ON TABLE `s/c`(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@s/c
-POSTHOOK: query: CREATE INDEX src_index ON TABLE `s/c`(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@s/c
-POSTHOOK: Output: default@default__s/c_src_index__
-PREHOOK: query: ALTER INDEX src_index ON `s/c` REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@s/c
-PREHOOK: Output: default@default__s/c_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON `s/c` REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@s/c
-POSTHOOK: Output: default@default__s/c_src_index__
-POSTHOOK: Lineage: default__s/c_src_index__._bucketname SIMPLE [(s/c)s/c.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__s/c_src_index__._offsets EXPRESSION [(s/c)s/c.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__s/c_src_index__.key SIMPLE [(s/c)s/c.FieldSchema(name:key, type:string, comment:default), ]
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__s/c_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__s/c_src_index__
-#### A masked pattern was here ####
PREHOOK: query: EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
@@ -151,143 +126,3 @@ POSTHOOK: Input: default@s/c
97 val_97
98 val_98
98 val_98
-PREHOOK: query: EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__s/c_src_index__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: s/c
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__s/c_src_index__
-PREHOOK: Input: default@s/c
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__s/c_src_index__
-POSTHOOK: Input: default@s/c
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: DROP INDEX src_index on `s/c`
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@s/c
-POSTHOOK: query: DROP INDEX src_index on `s/c`
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@s/c
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/unicode_comments.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unicode_comments.q.out b/ql/src/test/results/clientpositive/unicode_comments.q.out
index 9a12c55..ae8c3e5 100644
--- a/ql/src/test/results/clientpositive/unicode_comments.q.out
+++ b/ql/src/test/results/clientpositive/unicode_comments.q.out
@@ -35,13 +35,6 @@ POSTHOOK: Input: unicode_comments_db@unicode_comments_tbl1
POSTHOOK: Output: database:unicode_comments_db
POSTHOOK: Output: unicode_comments_db@unicode_comments_view1
POSTHOOK: Lineage: unicode_comments_view1.col1 SIMPLE [(unicode_comments_tbl1)unicode_comments_tbl1.FieldSchema(name:col1, type:string, comment:第一列), ]
-PREHOOK: query: create index index2 on table unicode_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment '索引'
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: unicode_comments_db@unicode_comments_tbl1
-POSTHOOK: query: create index index2 on table unicode_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment '索引'
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: unicode_comments_db@unicode_comments_tbl1
-POSTHOOK: Output: unicode_comments_db@unicode_comments_db__unicode_comments_tbl1_index2__
PREHOOK: query: describe database extended unicode_comments_db
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:unicode_comments_db
@@ -147,23 +140,15 @@ Sort Columns: []
View Original Text: select col1 from unicode_comments_tbl1
View Expanded Text: SELECT `col1` AS `col1` FROM (select `unicode_comments_tbl1`.`col1` from `unicode_comments_db`.`unicode_comments_tbl1`) `unicode_comments_db.unicode_comments_view1`
View Rewrite Enabled: No
-PREHOOK: query: show formatted index on unicode_comments_tbl1
-PREHOOK: type: SHOWINDEXES
-POSTHOOK: query: show formatted index on unicode_comments_tbl1
-POSTHOOK: type: SHOWINDEXES
-idx_name tab_name col_names idx_tab_name idx_type comment
-index2 unicode_comments_tbl1 col1 unicode_comments_db__unicode_comments_tbl1_index2__ compact 索引
PREHOOK: query: drop database unicode_comments_db cascade
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:unicode_comments_db
PREHOOK: Output: database:unicode_comments_db
-PREHOOK: Output: unicode_comments_db@unicode_comments_db__unicode_comments_tbl1_index2__
PREHOOK: Output: unicode_comments_db@unicode_comments_tbl1
PREHOOK: Output: unicode_comments_db@unicode_comments_view1
POSTHOOK: query: drop database unicode_comments_db cascade
POSTHOOK: type: DROPDATABASE
POSTHOOK: Input: database:unicode_comments_db
POSTHOOK: Output: database:unicode_comments_db
-POSTHOOK: Output: unicode_comments_db@unicode_comments_db__unicode_comments_tbl1_index2__
POSTHOOK: Output: unicode_comments_db@unicode_comments_tbl1
POSTHOOK: Output: unicode_comments_db@unicode_comments_view1
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_view.q.out b/ql/src/test/results/clientpositive/union_view.q.out
index 301f521..becad2c 100644
--- a/ql/src/test/results/clientpositive/union_view.q.out
+++ b/ql/src/test/results/clientpositive/union_view.q.out
@@ -6,13 +6,6 @@ POSTHOOK: query: CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_union_1
-PREHOOK: query: CREATE INDEX src_union_1_key_idx ON TABLE src_union_1(key) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_union_1
-POSTHOOK: query: CREATE INDEX src_union_1_key_idx ON TABLE src_union_1(key) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_union_1
-POSTHOOK: Output: default@default__src_union_1_src_union_1_key_idx__
PREHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -21,13 +14,6 @@ POSTHOOK: query: CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_union_2
-PREHOOK: query: CREATE INDEX src_union_2_key_idx ON TABLE src_union_2(key) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_union_2
-POSTHOOK: query: CREATE INDEX src_union_2_key_idx ON TABLE src_union_2(key) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_union_2
-POSTHOOK: Output: default@default__src_union_2_src_union_2_key_idx__
PREHOOK: query: CREATE TABLE src_union_3(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
@@ -36,13 +22,6 @@ POSTHOOK: query: CREATE TABLE src_union_3(key int, value string) PARTITIONED BY
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@src_union_3
-PREHOOK: query: CREATE INDEX src_union_3_key_idx ON TABLE src_union_3(key) AS 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_union_3
-POSTHOOK: query: CREATE INDEX src_union_3_key_idx ON TABLE src_union_3(key) AS 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_union_3
-POSTHOOK: Output: default@default__src_union_3_src_union_3_key_idx__
STAGE DEPENDENCIES:
Stage-0 is a root stage
@@ -145,36 +124,10 @@ STAGE PLANS:
1000
1000
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_1_src_union_1_key_idx__
- filterExpr: ((key = 86) and (ds = '1')) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -264,36 +217,10 @@ STAGE PLANS:
ListSink
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_2_src_union_2_key_idx__
- filterExpr: ((key = 86) and (ds = '2')) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -383,36 +310,10 @@ STAGE PLANS:
ListSink
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_3_src_union_3_key_idx__
- filterExpr: ((key = 86) and (ds = '3')) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -502,40 +403,10 @@ STAGE PLANS:
ListSink
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4, Stage-6, Stage-8
- Stage-7 is a root stage
- Stage-6 depends on stages: Stage-7
- Stage-9 is a root stage
- Stage-8 depends on stages: Stage-9
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_1_src_union_1_key_idx__
- filterExpr: (key = 86) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -618,54 +489,6 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- Stage: Stage-7
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_2_src_union_2_key_idx__
- filterExpr: (key = 86) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-9
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_3_src_union_3_key_idx__
- filterExpr: (key = 86) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-0
Fetch Operator
limit: -1
@@ -683,34 +506,10 @@ STAGE PLANS:
86 val_86 3
86 val_86 3
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_1_src_union_1_key_idx__
- filterExpr: (ds = '1') (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -804,34 +603,10 @@ STAGE PLANS:
ListSink
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_2_src_union_2_key_idx__
- filterExpr: (ds = '2') (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -925,34 +700,10 @@ STAGE PLANS:
ListSink
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_3_src_union_3_key_idx__
- filterExpr: (ds = '3') (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -1049,36 +800,10 @@ STAGE PLANS:
1000
1000
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_3_src_union_3_key_idx__
- filterExpr: ((key = 86) and (ds = '4')) (type: boolean)
- Filter Operator
- predicate: (key = 86) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
@@ -1169,34 +894,10 @@ STAGE PLANS:
86 val_86 4
STAGE DEPENDENCIES:
- Stage-5 is a root stage
- Stage-4 depends on stages: Stage-5
- Stage-1 depends on stages: Stage-4
+ Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
- Stage: Stage-5
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_union_3_src_union_3_key_idx__
- filterExpr: (ds = '4') (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
Stage: Stage-1
Map Reduce
Map Operator Tree:
[10/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/database_drop.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/database_drop.q b/ql/src/test/queries/clientpositive/database_drop.q
index c8f6b0a..1a892e7 100644
--- a/ql/src/test/queries/clientpositive/database_drop.q
+++ b/ql/src/test/queries/clientpositive/database_drop.q
@@ -1,6 +1,6 @@
--- create database with multiple tables, indexes and views.
+-- create database with multiple tables, views.
-- Use both partitioned and non-partitioned tables, as well as
--- tables and indexes with specific storage locations
+-- tables with specific storage locations
-- verify the drop the database with cascade works and that the directories
-- outside the database's default storage are removed as part of the drop
@@ -13,57 +13,37 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/temp;
dfs -rmr ${system:test.tmp.dir}/dbcascade;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade;
--- add a table, index and view
+-- add a table, view
CREATE TABLE temp_tbl (id INT, name STRING);
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE temp_tbl;
CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl;
-CREATE INDEX idx1 ON TABLE temp_tbl(id) AS 'COMPACT' with DEFERRED REBUILD;
-ALTER INDEX idx1 ON temp_tbl REBUILD;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/temp_tbl2;
-dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/temp_tbl2_idx2;
--- add a table, index and view with a different storage location
+-- add a table, view with a different storage location
CREATE TABLE temp_tbl2 (id INT, name STRING) LOCATION 'file:${system:test.tmp.dir}/dbcascade/temp_tbl2';
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl2;
CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2;
-CREATE INDEX idx2 ON TABLE temp_tbl2(id) AS 'COMPACT' with DEFERRED REBUILD LOCATION 'file:${system:test.tmp.dir}/dbcascade/temp_tbl2_idx2';
-ALTER INDEX idx2 ON temp_tbl2 REBUILD;
--- add a partitioned table, index and view
+-- add a partitioned table, view
CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string);
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab PARTITION (ds='2008-04-09');
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab PARTITION (ds='2009-04-09');
-CREATE INDEX idx3 ON TABLE part_tab(id) AS 'COMPACT' with DEFERRED REBUILD;
-ALTER INDEX idx3 ON part_tab PARTITION (ds='2008-04-09') REBUILD;
-ALTER INDEX idx3 ON part_tab PARTITION (ds='2009-04-09') REBUILD;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab2;
-dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab2_idx4;
--- add a partitioned table, index and view with a different storage location
+-- add a partitioned table, view with a different storage location
CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string)
LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab2';
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab2 PARTITION (ds='2008-04-09');
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab2 PARTITION (ds='2009-04-09');
-CREATE INDEX idx4 ON TABLE part_tab2(id) AS 'COMPACT' with DEFERRED REBUILD
- LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab2_idx4';
-ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2008-04-09') REBUILD;
-ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2009-04-09') REBUILD;
-
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab3;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab3_p1;
-dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab3_idx5;
--- add a partitioned table, index and view with a different storage location
+-- add a partitioned table, view with a different storage location
CREATE TABLE part_tab3 (id INT, name STRING) PARTITIONED BY (ds string)
LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab3';
ALTER TABLE part_tab3 ADD PARTITION (ds='2007-04-09') LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab3_p1';
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab3 PARTITION (ds='2008-04-09');
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab3 PARTITION (ds='2009-04-09');
-CREATE INDEX idx5 ON TABLE part_tab3(id) AS 'COMPACT' with DEFERRED REBUILD
- LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab3_idx5';
-ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2008-04-09') REBUILD;
-ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2009-04-09') REBUILD;
-
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/extab1;
@@ -75,11 +55,9 @@ CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT
STORED AS TEXTFILE
LOCATION 'file:${system:test.tmp.dir}/dbcascade/extab1';
--- add a table, create index (give a name for index table)
+-- add a table
CREATE TABLE temp_tbl3 (id INT, name STRING);
LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl3;
-CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl;
-ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD;
-- drop the database with cascade
DROP DATABASE db5 CASCADE;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/drop_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_index.q b/ql/src/test/queries/clientpositive/drop_index.q
deleted file mode 100644
index e03856c..0000000
--- a/ql/src/test/queries/clientpositive/drop_index.q
+++ /dev/null
@@ -1,2 +0,0 @@
-DROP INDEX IF EXISTS UnknownIndex ON src;
-DROP INDEX IF EXISTS UnknownIndex ON UnknownTable;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/drop_index_removes_partition_dirs.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_index_removes_partition_dirs.q b/ql/src/test/queries/clientpositive/drop_index_removes_partition_dirs.q
deleted file mode 100644
index ed471db..0000000
--- a/ql/src/test/queries/clientpositive/drop_index_removes_partition_dirs.q
+++ /dev/null
@@ -1,22 +0,0 @@
--- This test verifies that if a partition exists outside an index table's current location when the
--- index is dropped the partition's location is dropped as well.
-
-CREATE TABLE test_table (key STRING, value STRING)
-PARTITIONED BY (part STRING)
-STORED AS RCFILE
-LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table';
-
-CREATE INDEX test_index ON
-TABLE test_table(key) AS 'compact' WITH DEFERRED REBUILD
-IN TABLE test_index_table;
-
-ALTER TABLE test_index_table ADD PARTITION (part = '1')
-LOCATION 'file:${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2/part=1';
-
-dfs -ls ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-
-DROP INDEX test_index ON test_table;
-
-dfs -ls ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
-
-dfs -rmr ${system:test.tmp.dir}/drop_index_removes_partition_dirs_index_table2;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/drop_table_with_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table_with_index.q b/ql/src/test/queries/clientpositive/drop_table_with_index.q
deleted file mode 100644
index 1790664..0000000
--- a/ql/src/test/queries/clientpositive/drop_table_with_index.q
+++ /dev/null
@@ -1,35 +0,0 @@
-set hive.stats.dbclass=fs;
-set hive.stats.autogather=true;
-set hive.cbo.enable=true;
-
-DROP TABLE IF EXISTS aa;
-CREATE TABLE aa (L_ORDERKEY INT,
- L_PARTKEY INT,
- L_SUPPKEY INT,
- L_LINENUMBER INT,
- L_QUANTITY DOUBLE,
- L_EXTENDEDPRICE DOUBLE,
- L_DISCOUNT DOUBLE,
- L_TAX DOUBLE,
- L_RETURNFLAG STRING,
- L_LINESTATUS STRING,
- l_shipdate STRING,
- L_COMMITDATE STRING,
- L_RECEIPTDATE STRING,
- L_SHIPINSTRUCT STRING,
- L_SHIPMODE STRING,
- L_COMMENT STRING)
-ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
-
-LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa;
-
-CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
-ALTER INDEX aa_lshipdate_idx ON aa REBUILD;
-
-show tables;
-
-explain select l_shipdate, count(l_shipdate)
-from aa
-group by l_shipdate;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/escape_comments.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/escape_comments.q b/ql/src/test/queries/clientpositive/escape_comments.q
index bcfac3d..d586e08 100644
--- a/ql/src/test/queries/clientpositive/escape_comments.q
+++ b/ql/src/test/queries/clientpositive/escape_comments.q
@@ -5,7 +5,6 @@ create table escape_comments_tbl1
partitioned by (p1 string comment 'a\nb');
create view escape_comments_view1 (col1 comment 'a\nb') comment 'a\nb'
as select col1 from escape_comments_tbl1;
-create index index2 on table escape_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment 'a\nb';
describe database extended escape_comments_db;
describe database escape_comments_db;
@@ -14,6 +13,5 @@ describe formatted escape_comments_tbl1;
describe escape_comments_tbl1;
show create table escape_comments_view1;
describe formatted escape_comments_view1;
-show formatted index on escape_comments_tbl1;
drop database escape_comments_db cascade;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auth.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auth.q b/ql/src/test/queries/clientpositive/index_auth.q
deleted file mode 100644
index b12b742..0000000
--- a/ql/src/test/queries/clientpositive/index_auth.q
+++ /dev/null
@@ -1,20 +0,0 @@
-set hive.stats.dbclass=fs;
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
-
-create table foobar(key int, value string) PARTITIONED BY (ds string, hr string);
-alter table foobar add partition (ds='2008-04-08',hr='12');
-
-CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD;
-SHOW INDEXES ON foobar;
-
-grant select on table foobar to user hive_test_user;
-grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user;
-grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user;
-grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user;
-set hive.security.authorization.enabled=true;
-
-ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD;
-set hive.security.authorization.enabled=false;
-DROP INDEX srcpart_auth_index on foobar;
-DROP TABLE foobar;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto.q b/ql/src/test/queries/clientpositive/index_auto.q
deleted file mode 100644
index fe8839a..0000000
--- a/ql/src/test/queries/clientpositive/index_auto.q
+++ /dev/null
@@ -1,31 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- try the query without indexing, with manual indexing, and with automatic indexing
--- SORT_QUERY_RESULTS
-
--- without indexing
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-set hive.stats.dbclass=fs;
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
--- manual indexing
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_where" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key > 80 AND key < 100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_where;
-SET hive.optimize.index.filter=false;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-
-EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100;
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- automatic indexing
-EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100;
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-DROP INDEX src_index on src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_empty.q b/ql/src/test/queries/clientpositive/index_auto_empty.q
deleted file mode 100644
index 7567887..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_empty.q
+++ /dev/null
@@ -1,26 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- Test to ensure that an empty index result is propagated correctly
-
-CREATE DATABASE it;
--- Create temp, and populate it with some values in src.
-CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE;
-
-set hive.stats.dbclass=fs;
--- Build an index on it.temp.
-CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX temp_index ON it.temp REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- query should not return any values
-SELECT * FROM it.it__temp_temp_index__ WHERE key = 86;
-EXPLAIN SELECT * FROM it.temp WHERE key = 86;
-SELECT * FROM it.temp WHERE key = 86;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=false;
-DROP table it.temp;
-
-DROP DATABASE it;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_file_format.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_file_format.q b/ql/src/test/queries/clientpositive/index_auto_file_format.q
deleted file mode 100644
index 2afafb8..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_file_format.q
+++ /dev/null
@@ -1,23 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
--- test automatic use of index on different file formats
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT key, value FROM src WHERE key=86;
-SELECT key, value FROM src WHERE key=86;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT key, value FROM src WHERE key=86;
-SELECT key, value FROM src WHERE key=86;
-
-DROP INDEX src_index on src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_mult_tables.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_mult_tables.q b/ql/src/test/queries/clientpositive/index_auto_mult_tables.q
deleted file mode 100644
index 924060b..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_mult_tables.q
+++ /dev/null
@@ -1,25 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- SORT_QUERY_RESULTS
--- try the query without indexing, with manual indexing, and with automatic indexing
-
--- without indexing
-EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-
-set hive.stats.dbclass=fs;
-
-CREATE INDEX src_index_bitmap ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src_index_bitmap ON src REBUILD;
-
-CREATE INDEX srcpart_index_bitmap ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_index_bitmap ON srcpart REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-
-DROP INDEX src_index_bitmap on src;
-DROP INDEX srcpart_index_bitmap on srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q b/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q
deleted file mode 100644
index 20f34d1..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q
+++ /dev/null
@@ -1,26 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- SORT_QUERY_RESULTS
--- try the query without indexing, with manual indexing, and with automatic indexing
-
--- without indexing
-EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-
-set hive.stats.dbclass=fs;
-
-CREATE INDEX src_index_compact ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index_compact ON src REBUILD;
-
-CREATE INDEX srcpart_index_compact ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_index_compact ON srcpart REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- automatic indexing
-EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-
-DROP INDEX src_index_compact on src;
-DROP INDEX srcpart_index_compact on srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_multiple.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_multiple.q b/ql/src/test/queries/clientpositive/index_auto_multiple.q
deleted file mode 100644
index 2bcb5a5..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_multiple.q
+++ /dev/null
@@ -1,20 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
--- With multiple indexes, make sure we choose which to use in a consistent order
-
-CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_key_index ON src REBUILD;
-ALTER INDEX src_val_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT key, value FROM src WHERE key=86;
-SELECT key, value FROM src WHERE key=86;
-
-DROP INDEX src_key_index ON src;
-DROP INDEX src_val_index ON src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_partitioned.q b/ql/src/test/queries/clientpositive/index_auto_partitioned.q
deleted file mode 100644
index e25fdb9..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_partitioned.q
+++ /dev/null
@@ -1,17 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-set hive.fetch.task.conversion=none;
-
--- SORT_QUERY_RESULTS
--- test automatic use of index on table with partitions
-CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_part_index ON srcpart REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09';
-SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09';
-
-DROP INDEX src_part_index ON srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_self_join.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_self_join.q b/ql/src/test/queries/clientpositive/index_auto_self_join.q
deleted file mode 100644
index 2ce6d1e..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_self_join.q
+++ /dev/null
@@ -1,19 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- SORT_QUERY_RESULTS
--- try the query without indexing, with manual indexing, and with automatic indexing
-
-EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-
-set hive.stats.dbclass=fs;
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90;
-
-DROP INDEX src_index on src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_unused.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_unused.q b/ql/src/test/queries/clientpositive/index_auto_unused.q
deleted file mode 100644
index 4e33366..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_unused.q
+++ /dev/null
@@ -1,64 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
--- test cases where the index should not be used automatically
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=5368709120;
-SET hive.optimize.index.filter.compact.maxsize=-1;
-
--- min size too large (src is less than 5G)
-EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100;
-SELECT * FROM src WHERE key > 80 AND key < 100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-SET hive.optimize.index.filter.compact.maxsize=1;
-
--- max size too small
-EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100;
-SELECT * FROM src WHERE key > 80 AND key < 100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-SET hive.optimize.index.filter.compact.maxsize=-1;
-
--- OR predicate not supported by compact indexes
-EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480;
-SELECT * FROM src WHERE key < 10 OR key > 480;
-
- SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-SET hive.optimize.index.filter.compact.maxsize=-1;
-
--- columns are not covered by indexes
-DROP INDEX src_index on src;
-CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_val_index ON src REBUILD;
-
-EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100;
-SELECT * FROM src WHERE key > 80 AND key < 100;
-
-DROP INDEX src_val_index on src;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-SET hive.optimize.index.filter.compact.maxsize=-1;
-
--- required partitions have not been built yet
-CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_part_index ON srcpart PARTITION (ds='2008-04-08', hr=11) REBUILD;
-
-EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10;
-SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10;
-
-DROP INDEX src_part_index on srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_auto_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_auto_update.q b/ql/src/test/queries/clientpositive/index_auto_update.q
deleted file mode 100644
index b184080..0000000
--- a/ql/src/test/queries/clientpositive/index_auto_update.q
+++ /dev/null
@@ -1,29 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- Test if index is actually being used.
-
--- Create temp, and populate it with some values in src.
-CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
-INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50;
-
--- Build an index on temp.
-CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX temp_index ON temp REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.autoupdate=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- overwrite temp table so index is out of date
-EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src;
-INSERT OVERWRITE TABLE temp SELECT * FROM src;
-
--- query should return indexed values
-EXPLAIN SELECT * FROM temp WHERE key = 86;
-SELECT * FROM temp WHERE key = 86;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=false;
-drop index temp_index on temp;
-DROP table temp;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap.q b/ql/src/test/queries/clientpositive/index_bitmap.q
deleted file mode 100644
index 91a4e54..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap.q
+++ /dev/null
@@ -1,52 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-DROP INDEX srcpart_index_proj on srcpart;
-
-EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_index_proj ON srcpart REBUILD;
-SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname`,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08';
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` ,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-DROP INDEX srcpart_index_proj on srcpart;
-
-EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_index_proj ON srcpart REBUILD;
-SELECT x.* FROM default__srcpart_srcpart_index_proj__ x;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` ,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_srcpart_index_proj__
-WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM srcpart WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart WHERE key=100;
-
-DROP INDEX srcpart_index_proj on srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap1.q b/ql/src/test/queries/clientpositive/index_bitmap1.q
deleted file mode 100644
index ff6ae5d..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap1.q
+++ /dev/null
@@ -1,22 +0,0 @@
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-SELECT x.* FROM default__src_src_index__ x;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname`,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__src_src_index__ WHERE NOT
-EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM src WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM src WHERE key=100;
-
-DROP INDEX src_index ON src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap2.q b/ql/src/test/queries/clientpositive/index_bitmap2.q
deleted file mode 100644
index 89fbe76..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap2.q
+++ /dev/null
@@ -1,39 +0,0 @@
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD;
-
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src1_index ON src REBUILD;
-ALTER INDEX src2_index ON src REBUILD;
-SELECT * FROM default__src_src1_index__;
-SELECT * FROM default__src_src2_index__;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result"
-SELECT t.bucketname as `_bucketname`, COLLECT_SET(t.offset) AS `_offsets` FROM
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset
- FROM default__src_src1_index__
- WHERE key = 0 AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`) UNION ALL
- SELECT `_bucketname` AS bucketname, `_offset` AS offset
- FROM default__src_src2_index__
- WHERE value = "val2" AND NOT EWAH_BITMAP_EMPTY(`_bitmaps`)) t
-GROUP BY t.bucketname;
-
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-
-SELECT key, value FROM src WHERE key=0 OR value = "val_2";
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM src WHERE key=0 OR value = "val_2";
-
-DROP INDEX src1_index ON src;
-DROP INDEX src2_index ON src;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap3.q b/ql/src/test/queries/clientpositive/index_bitmap3.q
deleted file mode 100644
index 73bdc89..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap3.q
+++ /dev/null
@@ -1,52 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.autogather=true;
-
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD;
-
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src1_index ON src REBUILD;
-ALTER INDEX src2_index ON src REBUILD;
-SELECT * FROM default__src_src1_index__;
-SELECT * FROM default__src_src2_index__;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
-EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname;
-
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result"
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname;
-
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-
-SELECT key, value FROM src WHERE key=0 AND value = "val_0";
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM src WHERE key=0 AND value = "val_0";
-
-DROP INDEX src1_index ON src;
-DROP INDEX src2_index ON src;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap_auto.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap_auto.q b/ql/src/test/queries/clientpositive/index_bitmap_auto.q
deleted file mode 100644
index 8020900..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap_auto.q
+++ /dev/null
@@ -1,57 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.autogather=true;
-
--- SORT_QUERY_RESULTS
-
--- try the query without indexing, with manual indexing, and with automatic indexing
--- without indexing
-SELECT key, value FROM src WHERE key=0 AND value = "val_0";
-
--- create indices
-EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src1_index ON src REBUILD;
-ALTER INDEX src2_index ON src REBUILD;
-SELECT * FROM default__src_src1_index__;
-SELECT * FROM default__src_src2_index__;
-
-
--- manual indexing
-EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname;
-
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result"
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname;
-
-SELECT key, value FROM src WHERE key=0 AND value = "val_0";
-
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SELECT key, value FROM src WHERE key=0 AND value = "val_0";
-
-DROP INDEX src1_index ON src;
-DROP INDEX src2_index ON src;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q b/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
deleted file mode 100644
index c6c558b..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
+++ /dev/null
@@ -1,17 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-set hive.fetch.task.conversion=none;
-
--- SORT_QUERY_RESULTS
-
--- test automatic use of index on table with partitions
-CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src_part_index ON srcpart REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-
-EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09';
-SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09';
-
-DROP INDEX src_part_index ON srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap_compression.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap_compression.q b/ql/src/test/queries/clientpositive/index_bitmap_compression.q
deleted file mode 100644
index 9b0bbe8..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap_compression.q
+++ /dev/null
@@ -1,18 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-SET hive.exec.compress.output=true;
-
--- SORT_QUERY_RESULTS
-
-CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- automatic indexing
-EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100;
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-DROP INDEX src_index on src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_bitmap_rc.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_bitmap_rc.q b/ql/src/test/queries/clientpositive/index_bitmap_rc.q
deleted file mode 100644
index b8a4f12..0000000
--- a/ql/src/test/queries/clientpositive/index_bitmap_rc.q
+++ /dev/null
@@ -1,58 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE;
-
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11;
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12;
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11;
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12;
-
-EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD;
-SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname`,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08';
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` ,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-DROP INDEX srcpart_rc_index on srcpart_rc;
-
-EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD;
-SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key = 100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` ,
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`;
-SET hive.index.blockfilter.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.HiveIndexedInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100;
-
-DROP INDEX srcpart_rc_index on srcpart_rc;
-DROP TABLE srcpart_rc;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_compact.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_compact.q b/ql/src/test/queries/clientpositive/index_compact.q
deleted file mode 100644
index 6add673..0000000
--- a/ql/src/test/queries/clientpositive/index_compact.q
+++ /dev/null
@@ -1,46 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-DROP INDEX srcpart_index_proj on srcpart;
-
-EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_index_proj ON srcpart REBUILD;
-SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_srcpart_index_proj__ x WHERE x.key=100 AND x.ds = '2008-04-08';
-SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08';
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_srcpart_index_proj__ x WHERE x.key=100 AND x.ds = '2008-04-08' and x.hr = 11;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-DROP INDEX srcpart_index_proj on srcpart;
-
-EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_index_proj ON srcpart REBUILD;
-SELECT x.* FROM default__srcpart_srcpart_index_proj__ x;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_srcpart_index_proj__ WHERE key=100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM srcpart WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart WHERE key=100;
-
-DROP INDEX srcpart_index_proj on srcpart;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_compact_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_compact_1.q b/ql/src/test/queries/clientpositive/index_compact_1.q
deleted file mode 100644
index 9cdd563..0000000
--- a/ql/src/test/queries/clientpositive/index_compact_1.q
+++ /dev/null
@@ -1,20 +0,0 @@
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-SELECT x.* FROM default__src_src_index__ x;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key=100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM src WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM src WHERE key=100;
-
-DROP INDEX src_index on src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_compact_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_compact_2.q b/ql/src/test/queries/clientpositive/index_compact_2.q
deleted file mode 100644
index 7b2fce2..0000000
--- a/ql/src/test/queries/clientpositive/index_compact_2.q
+++ /dev/null
@@ -1,50 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE;
-
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11;
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12;
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11;
-INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12;
-
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD;
-SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key=100 AND x.ds = '2008-04-08';
-SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08';
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_test_index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key=100 AND x.ds = '2008-04-08' and x.hr = 11;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_test_index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11;
-
-DROP INDEX srcpart_rc_index on srcpart_rc;
-
-EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD;
-SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__srcpart_rc_srcpart_rc_index__ WHERE key=100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM srcpart_rc WHERE key=100;
-
-DROP INDEX srcpart_rc_index on srcpart_rc;
-DROP TABLE srcpart_rc;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_compact_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_compact_3.q b/ql/src/test/queries/clientpositive/index_compact_3.q
deleted file mode 100644
index 15ba946..0000000
--- a/ql/src/test/queries/clientpositive/index_compact_3.q
+++ /dev/null
@@ -1,23 +0,0 @@
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE;
-
-INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src;
-
-CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src_index_test_rc REBUILD;
-SELECT x.* FROM default__src_index_test_rc_src_index__ x;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_index_test_rc_src_index__ WHERE key=100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SELECT key, value FROM src_index_test_rc WHERE key=100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SELECT key, value FROM src_index_test_rc WHERE key=100;
-
-DROP INDEX src_index on src_index_test_rc;
-DROP TABLE src_index_test_rc;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_compact_binary_search.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_compact_binary_search.q b/ql/src/test/queries/clientpositive/index_compact_binary_search.q
deleted file mode 100644
index e72b27c..0000000
--- a/ql/src/test/queries/clientpositive/index_compact_binary_search.q
+++ /dev/null
@@ -1,132 +0,0 @@
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.default.fileformat=TextFile;
-set hive.stats.dbclass=fs;
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=1;
-SET hive.index.compact.binary.search=true;
-
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-
-SELECT * FROM src WHERE key = '0';
-
-SELECT * FROM src WHERE key < '1';
-
-SELECT * FROM src WHERE key <= '0';
-
-SELECT * FROM src WHERE key > '8';
-
-SELECT * FROM src WHERE key >= '9';
-
-SET hive.exec.post.hooks=;
-
-DROP INDEX src_index ON src;
-
-SET hive.default.fileformat=RCFILE;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-
-SELECT * FROM src WHERE key = '0';
-
-SELECT * FROM src WHERE key < '1';
-
-SELECT * FROM src WHERE key <= '0';
-
-SELECT * FROM src WHERE key > '8';
-
-SELECT * FROM src WHERE key >= '9';
-
-SET hive.exec.post.hooks=;
-
-DROP INDEX src_index ON src;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-SET hive.default.fileformat=TextFile;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-
-SELECT * FROM src WHERE key = '0';
-
-SELECT * FROM src WHERE key < '1';
-
-SELECT * FROM src WHERE key <= '0';
-
-SELECT * FROM src WHERE key > '8';
-
-SELECT * FROM src WHERE key >= '9';
-
-SET hive.exec.post.hooks=;
-
-DROP INDEX src_index ON src;
-
-SET hive.default.fileformat=RCFILE;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-
-SELECT * FROM src WHERE key = '0';
-
-SELECT * FROM src WHERE key < '1';
-
-SELECT * FROM src WHERE key <= '0';
-
-SELECT * FROM src WHERE key > '8';
-
-SELECT * FROM src WHERE key >= '9';
-
-SET hive.exec.post.hooks=;
-
-DROP INDEX src_index ON src;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-SET hive.default.fileformat=TextFile;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-
-SELECT * FROM src WHERE key = '0';
-
-SELECT * FROM src WHERE key < '1';
-
-SELECT * FROM src WHERE key <= '0';
-
-SELECT * FROM src WHERE key > '8';
-
-SELECT * FROM src WHERE key >= '9';
-
-SET hive.exec.post.hooks=;
-
-DROP INDEX src_index ON src;
-
-SET hive.default.fileformat=RCFILE;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.VerifyHiveSortedInputFormatUsedHook;
-
-SELECT * FROM src WHERE key = '0';
-
-SELECT * FROM src WHERE key < '1';
-
-SELECT * FROM src WHERE key <= '0';
-
-SELECT * FROM src WHERE key > '8';
-
-SELECT * FROM src WHERE key >= '9';
-
-SET hive.exec.post.hooks=;
-
-DROP INDEX src_index ON src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_compression.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_compression.q b/ql/src/test/queries/clientpositive/index_compression.q
deleted file mode 100644
index be93560..0000000
--- a/ql/src/test/queries/clientpositive/index_compression.q
+++ /dev/null
@@ -1,18 +0,0 @@
-set hive.mapred.mode=nonstrict;
-SET hive.exec.compress.output=true;
-SET hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- automatic indexing
-EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100;
-SELECT key, value FROM src WHERE key > 80 AND key < 100;
-
-DROP INDEX src_index on src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_creation.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_creation.q b/ql/src/test/queries/clientpositive/index_creation.q
deleted file mode 100644
index ef020b6..0000000
--- a/ql/src/test/queries/clientpositive/index_creation.q
+++ /dev/null
@@ -1,54 +0,0 @@
-set hive.stats.dbclass=fs;
-drop index src_index_2 on src;
-drop index src_index_3 on src;
-drop index src_index_4 on src;
-drop index src_index_5 on src;
-drop index src_index_6 on src;
-drop index src_index_7 on src;
-drop index src_index_8 on src;
-drop index src_index_9 on src;
-drop table `_t`;
-
-create index src_index_2 on table src(key) as 'compact' WITH DEFERRED REBUILD;
-desc extended default__src_src_index_2__;
-
-create index src_index_3 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_3;
-desc extended src_idx_src_index_3;
-
-create index src_index_4 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
-desc extended default__src_src_index_4__;
-
-create index src_index_5 on table src(key) as 'compact' WITH DEFERRED REBUILD ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' ESCAPED BY '\\';
-desc extended default__src_src_index_5__;
-
-create index src_index_6 on table src(key) as 'compact' WITH DEFERRED REBUILD STORED AS RCFILE;
-desc extended default__src_src_index_6__;
-
-create index src_index_7 on table src(key) as 'compact' WITH DEFERRED REBUILD in table src_idx_src_index_7 STORED AS RCFILE;
-desc extended src_idx_src_index_7;
-
-create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-desc extended default__src_src_index_8__;
-
-create index src_index_9 on table src(key) as 'compact' WITH DEFERRED REBUILD TBLPROPERTIES ("prop1"="val1", "prop2"="val2");
-desc extended default__src_src_index_9__;
-
-create table `_t`(`_i` int, `_j` int);
-create index x on table `_t`(`_j`) as 'compact' WITH DEFERRED REBUILD;
-alter index x on `_t` rebuild;
-
-create index x2 on table `_t`(`_i`,`_j`) as 'compact' WITH DEFERRED
-REBUILD;
-alter index x2 on `_t` rebuild;
-
-drop index src_index_2 on src;
-drop index src_index_3 on src;
-drop index src_index_4 on src;
-drop index src_index_5 on src;
-drop index src_index_6 on src;
-drop index src_index_7 on src;
-drop index src_index_8 on src;
-drop index src_index_9 on src;
-drop table `_t`;
-
-show tables;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_in_db.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_in_db.q b/ql/src/test/queries/clientpositive/index_in_db.q
deleted file mode 100644
index 1c34781..0000000
--- a/ql/src/test/queries/clientpositive/index_in_db.q
+++ /dev/null
@@ -1,16 +0,0 @@
-set hive.optimize.index.filter=true;
-drop database if exists index_test_db cascade;
--- Test selecting from a table that is backed by an index
--- create table, index in a db, then set default db as current db, and try selecting
-
-create database index_test_db;
-
-use index_test_db;
-create table testtb (id int, name string);
-create index id_index on table testtb (id) as 'COMPACT' WITH DEFERRED REBUILD in table testdb_id_idx_tb;
-
-use default;
-select * from index_test_db.testtb where id>2;
-
-use index_test_db;
-drop index id_index on testtb;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_serde.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_serde.q b/ql/src/test/queries/clientpositive/index_serde.q
deleted file mode 100644
index 8f20f28..0000000
--- a/ql/src/test/queries/clientpositive/index_serde.q
+++ /dev/null
@@ -1,52 +0,0 @@
-set hive.stats.dbclass=fs;
-
--- SORT_QUERY_RESULTS
--- Want to ensure we can build and use indices on tables stored with SerDes
--- Build the (Avro backed) table
-CREATE TABLE doctors
-ROW FORMAT
-SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-STORED AS
-INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-TBLPROPERTIES ('avro.schema.literal'='{
- "namespace": "testing.hive.avro.serde",
- "name": "doctors",
- "type": "record",
- "fields": [
- {
- "name":"number",
- "type":"int",
- "doc":"Order of playing the role"
- },
- {
- "name":"first_name",
- "type":"string",
- "doc":"first name of actor playing role"
- },
- {
- "name":"last_name",
- "type":"string",
- "doc":"last name of actor playing role"
- }
- ]
-}');
-
-DESCRIBE doctors;
-
-LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors;
-
--- Create and build an index
-CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD;
-DESCRIBE EXTENDED default__doctors_doctors_index__;
-ALTER INDEX doctors_index ON doctors REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
-EXPLAIN SELECT * FROM doctors WHERE number > 6;
-SELECT * FROM doctors WHERE number > 6;
-
-DROP INDEX doctors_index ON doctors;
-DROP TABLE doctors;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_skewtable.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_skewtable.q b/ql/src/test/queries/clientpositive/index_skewtable.q
deleted file mode 100644
index e85e646..0000000
--- a/ql/src/test/queries/clientpositive/index_skewtable.q
+++ /dev/null
@@ -1,23 +0,0 @@
-set hive.mapred.mode=nonstrict;
--- Test creating an index on skewed table
-
--- Create a skew table
-CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;
-
-LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv;
-
--- Create and build an index
-CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD;
-DESCRIBE FORMATTED default__kv_kv_index__;
-ALTER INDEX kv_index ON kv REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- Run a query that uses the index
-EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value;
-SELECT * FROM kv WHERE value > '15' ORDER BY value;
-
-DROP INDEX kv_index ON kv;
-DROP TABLE kv;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_stale.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_stale.q b/ql/src/test/queries/clientpositive/index_stale.q
deleted file mode 100644
index 6daba83..0000000
--- a/ql/src/test/queries/clientpositive/index_stale.q
+++ /dev/null
@@ -1,23 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
--- test that stale indexes are not used
-
-CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
-INSERT OVERWRITE TABLE temp SELECT * FROM src WHERE key < 50;
-
--- Build an index on temp.
-CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX temp_index ON temp REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- overwrite temp table so index is out of date
-INSERT OVERWRITE TABLE temp SELECT * FROM src;
-
--- should return correct results bypassing index
-EXPLAIN SELECT * FROM temp WHERE key = 86;
-SELECT * FROM temp WHERE key = 86;
-DROP index temp_index on temp;
-DROP table temp;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/index_stale_partitioned.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/index_stale_partitioned.q b/ql/src/test/queries/clientpositive/index_stale_partitioned.q
deleted file mode 100644
index 630b415..0000000
--- a/ql/src/test/queries/clientpositive/index_stale_partitioned.q
+++ /dev/null
@@ -1,29 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
--- Test if index is actually being used.
-
--- Create temp, and populate it with some values in src.
-CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE;
-ALTER TABLE temp ADD PARTITION (foo = 'bar');
-INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src WHERE key < 50;
-
--- Build an index on temp.
-CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX temp_index ON temp PARTITION (foo = 'bar') REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- overwrite temp table so index is out of date
-INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src;
-
--- query should not return any values
-SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar';
-EXPLAIN SELECT * FROM temp WHERE key = 86 AND foo = 'bar';
-SELECT * FROM temp WHERE key = 86 AND foo = 'bar';
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=false;
-DROP index temp_index on temp;
-DROP table temp;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q b/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q
deleted file mode 100644
index 9758c16..0000000
--- a/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q
+++ /dev/null
@@ -1,28 +0,0 @@
-set hive.stats.dbclass=fs;
-DROP TABLE show_idx_empty;
-DROP TABLE show_idx_full;
-
-CREATE TABLE show_idx_empty(KEY STRING, VALUE STRING);
-CREATE TABLE show_idx_full(KEY STRING, VALUE1 STRING, VALUE2 STRING);
-
-CREATE INDEX idx_1 ON TABLE show_idx_full(KEY) AS "COMPACT" WITH DEFERRED REBUILD;
-CREATE INDEX idx_2 ON TABLE show_idx_full(VALUE1) AS "COMPACT" WITH DEFERRED REBUILD;
-
-CREATE INDEX idx_comment ON TABLE show_idx_full(VALUE2) AS "COMPACT" WITH DEFERRED REBUILD COMMENT "index comment";
-CREATE INDEX idx_compound ON TABLE show_idx_full(KEY, VALUE1) AS "COMPACT" WITH DEFERRED REBUILD;
-
-ALTER INDEX idx_1 ON show_idx_full REBUILD;
-ALTER INDEX idx_2 ON show_idx_full REBUILD;
-ALTER INDEX idx_comment ON show_idx_full REBUILD;
-ALTER INDEX idx_compound ON show_idx_full REBUILD;
-
-EXPLAIN SHOW INDEXES ON show_idx_full;
-SHOW INDEXES ON show_idx_full;
-
-EXPLAIN SHOW INDEXES ON show_idx_empty;
-SHOW INDEXES ON show_idx_empty;
-
-DROP INDEX idx_1 on show_idx_full;
-DROP INDEX idx_2 on show_idx_full;
-DROP TABLE show_idx_empty;
-DROP TABLE show_idx_full;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/show_indexes_syntax.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/show_indexes_syntax.q b/ql/src/test/queries/clientpositive/show_indexes_syntax.q
deleted file mode 100644
index bb43c5e..0000000
--- a/ql/src/test/queries/clientpositive/show_indexes_syntax.q
+++ /dev/null
@@ -1,24 +0,0 @@
-set hive.stats.dbclass=fs;
-DROP TABLE show_idx_t1;
-
-CREATE TABLE show_idx_t1(KEY STRING, VALUE STRING);
-
-CREATE INDEX idx_t1 ON TABLE show_idx_t1(KEY) AS "COMPACT" WITH DEFERRED REBUILD;
-ALTER INDEX idx_t1 ON show_idx_t1 REBUILD;
-
-EXPLAIN
-SHOW INDEX ON show_idx_t1;
-
-SHOW INDEX ON show_idx_t1;
-
-EXPLAIN
-SHOW INDEXES ON show_idx_t1;
-
-SHOW INDEXES ON show_idx_t1;
-
-EXPLAIN
-SHOW FORMATTED INDEXES ON show_idx_t1;
-
-SHOW FORMATTED INDEXES ON show_idx_t1;
-
-DROP TABLE show_idx_t1;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q b/ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
index d7010e9..d8993dd 100644
--- a/ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
+++ b/ql/src/test/queries/clientpositive/special_character_in_tabnames_2.q
@@ -13,30 +13,7 @@ ANALYZE TABLE `s/c` COMPUTE STATISTICS;
ANALYZE TABLE `s/c` COMPUTE STATISTICS FOR COLUMNS key,value;
--- without indexing
SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100;
-set hive.stats.dbclass=fs;
-CREATE INDEX src_index ON TABLE `s/c`(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON `s/c` REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-
--- manual indexing
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_where" SELECT `_bucketname` , `_offsets` FROM `default__s/c_src_index__` WHERE key > 80 AND key < 100;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_where;
-SET hive.optimize.index.filter=false;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-
-EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100;
-SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
-
--- automatic indexing
EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100;
SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100;
-
-DROP INDEX src_index on `s/c`;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/udf_bitmap_and.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_bitmap_and.q b/ql/src/test/queries/clientpositive/udf_bitmap_and.q
deleted file mode 100644
index ed7711c..0000000
--- a/ql/src/test/queries/clientpositive/udf_bitmap_and.q
+++ /dev/null
@@ -1,14 +0,0 @@
-set hive.fetch.task.conversion=more;
-
-select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows);
-select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows);
-
-drop table bitmap_test;
-create table bitmap_test (a array<bigint>, b array<bigint>);
-
-insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows);
-
-select ewah_bitmap_and(a,b) from bitmap_test;
-
-drop table bitmap_test;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/udf_bitmap_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_bitmap_empty.q b/ql/src/test/queries/clientpositive/udf_bitmap_empty.q
deleted file mode 100644
index 142b248..0000000
--- a/ql/src/test/queries/clientpositive/udf_bitmap_empty.q
+++ /dev/null
@@ -1,5 +0,0 @@
-set hive.fetch.task.conversion=more;
-
-select ewah_bitmap_empty(array(13,2,4,8589934592,0,0)) from src tablesample (1 rows);
-
-select ewah_bitmap_empty(array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows);
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/udf_bitmap_or.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_bitmap_or.q b/ql/src/test/queries/clientpositive/udf_bitmap_or.q
deleted file mode 100644
index 00785b7..0000000
--- a/ql/src/test/queries/clientpositive/udf_bitmap_or.q
+++ /dev/null
@@ -1,14 +0,0 @@
-set hive.fetch.task.conversion=more;
-
-select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows);
-select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows);
-
-drop table bitmap_test;
-create table bitmap_test (a array<bigint>, b array<bigint>);
-
-insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows);
-
-select ewah_bitmap_or(a,b) from bitmap_test;
-
-drop table bitmap_test;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/unicode_comments.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/unicode_comments.q b/ql/src/test/queries/clientpositive/unicode_comments.q
index 4d958e4..00c94f3 100644
--- a/ql/src/test/queries/clientpositive/unicode_comments.q
+++ b/ql/src/test/queries/clientpositive/unicode_comments.q
@@ -5,13 +5,11 @@ create table unicode_comments_tbl1
partitioned by (p1 string comment '分割');
create view unicode_comments_view1 (col1 comment '第一列') comment '视图'
as select col1 from unicode_comments_tbl1;
-create index index2 on table unicode_comments_tbl1(col1) as 'COMPACT' with deferred rebuild comment '索引';
describe database extended unicode_comments_db;
show create table unicode_comments_tbl1;
describe formatted unicode_comments_tbl1;
show create table unicode_comments_view1;
describe formatted unicode_comments_view1;
-show formatted index on unicode_comments_tbl1;
drop database unicode_comments_db cascade;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/union_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/union_view.q b/ql/src/test/queries/clientpositive/union_view.q
index c4f63e6..74e77a8 100644
--- a/ql/src/test/queries/clientpositive/union_view.q
+++ b/ql/src/test/queries/clientpositive/union_view.q
@@ -3,18 +3,14 @@ set hive.stats.dbclass=fs;
set hive.explain.user=false;
CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string);
-CREATE INDEX src_union_1_key_idx ON TABLE src_union_1(key) AS 'COMPACT' WITH DEFERRED REBUILD;
CREATE TABLE src_union_2 (key int, value string) PARTITIONED BY (ds string, part_1 string);
-CREATE INDEX src_union_2_key_idx ON TABLE src_union_2(key) AS 'COMPACT' WITH DEFERRED REBUILD;
CREATE TABLE src_union_3(key int, value string) PARTITIONED BY (ds string, part_1 string, part_2 string);
-CREATE INDEX src_union_3_key_idx ON TABLE src_union_3(key) AS 'COMPACT' WITH DEFERRED REBUILD;
SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET hive.optimize.index.filter=true;
-SET hive.optimize.index.filter.compact.minsize=0;
SET hive.exec.pre.hooks=;
SET hive.exec.post.hooks=;
@@ -23,17 +19,12 @@ SET hive.merge.mapfiles=false;
SET hive.merge.mapredfiles=false;
INSERT OVERWRITE TABLE src_union_1 PARTITION (ds='1') SELECT * FROM src;
-ALTER INDEX src_union_1_key_idx ON src_union_1 PARTITION (ds='1') REBUILD;
INSERT OVERWRITE TABLE src_union_2 PARTITION (ds='2', part_1='1') SELECT * FROM src;
INSERT OVERWRITE TABLE src_union_2 PARTITION (ds='2', part_1='2') SELECT * FROM src;
-ALTER INDEX src_union_2_key_idx ON src_union_2 PARTITION (ds='2', part_1='1') REBUILD;
-ALTER INDEX src_union_2_key_idx ON src_union_2 PARTITION (ds='2', part_1='2') REBUILD;
INSERT OVERWRITE TABLE src_union_3 PARTITION (ds='3', part_1='1', part_2='2:3+4') SELECT * FROM src;
INSERT OVERWRITE TABLE src_union_3 PARTITION (ds='3', part_1='2', part_2='2:3+4') SELECT * FROM src;
-ALTER INDEX src_union_3_key_idx ON src_union_3 PARTITION (ds='3', part_1='1', part_2='2:3+4') REBUILD;
-ALTER INDEX src_union_3_key_idx ON src_union_3 PARTITION (ds='3', part_1='2', part_2='2:3+4') REBUILD;
EXPLAIN SELECT key, value, ds FROM src_union_1 WHERE key=86 and ds='1';
EXPLAIN SELECT key, value, ds FROM src_union_2 WHERE key=86 and ds='2';
@@ -80,7 +71,6 @@ SELECT count(1) from src_union_view WHERE ds ='2';
SELECT count(1) from src_union_view WHERE ds ='3';
INSERT OVERWRITE TABLE src_union_3 PARTITION (ds='4', part_1='1', part_2='2:3+4') SELECT * FROM src;
-ALTER INDEX src_union_3_key_idx ON src_union_3 PARTITION (ds='4', part_1='1', part_2='2:3+4') REBUILD;
EXPLAIN SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='4';
SELECT key, value, ds FROM src_union_view WHERE key=86 AND ds ='4';
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out b/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
index 9accae0..d390d90 100644
--- a/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_invalid_priv_v2.q.out
@@ -6,7 +6,7 @@ POSTHOOK: query: create table if not exists authorization_invalid_v2 (key int, v
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@authorization_invalid_v2
-PREHOOK: query: grant index on table authorization_invalid_v2 to user hive_test_user
+PREHOOK: query: grant lock on table authorization_invalid_v2 to user hive_test_user
PREHOOK: type: GRANT_PRIVILEGE
PREHOOK: Output: default@authorization_invalid_v2
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unsupported privilege type INDEX
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unsupported privilege type LOCK
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out b/ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out
deleted file mode 100644
index b29d3a6..0000000
--- a/ql/src/test/results/clientnegative/index_bitmap_no_map_aggr.q.out
+++ /dev/null
@@ -1,20 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: org.apache.hadoop.hive.ql.metadata.HiveException: Cannot construct index without map-side aggregation
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientnegative/index_compact_entry_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/index_compact_entry_limit.q.out b/ql/src/test/results/clientnegative/index_compact_entry_limit.q.out
deleted file mode 100644
index f844ee4..0000000
--- a/ql/src/test/results/clientnegative/index_compact_entry_limit.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-PREHOOK: query: drop index src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=100 ORDER BY key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-Job Submission failed with exception 'java.io.IOException(org.apache.hadoop.hive.ql.metadata.HiveException: Number of compact index entries loaded during the query exceeded the maximum of 5 set in hive.index.compact.query.max.entries)'
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask. org.apache.hadoop.hive.ql.metadata.HiveException: Number of compact index entries loaded during the query exceeded the maximum of 5 set in hive.index.compact.query.max.entries
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientnegative/index_compact_size_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/index_compact_size_limit.q.out b/ql/src/test/results/clientnegative/index_compact_size_limit.q.out
deleted file mode 100644
index 9ff8f8f..0000000
--- a/ql/src/test/results/clientnegative/index_compact_size_limit.q.out
+++ /dev/null
@@ -1,37 +0,0 @@
-PREHOOK: query: drop index src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: drop index src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=100 ORDER BY key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-Job Submission failed with exception 'java.io.IOException(Size of data to read during a compact-index-based query exceeded the maximum of 1024 set in hive.index.compact.query.max.size)'
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapRedTask. Size of data to read during a compact-index-based query exceeded the maximum of 1024 set in hive.index.compact.query.max.size
[11/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 52c105f..98da309 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -36,10 +36,7 @@ public class DDLWork implements Serializable {
// TODO: this can probably be replaced with much less code via dynamic dispatch and/or templates.
private PreInsertTableDesc preInsertTableDesc;
private InsertTableDesc insertTableDesc;
- private CreateIndexDesc createIndexDesc;
- private AlterIndexDesc alterIndexDesc;
private AlterMaterializedViewDesc alterMVDesc;
- private DropIndexDesc dropIdxDesc;
private CreateDatabaseDesc createDatabaseDesc;
private SwitchDatabaseDesc switchDatabaseDesc;
private DropDatabaseDesc dropDatabaseDesc;
@@ -71,7 +68,6 @@ public class DDLWork implements Serializable {
private AlterTableSimpleDesc alterTblSimpleDesc;
private MsckDesc msckDesc;
private ShowTableStatusDesc showTblStatusDesc;
- private ShowIndexesDesc showIndexesDesc;
private DescDatabaseDesc descDbDesc;
private AlterDatabaseDesc alterDbDesc;
private AlterTableAlterPartDesc alterTableAlterPartDesc;
@@ -125,16 +121,6 @@ public class DDLWork implements Serializable {
this.outputs = outputs;
}
- public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
- CreateIndexDesc createIndex) {
- this(inputs, outputs);
- this.createIndexDesc = createIndex;
- }
-
- public DDLWork(AlterIndexDesc alterIndex) {
- this.alterIndexDesc = alterIndex;
- }
-
/**
* @param createDatabaseDesc
* Create Database descriptor
@@ -209,16 +195,6 @@ public class DDLWork implements Serializable {
}
/**
- * @param alterIdxDesc
- * alter index descriptor
- */
- public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
- AlterIndexDesc alterIndexDesc) {
- this(inputs, outputs);
- this.alterIndexDesc = alterIndexDesc;
- }
-
- /**
* @param alterMVDesc
* alter materialized view descriptor
*/
@@ -492,12 +468,6 @@ public class DDLWork implements Serializable {
}
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
- DropIndexDesc dropIndexDesc) {
- this(inputs, outputs);
- this.dropIdxDesc = dropIndexDesc;
- }
-
- public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
RoleDDLDesc roleDDLDesc) {
this(inputs, outputs);
this.roleDDLDesc = roleDDLDesc;
@@ -528,12 +498,6 @@ public class DDLWork implements Serializable {
}
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
- ShowIndexesDesc showIndexesDesc) {
- this(inputs, outputs);
- this.showIndexesDesc = showIndexesDesc;
- }
-
- public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
AlterTablePartMergeFilesDesc mergeDesc) {
this(inputs, outputs);
this.mergeFilesDesc = mergeDesc;
@@ -725,36 +689,6 @@ public class DDLWork implements Serializable {
}
/**
- * @return the createIndexDesc
- */
- public CreateIndexDesc getCreateIndexDesc() {
- return createIndexDesc;
- }
-
- /**
- * @param createIndexDesc
- * the createIndexDesc to set
- */
- public void setCreateIndexDesc(CreateIndexDesc createIndexDesc) {
- this.createIndexDesc = createIndexDesc;
- }
-
- /**
- * @return the alterIndexDesc
- */
- public AlterIndexDesc getAlterIndexDesc() {
- return alterIndexDesc;
- }
-
- /**
- * @param alterIndexDesc
- * the alterIndexDesc to set
- */
- public void setAlterIndexDesc(AlterIndexDesc alterIndexDesc) {
- this.alterIndexDesc = alterIndexDesc;
- }
-
- /**
* @return the createTblDesc
*/
@Explain(displayName = "Create Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -1041,18 +975,6 @@ public class DDLWork implements Serializable {
}
/**
- * @return the showIndexesDesc
- */
- @Explain(displayName = "Show Index Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
- public ShowIndexesDesc getShowIndexesDesc() {
- return showIndexesDesc;
- }
-
- public void setShowIndexesDesc(ShowIndexesDesc showIndexesDesc) {
- this.showIndexesDesc = showIndexesDesc;
- }
-
- /**
* @return the descTblDesc
*/
@Explain(displayName = "Describe Table Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -1172,14 +1094,6 @@ public class DDLWork implements Serializable {
this.outputs = outputs;
}
- public DropIndexDesc getDropIdxDesc() {
- return dropIdxDesc;
- }
-
- public void setDropIdxDesc(DropIndexDesc dropIdxDesc) {
- this.dropIdxDesc = dropIdxDesc;
- }
-
/**
* @return role ddl desc
*/
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java
deleted file mode 100644
index 58ac328..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropIndexDesc.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.plan;
-
-public class DropIndexDesc {
-
- private static final long serialVersionUID = 1L;
-
- private String indexName;
-
- private String tableName;
-
- private boolean throwException;
-
- /**
- * @param indexName
- * @param tableName
- */
- public DropIndexDesc(String indexName, String tableName, boolean throwException) {
- this.indexName = indexName;
- this.tableName = tableName;
- this.throwException = throwException;
- }
-
- /**
- * @return index name
- */
- public String getIndexName() {
- return indexName;
- }
-
- /**
- * @param indexName index name
- */
- public void setIndexName(String indexName) {
- this.indexName = indexName;
- }
-
- /**
- * @return table name
- */
- public String getTableName() {
- return tableName;
- }
-
- /**
- * @param tableName table name
- */
- public void setTableName(String tableName) {
- this.tableName = tableName;
- }
-
- public boolean isThrowException() {
- return throwException;
- }
-
- public void setThrowException(boolean throwException) {
- this.throwException = throwException;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 3938bd5..a9e5c8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -71,7 +71,6 @@ public enum HiveOperation {
SHOW_CREATEDATABASE("SHOW_CREATEDATABASE", new Privilege[]{Privilege.SELECT}, null),
SHOW_CREATETABLE("SHOW_CREATETABLE", new Privilege[]{Privilege.SELECT}, null),
SHOWFUNCTIONS("SHOWFUNCTIONS", null, null, true, false),
- SHOWINDEXES("SHOWINDEXES", null, null, true, false),
SHOWPARTITIONS("SHOWPARTITIONS", null, null),
SHOWLOCKS("SHOWLOCKS", null, null, true, false),
SHOWCONF("SHOWCONF", null, null),
@@ -89,9 +88,6 @@ public enum HiveOperation {
DROP_MATERIALIZED_VIEW("DROP_MATERIALIZED_VIEW", null, new Privilege[]{Privilege.DROP}),
ALTER_MATERIALIZED_VIEW_REWRITE("ALTER_MATERIALIZED_VIEW_REWRITE",
new Privilege[]{Privilege.ALTER_METADATA}, null),
- CREATEINDEX("CREATEINDEX", null, null),
- DROPINDEX("DROPINDEX", null, null),
- ALTERINDEX_REBUILD("ALTERINDEX_REBUILD", null, null),
ALTERVIEW_PROPERTIES("ALTERVIEW_PROPERTIES", null, null),
DROPVIEW_PROPERTIES("DROPVIEW_PROPERTIES", null, null),
LOCKTABLE("LOCKTABLE", new Privilege[]{Privilege.LOCK}, null),
@@ -114,7 +110,6 @@ public enum HiveOperation {
TRUNCATETABLE("TRUNCATETABLE", null, new Privilege[]{Privilege.DROP}),
CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}),
QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}, true, false),
- ALTERINDEX_PROPS("ALTERINDEX_PROPS",null, null),
ALTERDATABASE("ALTERDATABASE", null, null),
ALTERDATABASE_OWNER("ALTERDATABASE_OWNER", null, null),
ALTERDATABASE_LOCATION("ALTERDATABASE_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null),
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index 9298630..f147309 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -118,8 +118,6 @@ public class MapWork extends BaseWork {
private String inputformat;
- private String indexIntermediateFile;
-
private Integer numMapTasks;
private Long maxSplitSize;
private Long minSplitSize;
@@ -587,10 +585,6 @@ public class MapWork extends BaseWork {
return this.mapperCannotSpanPartns;
}
- public String getIndexIntermediateFile() {
- return indexIntermediateFile;
- }
-
public ArrayList<String> getAliases() {
return new ArrayList<String>(aliasToWork.keySet());
}
@@ -641,14 +635,6 @@ public class MapWork extends BaseWork {
return sortedColsByDirectory;
}
- public void addIndexIntermediateFile(String fileName) {
- if (this.indexIntermediateFile == null) {
- this.indexIntermediateFile = fileName;
- } else {
- this.indexIntermediateFile += "," + fileName;
- }
- }
-
public int getSamplingType() {
return samplingType;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java
deleted file mode 100644
index e18a94c..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowIndexesDesc.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.plan;
-
-import java.io.Serializable;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.plan.Explain.Level;
-
-
-/**
- * ShowIndexesDesc.
- * Returns table index information per SQL syntax.
- */
-@Explain(displayName = "Show Indexes", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class ShowIndexesDesc extends DDLDesc implements Serializable {
- private static final long serialVersionUID = 1L;
- String tableName;
- String resFile;
- boolean isFormatted;
-
- /**
- * thrift ddl for the result of show indexes.
- */
- private static final String schema = "idx_name,tab_name,col_names,idx_tab_name,idx_type,comment"
- + "#string:string:string:string:string:string";
-
- public static String getSchema() {
- return schema;
- }
-
- public String getTableName() {
- return tableName;
- }
-
- public String getResFile() {
- return resFile;
- }
-
- public boolean isFormatted() {
- return isFormatted;
- }
-
- public void setFormatted(boolean isFormatted) {
- this.isFormatted = isFormatted;
- }
-
- /**
- *
- * @param tableName
- * Name of the table whose indexes need to be listed.
- * @param resFile
- * File to store the results in.
- */
- public ShowIndexesDesc(String tableName, Path resFile) {
- this.tableName = tableName;
- this.resFile = resFile.toString();
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
index 2accad3..efbd858 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
@@ -653,6 +653,7 @@ public final class OpProcFactory {
}
public static class ReduceSinkPPD extends DefaultPPD implements NodeProcessor {
+ @Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
super.process(nd, stack, procCtx, nodeOutputs);
@@ -790,7 +791,9 @@ public final class OpProcFactory {
* @param ewi
*/
protected void logExpr(Node nd, ExprWalkerInfo ewi) {
- if (!LOG.isDebugEnabled()) return;
+ if (!LOG.isDebugEnabled()) {
+ return;
+ }
for (Entry<String, List<ExprNodeDesc>> e : ewi.getFinalCandidates().entrySet()) {
StringBuilder sb = new StringBuilder("Pushdown predicates of ").append(nd.getName())
.append(" for alias ").append(e.getKey()).append(": ");
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java
index 6000590..1e9c639 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java
@@ -101,9 +101,6 @@ public class Privilege {
public static Privilege DROP = new Privilege(PrivilegeType.DROP,
PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN);
- public static Privilege INDEX = new Privilege(PrivilegeType.INDEX,
- PrivilegeScope.ALLSCOPE);
-
public static Privilege LOCK = new Privilege(PrivilegeType.LOCK,
PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN);
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java
index 3040938..27c7986 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java
@@ -48,7 +48,6 @@ public class PrivilegeRegistry {
Registry.put(Privilege.ALTER_METADATA.getPriv(), Privilege.ALTER_METADATA);
Registry.put(Privilege.CREATE.getPriv(), Privilege.CREATE);
Registry.put(Privilege.DROP.getPriv(), Privilege.DROP);
- Registry.put(Privilege.INDEX.getPriv(), Privilege.INDEX);
Registry.put(Privilege.LOCK.getPriv(), Privilege.LOCK);
Registry.put(Privilege.SELECT.getPriv(), Privilege.SELECT);
Registry.put(Privilege.SHOW_DATABASE.getPriv(),
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
index 56b6bf6..7678e8f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
@@ -33,7 +33,6 @@ public enum PrivilegeType {
ALTER_METADATA(HiveParser.TOK_PRIV_ALTER_METADATA, "Alter"),
CREATE(HiveParser.TOK_PRIV_CREATE, "Create"),
DROP(HiveParser.TOK_PRIV_DROP, "Drop"),
- INDEX(HiveParser.TOK_PRIV_INDEX, "Index"),
LOCK(HiveParser.TOK_PRIV_LOCK, "Lock"),
SELECT(HiveParser.TOK_PRIV_SELECT, "Select"),
SHOW_DATABASE(HiveParser.TOK_PRIV_SHOW_DATABASE, "Show_Database"),
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
index d2f1716..b66d188 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.security.authorization;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.AccessControlException;
import java.util.ArrayList;
@@ -293,9 +292,6 @@ public class StorageBasedAuthorizationProvider extends HiveAuthorizationProvider
return FsAction.WRITE;
case DROP:
return FsAction.WRITE;
- case INDEX:
- throw new AuthorizationException(
- "StorageBasedAuthorizationProvider cannot handle INDEX privilege");
case LOCK:
throw new AuthorizationException(
"StorageBasedAuthorizationProvider cannot handle LOCK privilege");
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java
deleted file mode 100644
index aa5be09..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFEWAHBitmapBop.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.udf.generic;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import javaewah.EWAHCompressedBitmap;
-
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectOutput;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.io.LongWritable;
-
-/**
- * An abstract class for a UDF that performs a binary operation between two EWAH-compressed bitmaps.
- * For example: Bitmap OR and AND operations between two EWAH-compressed bitmaps.
- */
-abstract public class AbstractGenericUDFEWAHBitmapBop extends GenericUDF {
- protected final ArrayList<Object> ret = new ArrayList<Object>();
- private transient ObjectInspector b1OI;
- private final String name;
-
- AbstractGenericUDFEWAHBitmapBop(String name) {
- this.name = name;
- }
-
- @Override
- public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
- if (arguments.length != 2) {
- throw new UDFArgumentLengthException(
- "The function " + name + "(b1, b2) takes exactly 2 arguments");
- }
-
- if (arguments[0].getCategory().equals(Category.LIST)) {
- b1OI = (ListObjectInspector) arguments[0];
- } else {
- throw new UDFArgumentTypeException(0, "\""
- + Category.LIST.toString().toLowerCase()
- + "\" is expected at function " + name + ", but \""
- + arguments[0].getTypeName() + "\" is found");
- }
-
- if (!arguments[1].getCategory().equals(Category.LIST)) {
- throw new UDFArgumentTypeException(1, "\""
- + Category.LIST.toString().toLowerCase()
- + "\" is expected at function " + name + ", but \""
- + arguments[1].getTypeName() + "\" is found");
-
- }
- return ObjectInspectorFactory
- .getStandardListObjectInspector(PrimitiveObjectInspectorFactory
- .writableLongObjectInspector);
- }
-
- protected abstract EWAHCompressedBitmap bitmapBop(
- EWAHCompressedBitmap bitmap1, EWAHCompressedBitmap bitmap2);
-
- @Override
- public Object evaluate(DeferredObject[] arguments) throws HiveException {
- assert (arguments.length == 2);
- Object b1 = arguments[0].get();
- Object b2 = arguments[1].get();
-
- EWAHCompressedBitmap bitmap1 = wordArrayToBitmap(b1);
- EWAHCompressedBitmap bitmap2 = wordArrayToBitmap(b2);
-
- EWAHCompressedBitmap bitmapAnd = bitmapBop(bitmap1, bitmap2);
-
- BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput();
- try {
- bitmapAnd.writeExternal(bitmapObjOut);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- ret.clear();
- List<LongWritable> retList = bitmapToWordArray(bitmapAnd);
- for (LongWritable l : retList) {
- ret.add(l);
- }
- return ret;
- }
-
- protected EWAHCompressedBitmap wordArrayToBitmap(Object b) {
- ListObjectInspector lloi = (ListObjectInspector) b1OI;
- int length = lloi.getListLength(b);
- ArrayList<LongWritable> bitmapArray = new ArrayList<LongWritable>();
- for (int i = 0; i < length; i++) {
- long l = PrimitiveObjectInspectorUtils.getLong(
- lloi.getListElement(b, i),
- (PrimitiveObjectInspector) lloi.getListElementObjectInspector());
- bitmapArray.add(new LongWritable(l));
- }
-
- BitmapObjectInput bitmapObjIn = new BitmapObjectInput(bitmapArray);
- EWAHCompressedBitmap bitmap = new EWAHCompressedBitmap();
- try {
- bitmap.readExternal(bitmapObjIn);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- return bitmap;
- }
-
- protected List<LongWritable> bitmapToWordArray(EWAHCompressedBitmap bitmap) {
- BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput();
- try {
- bitmap.writeExternal(bitmapObjOut);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- return bitmapObjOut.list();
- }
-
- @Override
- public String getDisplayString(String[] children) {
- return getStandardDisplayString(name, children, ",");
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java
deleted file mode 100644
index fabeecc..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEWAHBitmap.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.udf.generic;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import javaewah.EWAHCompressedBitmap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.exec.Description;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectOutput;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.StandardListObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.util.StringUtils;
-
-
-/**
- * GenericUDAFEWAHBitmap.
- *
- */
-@Description(name = "ewah_bitmap", value = "_FUNC_(expr) - Returns an EWAH-compressed bitmap representation of a column.")
-public class GenericUDAFEWAHBitmap extends AbstractGenericUDAFResolver {
-
- static final Logger LOG = LoggerFactory.getLogger(GenericUDAFEWAHBitmap.class.getName());
-
- @Override
- public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters)
- throws SemanticException {
- if (parameters.length != 1) {
- throw new UDFArgumentTypeException(parameters.length - 1,
- "Exactly one argument is expected.");
- }
- ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[0]);
- if (!ObjectInspectorUtils.compareSupported(oi)) {
- throw new UDFArgumentTypeException(parameters.length - 1,
- "Cannot support comparison of map<> type or complex type containing map<>.");
- }
- return new GenericUDAFEWAHBitmapEvaluator();
- }
-
- //The UDAF evaluator assumes that all rows it's evaluating have
- //the same (desired) value.
- public static class GenericUDAFEWAHBitmapEvaluator extends GenericUDAFEvaluator {
-
- // For PARTIAL1 and COMPLETE: ObjectInspectors for original data
- private PrimitiveObjectInspector inputOI;
-
- // For PARTIAL2 and FINAL: ObjectInspectors for partial aggregations
- // (lists of bitmaps)
- private transient StandardListObjectInspector loi;
- private transient StandardListObjectInspector internalMergeOI;
-
- @Override
- public ObjectInspector init(Mode m, ObjectInspector[] parameters)
- throws HiveException {
- super.init(m, parameters);
- // init output object inspectors
- // The output of a partial aggregation is a list
- if (m == Mode.PARTIAL1) {
- inputOI = (PrimitiveObjectInspector) parameters[0];
- return ObjectInspectorFactory
- .getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
- } else if (m == Mode.PARTIAL2 || m == Mode.FINAL) {
- internalMergeOI = (StandardListObjectInspector) parameters[0];
- inputOI = (PrimitiveObjectInspector)internalMergeOI.getListElementObjectInspector();
- loi = (StandardListObjectInspector) ObjectInspectorFactory
- .getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
- return loi;
- } else { // Mode.COMPLETE, ie. no map-side aggregation, requires ordering
- inputOI = (PrimitiveObjectInspector)parameters[0];
- loi = (StandardListObjectInspector) ObjectInspectorFactory
- .getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableLongObjectInspector);
- return loi;
- }
- }
-
- /** class for storing the current partial result aggregation */
- @AggregationType(estimable = true)
- static class BitmapAgg extends AbstractAggregationBuffer {
- EWAHCompressedBitmap bitmap;
- @Override
- public int estimate() {
- return bitmap.sizeInBytes();
- }
- }
-
- @Override
- public void reset(AggregationBuffer agg) throws HiveException {
-
- ((BitmapAgg) agg).bitmap = new EWAHCompressedBitmap();
- }
-
- @Override
- public AggregationBuffer getNewAggregationBuffer() throws HiveException {
- BitmapAgg result = new BitmapAgg();
- reset(result);
- return result;
- }
-
- @Override
- public void iterate(AggregationBuffer agg, Object[] parameters)
- throws HiveException {
- assert (parameters.length == 1);
- Object p = parameters[0];
- if (p != null) {
- BitmapAgg myagg = (BitmapAgg) agg;
- try {
- int row = PrimitiveObjectInspectorUtils.getInt(p, inputOI);
- addBitmap(row, myagg);
- } catch (NumberFormatException e) {
- LOG.warn(getClass().getSimpleName() + " " +
- StringUtils.stringifyException(e));
- }
- }
- }
-
-
- @Override
- public Object terminate(AggregationBuffer agg) throws HiveException {
- BitmapAgg myagg = (BitmapAgg) agg;
-
- BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput();
- try {
- myagg.bitmap.writeExternal(bitmapObjOut);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- return bitmapObjOut.list();
- }
-
- @Override
- public void merge(AggregationBuffer agg, Object partial)
- throws HiveException {
- BitmapAgg myagg = (BitmapAgg) agg;
- ArrayList<LongWritable> partialResult = (ArrayList<LongWritable>) internalMergeOI.getList(partial);
- BitmapObjectInput bitmapObjIn = new BitmapObjectInput(partialResult);
- EWAHCompressedBitmap partialBitmap = new EWAHCompressedBitmap();
- try {
- partialBitmap.readExternal(bitmapObjIn);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- myagg.bitmap = myagg.bitmap.or(partialBitmap);
- }
-
- @Override
- public Object terminatePartial(AggregationBuffer agg) throws HiveException {
- BitmapAgg myagg = (BitmapAgg) agg;
- BitmapObjectOutput bitmapObjOut = new BitmapObjectOutput();
- try {
- myagg.bitmap.writeExternal(bitmapObjOut);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- return bitmapObjOut.list();
- }
-
- private void addBitmap(int newRow, BitmapAgg myagg) {
- if (!myagg.bitmap.set(newRow)) {
- throw new RuntimeException("Can't set bits out of order with EWAHCompressedBitmap");
- }
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java
deleted file mode 100644
index 976fa18..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapAnd.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.udf.generic;
-
-import javaewah.EWAHCompressedBitmap;
-
-import org.apache.hadoop.hive.ql.exec.Description;
-
-/**
- * GenericEWAHUDFBitmapAnd.
- *
- */
-@Description(name = "ewah_bitmap_and",
- value = "_FUNC_(b1, b2) - Return an EWAH-compressed bitmap that is the bitwise AND of two bitmaps.")
-public class GenericUDFEWAHBitmapAnd extends AbstractGenericUDFEWAHBitmapBop {
-
- public GenericUDFEWAHBitmapAnd() {
- super("EWAH_BITMAP_AND");
- }
-
- @Override
- protected EWAHCompressedBitmap bitmapBop(
- EWAHCompressedBitmap bitmap1, EWAHCompressedBitmap bitmap2) {
- return bitmap1.and(bitmap2);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java
deleted file mode 100644
index aab6e82..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapEmpty.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.udf.generic;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import javaewah.EWAHCompressedBitmap;
-
-import org.apache.hadoop.hive.ql.exec.Description;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapObjectInput;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.LongWritable;
-
-@Description(name = "ewah_bitmap_empty", value = "_FUNC_(bitmap) - "
- + "Predicate that tests whether an EWAH-compressed bitmap is all zeros ")
-public class GenericUDFEWAHBitmapEmpty extends GenericUDF {
- private transient ObjectInspector bitmapOI;
- private transient BooleanObjectInspector boolOI;
-
-@Override
-public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
- if (arguments.length != 1) {
- throw new UDFArgumentLengthException(
- "The function EWAH_BITMAP_EMPTY(b) takes exactly 1 argument");
- }
-
- if (arguments[0].getCategory().equals(Category.LIST)) {
- bitmapOI = (ListObjectInspector) arguments[0];
- } else {
- throw new UDFArgumentTypeException(0, "\""
- + Category.LIST.toString().toLowerCase()
- + "\" is expected at function EWAH_BITMAP_EMPTY, but \""
- + arguments[0].getTypeName() + "\" is found");
- }
-
- boolOI = PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
- return boolOI;
- }
-
- @Override
- public Object evaluate(DeferredObject[] arguments) throws HiveException {
- assert (arguments.length == 1);
- Object b = arguments[0].get();
-
- ListObjectInspector lloi = (ListObjectInspector) bitmapOI;
- int length = lloi.getListLength(b);
- ArrayList<LongWritable> bitmapArray = new ArrayList<LongWritable>();
- for (int i = 0; i < length; i++) {
- long l = PrimitiveObjectInspectorUtils.getLong(
- lloi.getListElement(b, i),
- (PrimitiveObjectInspector) lloi.getListElementObjectInspector());
- bitmapArray.add(new LongWritable(l));
- }
-
- BitmapObjectInput bitmapObjIn = new BitmapObjectInput(bitmapArray);
- EWAHCompressedBitmap bitmap = new EWAHCompressedBitmap();
- try {
- bitmap.readExternal(bitmapObjIn);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
-
- // Add return true only if bitmap is all zeros.
- return new BooleanWritable(!bitmap.iterator().hasNext());
- }
-
-
- @Override
- public String getDisplayString(String[] children) {
- return getStandardDisplayString("EWAH_BITMAP_EMPTY", children);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java
deleted file mode 100644
index 33d6be6..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEWAHBitmapOr.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.udf.generic;
-
-import javaewah.EWAHCompressedBitmap;
-
-import org.apache.hadoop.hive.ql.exec.Description;
-
-/**
- * GenericUDFEWAHBitmapOr.
- *
- */
-@Description(name = "ewah_bitmap_or",
- value = "_FUNC_(b1, b2) - Return an EWAH-compressed bitmap that is the bitwise OR of two bitmaps.")
-public class GenericUDFEWAHBitmapOr extends AbstractGenericUDFEWAHBitmapBop {
-
- public GenericUDFEWAHBitmapOr() {
- super("EWAH_BITMAP_OR");
- }
-
- @Override
- protected EWAHCompressedBitmap bitmapBop(
- EWAHCompressedBitmap bitmap1, EWAHCompressedBitmap bitmap2) {
- return bitmap1.or(bitmap2);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java b/ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java
deleted file mode 100644
index 808cb6a..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/index/MockIndexResult.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.mapred.FileSplit;
-
-public final class MockIndexResult implements IndexResult {
-
- private final ImmutableSet<HiveInputSplit> selectedSplits;
-
- public MockIndexResult(Collection<HiveInputSplit> selectedSplits) {
- this.selectedSplits = ImmutableSet.copyOf(selectedSplits);
- }
-
- @Override
- public boolean contains(FileSplit split) throws HiveException {
- return selectedSplits.contains(split);
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java b/ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java
index 4804e36..405efdf 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/index/MockInputFile.java
@@ -74,7 +74,8 @@ public final class MockInputFile {
DefaultSplitLengthStep {
private String path;
- private long defaultSplitSize = SplitFilterTestCase.DEFAULT_SPLIT_SIZE;;
+ public static final long DEFAULT_SPLIT_SIZE = 1024 * 1024;
+ private long defaultSplitSize = DEFAULT_SPLIT_SIZE;
private final List<HiveInputSplit> splits = new ArrayList<>();
private final List<HiveInputSplit> selectedSplits = new ArrayList<>();
private long position = 0;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java b/ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java
deleted file mode 100644
index fdd0731..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/index/SplitFilterTestCase.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import com.google.common.collect.ImmutableSet;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-public final class SplitFilterTestCase {
- public static final long DEFAULT_SPLIT_SIZE = 1024 * 1024;
- public static final long SMALL_SPLIT_SIZE = 500;
-
- private final Set<HiveInputSplit> allSplits;
- private final Set<HiveInputSplit> selectedSplits;
- private final Set<HiveInputSplit> expectedSplits;
- private final long maxInputSize;
-
- private SplitFilterTestCase(Iterable<HiveInputSplit> allSplits,
- Iterable<HiveInputSplit> selectedSplits, Iterable<HiveInputSplit> expectedSplits,
- long maxInputSize) {
-
- this.allSplits = ImmutableSet.copyOf(allSplits);
- this.selectedSplits = ImmutableSet.copyOf(selectedSplits);
- this.expectedSplits = ImmutableSet.copyOf(expectedSplits);
- this.maxInputSize = maxInputSize;
- }
-
- private HiveInputSplit[] toArray(Collection<HiveInputSplit> splits) {
- return splits.toArray(new HiveInputSplit[splits.size()]);
- }
-
- public void executeAndValidate() throws IOException {
- SplitFilter filter = new SplitFilter(new MockIndexResult(selectedSplits), maxInputSize);
- List<HiveInputSplit> actualSplits = filter.filter(toArray(allSplits));
- assertSplits(expectedSplits, actualSplits);
- }
-
- private void assertSplits(Collection<HiveInputSplit> expectedSplits,
- Collection<HiveInputSplit> actualSplits) {
- SplitFilter.HiveInputSplitComparator hiveInputSplitComparator =
- new SplitFilter.HiveInputSplitComparator();
-
- List<HiveInputSplit> sortedExpectedSplits = new ArrayList<>(expectedSplits);
- Collections.sort(sortedExpectedSplits, hiveInputSplitComparator);
-
- List<HiveInputSplit> sortedActualSplits = new ArrayList<>(actualSplits);
- Collections.sort(sortedActualSplits, hiveInputSplitComparator);
-
- assertEquals("Number of selected splits.", sortedExpectedSplits.size(),
- sortedActualSplits.size());
-
- for (int i = 0; i < sortedExpectedSplits.size(); i++) {
- HiveInputSplit expectedSplit = sortedExpectedSplits.get(i);
- HiveInputSplit actualSplit = sortedActualSplits.get(i);
-
- String splitName = "Split #" + i;
-
- assertEquals(splitName + " path.", expectedSplit.getPath(), actualSplit.getPath());
- assertEquals(splitName + " start.", expectedSplit.getStart(), actualSplit.getStart());
- assertEquals(splitName + " length.", expectedSplit.getLength(), actualSplit.getLength());
- }
- }
-
- public static MaxInputSizeStep builder() {
- return new SplitFilterTestCaseBuilder();
- }
-
- public static interface MaxInputSizeStep extends InputFilesStep {
- InputFilesStep maxInputSize(long maxInputSize);
- }
-
- public static interface InputFilesStep {
- ExpectedSplitsStep inputFiles(MockInputFile... inputFiles);
- }
-
- public static interface ExpectedSplitsStep {
- BuildStep expectedSplits(HiveInputSplit... expectedSplits);
- }
-
- public static interface BuildStep {
- SplitFilterTestCase build();
- }
-
- private static final class SplitFilterTestCaseBuilder implements MaxInputSizeStep, InputFilesStep,
- ExpectedSplitsStep, BuildStep {
-
- private long maxInputSize = Long.MAX_VALUE;
- private List<MockInputFile> inputFiles;
- private List<HiveInputSplit> expectedSplits;
-
- @Override
- public InputFilesStep maxInputSize(long maxInputSize) {
- this.maxInputSize = maxInputSize;
- return this;
- }
-
- @Override
- public ExpectedSplitsStep inputFiles(MockInputFile... inputFiles) {
- this.inputFiles = Arrays.asList(inputFiles);
- return this;
- }
-
- @Override
- public BuildStep expectedSplits(HiveInputSplit... expectedSplits) {
- this.expectedSplits = Arrays.asList(expectedSplits);
- return this;
- }
-
- @Override
- public SplitFilterTestCase build() {
- List<HiveInputSplit> allSplits = new ArrayList<>();
- List<HiveInputSplit> selectedSplits = new ArrayList<>();
- Set<String> seenPaths = new HashSet<String>();
-
- for (MockInputFile inputFile : inputFiles) {
- if (seenPaths.add(inputFile.getPath())) {
- allSplits.addAll(inputFile.getSplits());
- selectedSplits.addAll(inputFile.getSelectedSplits());
- } else {
- fail(String.format("Cannot add 2 input files with the same path to a test case. " +
- "The duplicated path is '%s'.", inputFile.getPath()));
- }
- }
-
- return new SplitFilterTestCase(allSplits, selectedSplits, expectedSplits, maxInputSize);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java b/ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java
index 3fc18e9..feb5ea9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/index/TestHiveInputSplitComparator.java
@@ -18,8 +18,9 @@
package org.apache.hadoop.hive.ql.index;
import java.util.Arrays;
+
import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit;
-import org.apache.hadoop.hive.ql.index.SplitFilter.HiveInputSplitComparator;
+import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplitComparator;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java b/ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java
deleted file mode 100644
index befb103..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/index/TestIndexType.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import junit.framework.TestCase;
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.junit.Test;
-
-public class TestIndexType extends TestCase {
-
- @Test
- public void testIndexTypeHandlers(){
- assertEquals(HiveIndex.IndexType.AGGREGATE_TABLE.getHandlerClsName(), AggregateIndexHandler.class.getName());
- assertEquals(HiveIndex.IndexType.BITMAP_TABLE.getHandlerClsName(), BitmapIndexHandler.class.getName());
- assertEquals(HiveIndex.IndexType.COMPACT_SUMMARY_TABLE.getHandlerClsName(), CompactIndexHandler.class.getName());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java b/ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java
deleted file mode 100644
index b5114e9..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/index/TestSplitFilter.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.IOException;
-import org.junit.Test;
-
-import static org.apache.hadoop.hive.ql.index.MockHiveInputSplits.createMockSplit;
-import static org.apache.hadoop.io.SequenceFile.SYNC_INTERVAL;
-import static org.apache.hadoop.hive.ql.index.SplitFilterTestCase.DEFAULT_SPLIT_SIZE;
-import static org.apache.hadoop.hive.ql.index.SplitFilterTestCase.SMALL_SPLIT_SIZE;
-
-public class TestSplitFilter {
- private SplitFilterTestCase testCase;
-
- @Test
- public void testOneSelectedSplitsInMiddle() throws Exception {
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .split()
- .selectedSplit()
- .split()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSelectedFirstSplit() throws Exception {
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .split()
- .split()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSelectedLastSplit() throws Exception {
- int lastSplitSize = 1234;
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .split()
- .selectedSplit(lastSplitSize)
- .build()
- )
- .expectedSplits(
- createMockSplit("A", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, lastSplitSize + SYNC_INTERVAL)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSelectedTwoAdjacentSplits() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .selectedSplit()
- .split()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE),
- createMockSplit("A", DEFAULT_SPLIT_SIZE, DEFAULT_SPLIT_SIZE)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSelectedThreeAdjacentSplits() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .selectedSplit()
- .selectedSplit()
- .split()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE),
- createMockSplit("A", DEFAULT_SPLIT_SIZE, DEFAULT_SPLIT_SIZE),
- createMockSplit("A", DEFAULT_SPLIT_SIZE * 2, DEFAULT_SPLIT_SIZE)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSelectedSplitsInTwoFiles() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .split()
- .build(),
- MockInputFile.builder()
- .path("B")
- .selectedSplit()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE),
- createMockSplit("B", 0, DEFAULT_SPLIT_SIZE)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testOverlapWithPreviousFile() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .build(),
- MockInputFile.builder()
- .path("B")
- .split()
- .selectedSplit()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE),
- createMockSplit("B", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testOverlapInSecondFile() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .build(),
- MockInputFile.builder()
- .path("B")
- .split()
- .selectedSplit()
- .selectedSplit()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE),
- createMockSplit("B", DEFAULT_SPLIT_SIZE - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL),
- createMockSplit("B", DEFAULT_SPLIT_SIZE * 2, DEFAULT_SPLIT_SIZE)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSmallSplitsLengthAdjustment() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .defaultSplitLength(SMALL_SPLIT_SIZE)
- .split()
- .selectedSplit()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, SMALL_SPLIT_SIZE * 2)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testSmallSplitsOverlap() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .defaultSplitLength(SMALL_SPLIT_SIZE)
- .selectedSplit()
- .split()
- .selectedSplit()
- .split()
- .selectedSplit()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, SMALL_SPLIT_SIZE),
- createMockSplit("A", SMALL_SPLIT_SIZE * 2, SMALL_SPLIT_SIZE),
- createMockSplit("A", SMALL_SPLIT_SIZE * 4, SMALL_SPLIT_SIZE)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test
- public void testMaxSplitsSizePositive() throws Exception {
-
- testCase = SplitFilterTestCase.builder()
- .maxInputSize(DEFAULT_SPLIT_SIZE * 3 + SYNC_INTERVAL * 2)
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .split()
- .selectedSplit()
- .split()
- .selectedSplit()
- .build()
- )
- .expectedSplits(
- createMockSplit("A", 0, DEFAULT_SPLIT_SIZE),
- createMockSplit("A", DEFAULT_SPLIT_SIZE * 2 - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL),
- createMockSplit("A", DEFAULT_SPLIT_SIZE * 4 - SYNC_INTERVAL, DEFAULT_SPLIT_SIZE + SYNC_INTERVAL)
- )
- .build();
-
- testCase.executeAndValidate();
- }
-
- @Test(expected = IOException.class)
- public void testMaxSplitsSizeNegative() throws Exception {
- testCase = SplitFilterTestCase.builder()
- .maxInputSize(DEFAULT_SPLIT_SIZE * 3)
- .inputFiles(
- MockInputFile.builder()
- .path("A")
- .selectedSplit()
- .split()
- .selectedSplit()
- .split()
- .selectedSplit()
- .build()
- )
- .expectedSplits()
- .build();
-
- testCase.executeAndValidate();
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index b5b478f..d982555 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.ql.index.HiveIndex;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.stats.StatsUtils;
@@ -688,127 +687,6 @@ public class TestHive extends TestCase {
}
}
- /**
- * Tests creating a simple index on a simple table.
- *
- * @throws Throwable
- */
- public void testIndex() throws Throwable {
- try{
- // create a simple table
- String tableName = "table_for_testindex";
- String qTableName = Warehouse.DEFAULT_DATABASE_NAME + "." + tableName;
- try {
- hm.dropTable(Warehouse.DEFAULT_DATABASE_NAME, tableName);
- } catch (HiveException e) {
- e.printStackTrace();
- assertTrue("Unable to drop table", false);
- }
-
- Table tbl = new Table(Warehouse.DEFAULT_DATABASE_NAME, tableName);
- List<FieldSchema> fields = tbl.getCols();
-
- fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column"));
- fields.add(new FieldSchema("col2", serdeConstants.STRING_TYPE_NAME,
- "string -- second column"));
- fields.add(new FieldSchema("col3", serdeConstants.DOUBLE_TYPE_NAME,
- "double -- thrift column"));
- tbl.setFields(fields);
-
- tbl.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
- tbl.setInputFormatClass(SequenceFileInputFormat.class);
-
- // create table
- try {
- hm.createTable(tbl);
- } catch (HiveException e) {
- e.printStackTrace();
- assertTrue("Unable to create table: " + tableName, false);
- }
-
- // Create a simple index
- String indexName = "index_on_table_for_testindex";
- String indexHandlerClass = HiveIndex.IndexType.COMPACT_SUMMARY_TABLE.getHandlerClsName();
- List<String> indexedCols = new ArrayList<String>();
- indexedCols.add("col1");
- String indexTableName = "index_on_table_for_testindex_table";
- String qIndexTableName = Warehouse.DEFAULT_DATABASE_NAME + "." + indexTableName;
- boolean deferredRebuild = true;
- String inputFormat = SequenceFileInputFormat.class.getName();
- String outputFormat = SequenceFileOutputFormat.class.getName();
- String serde = null;
- String storageHandler = null;
- String location = null;
- String collItemDelim = null;
- String fieldDelim = null;
- String fieldEscape = null;
- String lineDelim = null;
- String mapKeyDelim = null;
- String indexComment = null;
- Map<String, String> indexProps = null;
- Map<String, String> tableProps = null;
- Map<String, String> serdeProps = new HashMap<String, String>();
- hm.createIndex(qTableName, indexName, indexHandlerClass, indexedCols, qIndexTableName,
- deferredRebuild, inputFormat, outputFormat, serde, storageHandler, location,
- indexProps, tableProps, serdeProps, collItemDelim, fieldDelim, fieldEscape, lineDelim,
- mapKeyDelim, indexComment);
-
- // Retrieve and validate the index
- Index index = null;
- try {
- index = hm.getIndex(tableName, indexName);
- assertNotNull("Unable to fetch index", index);
- index.validate();
- assertEquals("Index names don't match for index: " + indexName, indexName,
- index.getIndexName());
- assertEquals("Table names don't match for index: " + indexName, tableName,
- index.getOrigTableName());
- assertEquals("Index table names didn't match for index: " + indexName, indexTableName,
- index.getIndexTableName());
- assertEquals("Index handler classes didn't match for index: " + indexName,
- indexHandlerClass, index.getIndexHandlerClass());
- assertEquals("Deferred rebuild didn't match for index: " + indexName, deferredRebuild,
- index.isDeferredRebuild());
-
- } catch (HiveException e) {
- System.err.println(StringUtils.stringifyException(e));
- assertTrue("Unable to fetch index correctly: " + indexName, false);
- }
-
- // Drop index
- try {
- hm.dropIndex(Warehouse.DEFAULT_DATABASE_NAME, tableName, indexName, false, true);
- } catch (HiveException e) {
- System.err.println(StringUtils.stringifyException(e));
- assertTrue("Unable to drop index: " + indexName, false);
- }
-
- boolean dropIndexException = false;
- try {
- hm.getIndex(tableName, indexName);
- } catch (HiveException e) {
- // Expected since it was just dropped
- dropIndexException = true;
- }
-
- assertTrue("Unable to drop index: " + indexName, dropIndexException);
-
- // Drop table
- try {
- hm.dropTable(tableName);
- Table droppedTable = hm.getTable(tableName, false);
- assertNull("Unable to drop table " + tableName, droppedTable);
- } catch (HiveException e) {
- System.err.println(StringUtils.stringifyException(e));
- assertTrue("Unable to drop table: " + tableName, false);
- }
- } catch (Throwable e) {
- System.err.println(StringUtils.stringifyException(e));
- System.err.println("testIndex failed");
- throw e;
- }
- }
-
public void testHiveRefreshOnConfChange() throws Throwable{
Hive prevHiveObj = Hive.get();
prevHiveObj.getDatabaseCurrent();
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
index 87cd98f..4a33885 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java
@@ -66,7 +66,6 @@ public class TestPrivilegesV1 extends PrivilegesTestBase{
grantUserTable("alter", PrivilegeType.ALTER_METADATA);
grantUserTable("create", PrivilegeType.CREATE);
grantUserTable("drop", PrivilegeType.DROP);
- grantUserTable("index", PrivilegeType.INDEX);
grantUserTable("lock", PrivilegeType.LOCK);
grantUserTable("select", PrivilegeType.SELECT);
grantUserTable("show_database", PrivilegeType.SHOW_DATABASE);
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q b/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
deleted file mode 100644
index 6de8c7f..0000000
--- a/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
+++ /dev/null
@@ -1,18 +0,0 @@
-set hive.strict.checks.bucketing=false;
-
-set hive.exec.concatenate.check.index=true;
-create table src_rc_concatenate_test(key int, value string) stored as rcfile;
-
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
-
-show table extended like `src_rc_concatenate_test`;
-
-select count(1) from src_rc_concatenate_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
-
-create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-show indexes on src_rc_concatenate_test;
-
-alter table src_rc_concatenate_test concatenate;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/authorization_create_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_create_index.q b/ql/src/test/queries/clientnegative/authorization_create_index.q
deleted file mode 100644
index eeb5673..0000000
--- a/ql/src/test/queries/clientnegative/authorization_create_index.q
+++ /dev/null
@@ -1,7 +0,0 @@
-set hive.test.authz.sstd.hs2.mode=true;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
-set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
-set hive.security.authorization.enabled=true;
-create table t1 (a int);
-set user.name=user2;
-create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/authorization_drop_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_drop_index.q b/ql/src/test/queries/clientnegative/authorization_drop_index.q
deleted file mode 100644
index d984d06..0000000
--- a/ql/src/test/queries/clientnegative/authorization_drop_index.q
+++ /dev/null
@@ -1,8 +0,0 @@
-set hive.test.authz.sstd.hs2.mode=true;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
-set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
-set hive.security.authorization.enabled=true;
-create table t1 (a int);
-create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD;
-set user.name=user2;
-drop index t1_index on t1;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q b/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q
index 5d4c95e..ae28997 100644
--- a/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q
+++ b/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q
@@ -2,5 +2,5 @@ set hive.test.authz.sstd.hs2.mode=true;
set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
create table if not exists authorization_invalid_v2 (key int, value string);
-grant index on table authorization_invalid_v2 to user hive_test_user;
+grant lock on table authorization_invalid_v2 to user hive_test_user;
drop table authorization_invalid_v2;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/authorization_uri_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_uri_index.q b/ql/src/test/queries/clientnegative/authorization_uri_index.q
deleted file mode 100644
index 795928d..0000000
--- a/ql/src/test/queries/clientnegative/authorization_uri_index.q
+++ /dev/null
@@ -1,14 +0,0 @@
-set hive.test.authz.sstd.hs2.mode=true;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
-set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
-set hive.security.authorization.enabled=true;
-
-dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_index;
-dfs -touchz ${system:test.tmp.dir}/az_uri_index/1.txt;
-dfs -chmod 555 ${system:test.tmp.dir}/az_uri_index/1.txt;
-
-
-create table t1(i int);
-create index idt1 on table t1 (i) as 'COMPACT' WITH DEFERRED REBUILD LOCATION '${system:test.tmp.dir}/az_uri_index/';
-
--- Attempt to use location for index that does not have permissions should fail
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/bad_indextype.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/bad_indextype.q b/ql/src/test/queries/clientnegative/bad_indextype.q
deleted file mode 100644
index 8f5bf42..0000000
--- a/ql/src/test/queries/clientnegative/bad_indextype.q
+++ /dev/null
@@ -1 +0,0 @@
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) AS 'UNKNOWN' WITH DEFERRED REBUILD;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/drop_index_failure.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/drop_index_failure.q b/ql/src/test/queries/clientnegative/drop_index_failure.q
deleted file mode 100644
index 6e907df..0000000
--- a/ql/src/test/queries/clientnegative/drop_index_failure.q
+++ /dev/null
@@ -1,3 +0,0 @@
-set hive.exec.drop.ignorenonexistent=false;
--- Can't use DROP INDEX if the index doesn't exist and IF EXISTS isn't specified
-DROP INDEX UnknownIndex ON src;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q b/ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q
deleted file mode 100644
index a17cd1f..0000000
--- a/ql/src/test/queries/clientnegative/index_bitmap_no_map_aggr.q
+++ /dev/null
@@ -1,7 +0,0 @@
-EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-SET hive.map.aggr=false;
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
-ALTER INDEX src1_index ON src REBUILD;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/index_compact_entry_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/index_compact_entry_limit.q b/ql/src/test/queries/clientnegative/index_compact_entry_limit.q
deleted file mode 100644
index 63973e6..0000000
--- a/ql/src/test/queries/clientnegative/index_compact_entry_limit.q
+++ /dev/null
@@ -1,13 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-drop index src_index on src;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key<1000;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SET hive.index.compact.query.max.entries=5;
-SELECT key, value FROM src WHERE key=100 ORDER BY key;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/index_compact_size_limit.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/index_compact_size_limit.q b/ql/src/test/queries/clientnegative/index_compact_size_limit.q
deleted file mode 100644
index ae4e265..0000000
--- a/ql/src/test/queries/clientnegative/index_compact_size_limit.q
+++ /dev/null
@@ -1,14 +0,0 @@
-set hive.mapred.mode=nonstrict;
-set hive.stats.dbclass=fs;
-drop index src_index on src;
-
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
-ALTER INDEX src_index ON src REBUILD;
-
-SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result" SELECT `_bucketname` , `_offsets` FROM default__src_src_index__ WHERE key<1000;
-SET hive.index.compact.file=${system:test.tmp.dir}/index_result;
-SET hive.input.format=org.apache.hadoop.hive.ql.index.compact.HiveCompactIndexInputFormat;
-SET hive.index.compact.query.max.size=1024;
-SELECT key, value FROM src WHERE key=100 ORDER BY key;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/merge_negative_1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/merge_negative_1.q b/ql/src/test/queries/clientnegative/merge_negative_1.q
deleted file mode 100644
index 0a48c01..0000000
--- a/ql/src/test/queries/clientnegative/merge_negative_1.q
+++ /dev/null
@@ -1,3 +0,0 @@
-create table src2 like src;
-CREATE INDEX src_index_merge_test ON TABLE src2(key) as 'COMPACT' WITH DEFERRED REBUILD;
-alter table src2 concatenate;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/show_create_table_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/show_create_table_index.q b/ql/src/test/queries/clientnegative/show_create_table_index.q
deleted file mode 100644
index 0dd0ef9..0000000
--- a/ql/src/test/queries/clientnegative/show_create_table_index.q
+++ /dev/null
@@ -1,6 +0,0 @@
-CREATE TABLE tmp_showcrt (key int, value string);
-CREATE INDEX tmp_index on table tmp_showcrt(key) as 'compact' WITH DEFERRED REBUILD;
-SHOW CREATE TABLE default__tmp_showcrt_tmp_index__;
-DROP INDEX tmp_index on tmp_showcrt;
-DROP TABLE tmp_showcrt;
-
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/temp_table_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/temp_table_index.q b/ql/src/test/queries/clientnegative/temp_table_index.q
deleted file mode 100644
index 91f45ce..0000000
--- a/ql/src/test/queries/clientnegative/temp_table_index.q
+++ /dev/null
@@ -1,2 +0,0 @@
-create temporary table tmp1 (c1 string);
-create index tmp1_idx on table tmp1 (c1) as 'COMPACT' with deferred rebuild;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientnegative/truncate_column_indexed_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/truncate_column_indexed_table.q b/ql/src/test/queries/clientnegative/truncate_column_indexed_table.q
deleted file mode 100644
index 13f32c8..0000000
--- a/ql/src/test/queries/clientnegative/truncate_column_indexed_table.q
+++ /dev/null
@@ -1,9 +0,0 @@
--- Tests truncating a column from an indexed table
-
-CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE;
-
-INSERT OVERWRITE TABLE test_tab SELECT * FROM src;
-
-CREATE INDEX test_tab_index ON TABLE test_tab (key) as 'COMPACT' WITH DEFERRED REBUILD;
-
-TRUNCATE TABLE test_tab COLUMNS (value);
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q b/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
deleted file mode 100644
index 3a9e14c..0000000
--- a/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
+++ /dev/null
@@ -1,51 +0,0 @@
-set hive.strict.checks.bucketing=false;
-
-set hive.mapred.mode=nonstrict;
-set hive.exec.concatenate.check.index =false;
-create table src_rc_concatenate_test(key int, value string) stored as rcfile;
-
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
-
-show table extended like `src_rc_concatenate_test`;
-
-select count(1) from src_rc_concatenate_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
-
-create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-show indexes on src_rc_concatenate_test;
-
-alter table src_rc_concatenate_test concatenate;
-
-show table extended like `src_rc_concatenate_test`;
-
-select count(1) from src_rc_concatenate_test;
-select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
-
-drop index src_rc_concatenate_test_index on src_rc_concatenate_test;
-
-create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
-
-alter table src_rc_concatenate_test_part add partition (ds='2011');
-
-load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-
-show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
-
-select count(1) from src_rc_concatenate_test_part;
-select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part;
-
-create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-show indexes on src_rc_concatenate_test_part;
-
-alter table src_rc_concatenate_test_part partition (ds='2011') concatenate;
-
-show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
-
-select count(1) from src_rc_concatenate_test_part;
-select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part;
-
-drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/alter_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_index.q b/ql/src/test/queries/clientpositive/alter_index.q
deleted file mode 100644
index 3a3d13c..0000000
--- a/ql/src/test/queries/clientpositive/alter_index.q
+++ /dev/null
@@ -1,11 +0,0 @@
-drop index src_index_8 on src;
-
-create index src_index_8 on table default.src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
-desc extended default__src_src_index_8__;
-
-alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3");
-desc extended default__src_src_index_8__;
-
-drop index src_index_8 on default.src;
-
-show tables;
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/queries/clientpositive/authorization_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_index.q b/ql/src/test/queries/clientpositive/authorization_index.q
deleted file mode 100644
index b8dd577..0000000
--- a/ql/src/test/queries/clientpositive/authorization_index.q
+++ /dev/null
@@ -1,13 +0,0 @@
-set hive.test.authz.sstd.hs2.mode=true;
-set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
-set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
-set hive.stats.dbclass=fs;
-set hive.security.authorization.enabled=true;
-create table t1 (a int);
-create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD;
-desc formatted default__t1_t1_index__;
-alter index t1_index on t1 rebuild;
-
-drop table t1;
-
-set hive.security.authorization.enabled=false;
[15/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Signed-off-by: Zoltan Haindrich <ki...@rxd.hu>
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b0d3cb45
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b0d3cb45
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b0d3cb45
Branch: refs/heads/master
Commit: b0d3cb4527e4855c8544349e8f9023a0284a3160
Parents: c2c188e
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Wed Feb 14 09:33:38 2018 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Wed Feb 14 09:33:38 2018 +0100
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 38 +-
.../SemanticAnalysis/HCatSemanticAnalyzer.java | 11 -
...estDDLWithRemoteMetastoreSecondNamenode.java | 54 -
.../org/apache/hadoop/hive/ql/QTestUtil.java | 10 -
.../java/org/apache/hadoop/hive/ql/Driver.java | 1 -
.../org/apache/hadoop/hive/ql/ErrorMsg.java | 3 -
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 248 +---
.../hadoop/hive/ql/exec/FunctionRegistry.java | 14 +-
.../hadoop/hive/ql/exec/OperatorUtils.java | 31 -
.../apache/hadoop/hive/ql/exec/TaskFactory.java | 4 -
.../apache/hadoop/hive/ql/exec/Utilities.java | 5 -
.../hadoop/hive/ql/exec/mr/ExecDriver.java | 5 -
.../hive/ql/index/AbstractIndexHandler.java | 58 -
.../hive/ql/index/AggregateIndexHandler.java | 159 ---
.../apache/hadoop/hive/ql/index/HiveIndex.java | 75 --
.../hadoop/hive/ql/index/HiveIndexHandler.java | 142 --
.../hive/ql/index/HiveIndexQueryContext.java | 101 --
.../hadoop/hive/ql/index/HiveIndexResult.java | 209 ---
.../hive/ql/index/HiveIndexedInputFormat.java | 162 ---
.../hive/ql/index/IndexMetadataChangeTask.java | 100 --
.../hive/ql/index/IndexMetadataChangeWork.java | 67 -
.../hive/ql/index/IndexPredicateAnalyzer.java | 3 +
.../hadoop/hive/ql/index/IndexResult.java | 25 -
.../hive/ql/index/IndexSearchCondition.java | 4 +-
.../hadoop/hive/ql/index/SplitFilter.java | 125 --
.../hive/ql/index/TableBasedIndexHandler.java | 155 ---
.../ql/index/bitmap/BitmapIndexHandler.java | 312 -----
.../hive/ql/index/bitmap/BitmapInnerQuery.java | 70 -
.../hive/ql/index/bitmap/BitmapObjectInput.java | 182 ---
.../ql/index/bitmap/BitmapObjectOutput.java | 126 --
.../hive/ql/index/bitmap/BitmapOuterQuery.java | 84 --
.../hive/ql/index/bitmap/BitmapQuery.java | 29 -
.../ql/index/compact/CompactIndexHandler.java | 408 ------
.../compact/HiveCompactIndexInputFormat.java | 33 -
.../hadoop/hive/ql/io/HiveInputFormat.java | 19 +
.../hadoop/hive/ql/io/orc/ExternalCache.java | 25 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 324 +----
.../hadoop/hive/ql/metadata/HiveUtils.java | 36 -
.../formatting/MetaDataFormatUtils.java | 53 +-
.../hadoop/hive/ql/optimizer/IndexUtils.java | 249 ----
.../hadoop/hive/ql/optimizer/Optimizer.java | 10 +-
.../ql/optimizer/QueryPlanPostProcessor.java | 2 -
.../ql/optimizer/index/RewriteCanApplyCtx.java | 265 ----
.../index/RewriteCanApplyProcFactory.java | 116 --
.../ql/optimizer/index/RewriteGBUsingIndex.java | 359 -----
.../index/RewriteParseContextGenerator.java | 122 --
.../RewriteQueryUsingAggregateIndexCtx.java | 325 -----
.../optimizer/physical/IndexWhereResolver.java | 42 -
.../optimizer/physical/PhysicalOptimizer.java | 3 -
.../physical/index/IndexWhereProcCtx.java | 48 -
.../physical/index/IndexWhereProcessor.java | 255 ----
.../index/IndexWhereTaskDispatcher.java | 175 ---
.../hive/ql/parse/DDLSemanticAnalyzer.java | 289 +---
.../apache/hadoop/hive/ql/parse/HiveParser.g | 105 --
.../hadoop/hive/ql/parse/IndexUpdater.java | 153 ---
.../hive/ql/parse/LoadSemanticAnalyzer.java | 20 +-
.../hive/ql/parse/SemanticAnalyzerFactory.java | 10 -
.../hadoop/hive/ql/parse/TaskCompiler.java | 15 -
.../hadoop/hive/ql/plan/AlterIndexDesc.java | 132 --
.../hadoop/hive/ql/plan/CreateIndexDesc.java | 252 ----
.../org/apache/hadoop/hive/ql/plan/DDLWork.java | 86 --
.../hadoop/hive/ql/plan/DropIndexDesc.java | 75 --
.../hadoop/hive/ql/plan/HiveOperation.java | 5 -
.../org/apache/hadoop/hive/ql/plan/MapWork.java | 14 -
.../hadoop/hive/ql/plan/ShowIndexesDesc.java | 75 --
.../hadoop/hive/ql/ppd/OpProcFactory.java | 5 +-
.../ql/security/authorization/Privilege.java | 3 -
.../authorization/PrivilegeRegistry.java | 1 -
.../security/authorization/PrivilegeType.java | 1 -
.../StorageBasedAuthorizationProvider.java | 4 -
.../AbstractGenericUDFEWAHBitmapBop.java | 146 --
.../ql/udf/generic/GenericUDAFEWAHBitmap.java | 193 ---
.../ql/udf/generic/GenericUDFEWAHBitmapAnd.java | 42 -
.../udf/generic/GenericUDFEWAHBitmapEmpty.java | 100 --
.../ql/udf/generic/GenericUDFEWAHBitmapOr.java | 42 -
.../hadoop/hive/ql/index/MockIndexResult.java | 38 -
.../hadoop/hive/ql/index/MockInputFile.java | 3 +-
.../hive/ql/index/SplitFilterTestCase.java | 153 ---
.../ql/index/TestHiveInputSplitComparator.java | 3 +-
.../hadoop/hive/ql/index/TestIndexType.java | 34 -
.../hadoop/hive/ql/index/TestSplitFilter.java | 296 ----
.../hadoop/hive/ql/metadata/TestHive.java | 122 --
.../parse/authorization/TestPrivilegesV1.java | 1 -
.../alter_concatenate_indexed_table.q | 18 -
.../clientnegative/authorization_create_index.q | 7 -
.../clientnegative/authorization_drop_index.q | 8 -
.../authorization_invalid_priv_v2.q | 2 +-
.../clientnegative/authorization_uri_index.q | 14 -
.../test/queries/clientnegative/bad_indextype.q | 1 -
.../queries/clientnegative/drop_index_failure.q | 3 -
.../clientnegative/index_bitmap_no_map_aggr.q | 7 -
.../clientnegative/index_compact_entry_limit.q | 13 -
.../clientnegative/index_compact_size_limit.q | 14 -
.../queries/clientnegative/merge_negative_1.q | 3 -
.../clientnegative/show_create_table_index.q | 6 -
.../queries/clientnegative/temp_table_index.q | 2 -
.../truncate_column_indexed_table.q | 9 -
.../alter_concatenate_indexed_table.q | 51 -
.../test/queries/clientpositive/alter_index.q | 11 -
.../clientpositive/authorization_index.q | 13 -
.../test/queries/clientpositive/database_drop.q | 38 +-
ql/src/test/queries/clientpositive/drop_index.q | 2 -
.../drop_index_removes_partition_dirs.q | 22 -
.../clientpositive/drop_table_with_index.q | 35 -
.../queries/clientpositive/escape_comments.q | 2 -
ql/src/test/queries/clientpositive/index_auth.q | 20 -
ql/src/test/queries/clientpositive/index_auto.q | 31 -
.../queries/clientpositive/index_auto_empty.q | 26 -
.../clientpositive/index_auto_file_format.q | 23 -
.../clientpositive/index_auto_mult_tables.q | 25 -
.../index_auto_mult_tables_compact.q | 26 -
.../clientpositive/index_auto_multiple.q | 20 -
.../clientpositive/index_auto_partitioned.q | 17 -
.../clientpositive/index_auto_self_join.q | 19 -
.../queries/clientpositive/index_auto_unused.q | 64 -
.../queries/clientpositive/index_auto_update.q | 29 -
.../test/queries/clientpositive/index_bitmap.q | 52 -
.../test/queries/clientpositive/index_bitmap1.q | 22 -
.../test/queries/clientpositive/index_bitmap2.q | 39 -
.../test/queries/clientpositive/index_bitmap3.q | 52 -
.../queries/clientpositive/index_bitmap_auto.q | 57 -
.../index_bitmap_auto_partitioned.q | 17 -
.../clientpositive/index_bitmap_compression.q | 18 -
.../queries/clientpositive/index_bitmap_rc.q | 58 -
.../test/queries/clientpositive/index_compact.q | 46 -
.../queries/clientpositive/index_compact_1.q | 20 -
.../queries/clientpositive/index_compact_2.q | 50 -
.../queries/clientpositive/index_compact_3.q | 23 -
.../index_compact_binary_search.q | 132 --
.../queries/clientpositive/index_compression.q | 18 -
.../queries/clientpositive/index_creation.q | 54 -
.../test/queries/clientpositive/index_in_db.q | 16 -
.../test/queries/clientpositive/index_serde.q | 52 -
.../queries/clientpositive/index_skewtable.q | 23 -
.../test/queries/clientpositive/index_stale.q | 23 -
.../clientpositive/index_stale_partitioned.q | 29 -
.../clientpositive/show_indexes_edge_cases.q | 28 -
.../clientpositive/show_indexes_syntax.q | 24 -
.../special_character_in_tabnames_2.q | 23 -
.../queries/clientpositive/udf_bitmap_and.q | 14 -
.../queries/clientpositive/udf_bitmap_empty.q | 5 -
.../test/queries/clientpositive/udf_bitmap_or.q | 14 -
.../queries/clientpositive/unicode_comments.q | 2 -
ql/src/test/queries/clientpositive/union_view.q | 10 -
.../authorization_invalid_priv_v2.q.out | 4 +-
.../index_bitmap_no_map_aggr.q.out | 20 -
.../index_compact_entry_limit.q.out | 37 -
.../index_compact_size_limit.q.out | 37 -
.../beeline/escape_comments.q.out | 15 -
.../results/clientpositive/database_drop.q.out | 174 ---
.../clientpositive/escape_comments.q.out | 16 -
.../results/clientpositive/index_auth.q.out | 79 --
.../results/clientpositive/index_auto.q.out | 255 ----
.../clientpositive/index_auto_empty.q.out | 101 --
.../clientpositive/index_auto_file_format.q.out | 256 ----
.../clientpositive/index_auto_mult_tables.q.out | 438 ------
.../index_auto_mult_tables_compact.q.out | 485 -------
.../clientpositive/index_auto_multiple.q.out | 164 ---
.../clientpositive/index_auto_partitioned.q.out | 172 ---
.../clientpositive/index_auto_self_join.q.out | 295 ----
.../clientpositive/index_auto_unused.q.out | 388 ------
.../clientpositive/index_auto_update.q.out | 353 -----
.../results/clientpositive/index_bitmap.q.out | 291 ----
.../results/clientpositive/index_bitmap1.q.out | 75 --
.../results/clientpositive/index_bitmap2.q.out | 138 --
.../results/clientpositive/index_bitmap3.q.out | 1262 -----------------
.../clientpositive/index_bitmap_auto.q.out | 1273 ------------------
.../index_bitmap_auto_partitioned.q.out | 150 ---
.../index_bitmap_compression.q.out | 133 --
.../clientpositive/index_bitmap_rc.q.out | 349 -----
.../results/clientpositive/index_compact.q.out | 271 ----
.../clientpositive/index_compact_1.q.out | 70 -
.../clientpositive/index_compact_2.q.out | 317 -----
.../clientpositive/index_compact_3.q.out | 84 --
.../index_compact_binary_search.q.out | 473 -------
.../clientpositive/index_compression.q.out | 158 ---
.../results/clientpositive/index_creation.q.out | 321 -----
.../results/clientpositive/index_in_db.q.out | 57 -
.../results/clientpositive/index_serde.q.out | 242 ----
.../clientpositive/index_skewtable.q.out | 204 ---
.../results/clientpositive/index_stale.q.out | 106 --
.../index_stale_partitioned.q.out | 115 --
.../results/clientpositive/show_functions.q.out | 4 -
.../clientpositive/spark/union_view.q.out | 23 +-
.../special_character_in_tabnames_2.q.out | 165 ---
.../clientpositive/unicode_comments.q.out | 15 -
.../results/clientpositive/union_view.q.out | 317 +----
187 files changed, 124 insertions(+), 19179 deletions(-)
----------------------------------------------------------------------
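For orientation, the statements this patch removes from the language are the ones exercised by the deleted q-tests above. A minimal sketch, paraphrasing those tests (table and index names are illustrative, not taken from any single file):

CREATE INDEX src_index ON TABLE src(key) AS 'COMPACT' WITH DEFERRED REBUILD;  -- register index metadata only; no data is built yet
ALTER INDEX src_index ON src REBUILD;                                         -- populate the backing index table
SHOW INDEXES ON src;                                                          -- list indexes defined on a table
DROP INDEX src_index ON src;                                                  -- remove the index

After this commit none of these parse: the corresponding rules are dropped from HiveParser.g, and the matching DDL plumbing (CreateIndexDesc, AlterIndexDesc, DropIndexDesc, ShowIndexesDesc and their handling in DDLTask/DDLWork) is removed, as the file list above shows.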
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ce96bff..f3980b6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -597,7 +597,7 @@ public class HiveConf extends Configuration {
"When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),
DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
- "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function"),
+ "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/function"),
HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
@@ -1616,10 +1616,8 @@ public class HiveConf extends Configuration {
"Whether to provide the row offset virtual column"),
// Optimizer
- HIVEOPTINDEXFILTER("hive.optimize.index.filter", false,
- "Whether to enable automatic use of indexes"),
- HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false,
- "Whether to update stale indexes automatically"),
+ HIVEOPTINDEXFILTER("hive.optimize.index.filter", false, "Whether to enable automatic use of indexes"),
+
HIVEOPTPPD("hive.optimize.ppd", true,
"Whether to enable predicate pushdown"),
HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true,
@@ -1757,18 +1755,6 @@ public class HiveConf extends Configuration {
"If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" +
"before executing the main query block. -1 will disable this feature."),
- // Indexes
- HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024,
- "Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G
- HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1,
- "Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity."), // infinity
- HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000,
- "The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity."), // 10M
- HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024,
- "The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity."), // 10G
- HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true,
- "Whether or not to use a binary search to find the entries in an index table that match the filter, where possible"),
-
// Statistics
HIVE_STATS_ESTIMATE_STATS("hive.stats.estimate", true,
"Estimate statistics in absence of statistics."),
@@ -2139,9 +2125,6 @@ public class HiveConf extends Configuration {
// For har files
HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"),
- HIVEOPTGBYUSINGINDEX("hive.optimize.index.groupby", false,
- "Whether to enable optimization of group-by queries using Aggregate indexes."),
-
HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"),
"Some select queries can be converted to single FETCH task minimizing latency.\n" +
"Currently the query should be single sourced not having any subquery and should not have\n" +
@@ -2265,12 +2248,6 @@ public class HiveConf extends Configuration {
HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,
"Whether to throw an exception if dynamic partition insert generates empty results."),
- HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", "", "internal variable"),
- HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", "", "internal variable"),
- HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false,
- "When true the HDFS location stored in the index file will be ignored at runtime.\n" +
- "If the data got moved or the name of the cluster got changed, the index data should still be usable."),
-
HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile,file,s3,s3a",
"A comma separated list of acceptable URI schemes for import and export."),
// temporary variable for testing. This is added just to turn off this feature in case of a bug in
@@ -2289,12 +2266,6 @@ public class HiveConf extends Configuration {
HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false,
"should rework the mapred work or not.\n" +
"This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."),
- HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true,
- "If this is set to true, Hive will throw error when doing\n" +
- "'alter table tbl_name [partSpec] concatenate' on a table/partition\n" +
- "that has indexes on it. The reason the user want to set this to true\n" +
- "is because it can help user to avoid handling all index drop, recreation,\n" +
- "rebuild work. This is very helpful for tables with thousands of partitions."),
HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "",
"A list of io exception handler class names. This is used\n" +
"to construct a list exception handlers to handle exceptions thrown\n" +
@@ -3073,7 +3044,7 @@ public class HiveConf extends Configuration {
"hive.tez.bucket.pruning", false,
"When pruning is enabled, filters on bucket columns will be processed by \n" +
"filtering the splits against a bitset of included buckets. This needs predicates \n"+
- "produced by hive.optimize.ppd and hive.optimize.index.filters."),
+ "produced by hive.optimize.ppd and hive.optimize.index.filters."),
TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT(
"hive.tez.bucket.pruning.compat", true,
"When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" +
@@ -4616,7 +4587,6 @@ public class HiveConf extends Configuration {
ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC.varname,
ConfVars.HIVE_COMPAT.varname,
- ConfVars.HIVE_CONCATENATE_CHECK_INDEX.varname,
ConfVars.HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY.varname,
ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION.varname,
ConfVars.HIVE_EXECUTION_ENGINE.varname,
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
index bec1f26..8105e8b 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java
@@ -90,12 +90,6 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
case HiveParser.TOK_DESCDATABASE:
case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
- // Index DDL
- case HiveParser.TOK_ALTERINDEX_PROPERTIES:
- case HiveParser.TOK_CREATEINDEX:
- case HiveParser.TOK_DROPINDEX:
- case HiveParser.TOK_SHOWINDEXES:
-
// View DDL
// "alter view add partition" does not work because of the nature of implementation
// of the DDL in hive. Hive will internally invoke another Driver on the select statement,
@@ -174,11 +168,6 @@ public class HCatSemanticAnalyzer extends HCatSemanticAnalyzerBase {
case HiveParser.TOK_DESCDATABASE:
case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
- // Index DDL
- case HiveParser.TOK_ALTERINDEX_PROPERTIES:
- case HiveParser.TOK_CREATEINDEX:
- case HiveParser.TOK_DROPINDEX:
- case HiveParser.TOK_SHOWINDEXES:
break;
// View DDL
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index 63a7313..de33833 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
import org.apache.hadoop.hive.ql.metadata.*;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
@@ -240,24 +239,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
return table;
}
- private void createIndexAndCheck(Table table, String indexName, String indexLocation) throws Exception {
- executeQuery("CREATE INDEX " + indexName + " ON TABLE " + table.getTableName()
- + " (col1) AS 'COMPACT' WITH DEFERRED REBUILD "
- + buildLocationClause(indexLocation));
- Index index = db.getIndex(table.getTableName(), indexName);
- assertNotNull("Index object is expected for " + indexName , index);
- String location = index.getSd().getLocation();
- if (indexLocation != null) {
- assertEquals("Index should be located in the second filesystem",
- fs2.makeQualified(new Path(indexLocation)).toString(), location);
- }
- else {
- // Since warehouse path is non-qualified the index should be located on second filesystem
- assertEquals("Index should be located in the second filesystem",
- fs2.getUri().getScheme(), new URI(location).getScheme());
- }
- }
-
private void createDatabaseAndCheck(String databaseName, String databaseLocation) throws Exception {
executeQuery("CREATE DATABASE " + databaseName + buildLocationClause(databaseLocation));
Database database = db.getDatabase(databaseName);
@@ -274,41 +255,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
}
}
- public void testCreateTableWithIndexAndPartitionsNonDefaultNameNode() throws Exception {
- assertTrue("Test suite should be initialied", isInitialized );
- final String tableLocation = tmppathFs2 + "/" + Table1Name;
- final String table5Location = tmppathFs2 + "/" + Table5Name;
- final String indexLocation = tmppathFs2 + "/" + Index1Name;
- final String partition3Location = fs.makeQualified(new Path(tmppath + "/p3")).toString();
-
- // Create table with absolute non-qualified path
- Table table1 = createTableAndCheck(Table1Name, tableLocation);
-
- // Create table without location
- createTableAndCheck(Table2Name, null);
-
- // Add partition without location
- addPartitionAndCheck(table1, "p", "p1", null);
-
- // Add partition with absolute location
- addPartitionAndCheck(table1, "p", "p2", tableLocation + "/p2");
-
- // Add partition with qualified location in default fs
- addPartitionAndCheck(table1, "p", "p3", partition3Location);
-
- // Create index with absolute non-qualified path
- createIndexAndCheck(table1, Index1Name, indexLocation);
-
- // Create index with absolute non-qualified path
- createIndexAndCheck(table1, Index2Name, null);
-
- // Create table like Table1Name absolute non-qualified path
- createTableAndCheck(table1, Table5Name, table5Location);
-
- // Create table without location
- createTableAndCheck(table1, Table6Name, null);
- }
-
public void testAlterPartitionSetLocationNonDefaultNameNode() throws Exception {
assertTrue("Test suite should have been initialized", isInitialized);
String tableLocation = tmppathFs2 + "/" + "test_set_part_loc";
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index fcce531..6cd7a13 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -95,12 +95,10 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hive.druid.MiniDruidCluster;
-import org.apache.hive.testutils.HiveTestEnvSetup;
import org.apache.hadoop.hive.llap.LlapItUtils;
import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
import org.apache.hadoop.hive.llap.io.api.LlapProxy;
import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.Task;
@@ -966,14 +964,6 @@ public class QTestUtil {
continue;
}
db.dropTable(dbName, tblName, true, true, fsType == FsType.encrypted_hdfs);
- } else {
- // this table is defined in srcTables, drop all indexes on it
- List<Index> indexes = db.getIndexes(dbName, tblName, (short)-1);
- if (indexes != null && indexes.size() > 0) {
- for (Index index : indexes) {
- db.dropIndex(dbName, tblName, index.getIndexName(), true, true);
- }
- }
}
}
if (!DEFAULT_DATABASE_NAME.equals(dbName)) {
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 23b209e..d00e639 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -764,7 +764,6 @@ public class Driver implements IDriver {
case SHOWTABLES:
case SHOWCOLUMNS:
case SHOWFUNCTIONS:
- case SHOWINDEXES:
case SHOWPARTITIONS:
case SHOWLOCKS:
case SHOWVIEWS:
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 39a613c..883dcda 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -62,7 +62,6 @@ public enum ErrorMsg {
//========================== 10000 range starts here ========================//
INVALID_TABLE(10001, "Table not found", "42S02"),
INVALID_COLUMN(10002, "Invalid column reference"),
- INVALID_INDEX(10003, "Invalid index"),
INVALID_TABLE_OR_COLUMN(10004, "Invalid table alias or column reference"),
AMBIGUOUS_TABLE_OR_COLUMN(10005, "Ambiguous table alias or column reference"),
INVALID_PARTITION(10006, "Partition not found"),
@@ -326,7 +325,6 @@ public enum ErrorMsg {
TABLES_INCOMPATIBLE_SCHEMAS(10235, "Tables have incompatible schemas and their partitions " +
" cannot be exchanged."),
- TRUNCATE_COLUMN_INDEXED_TABLE(10236, "Can not truncate columns from table with indexes"),
TRUNCATE_COLUMN_NOT_RC(10237, "Only RCFileFormat supports column truncation."),
TRUNCATE_COLUMN_ARCHIVED(10238, "Column truncation cannot be performed on archived partitions."),
TRUNCATE_BUCKETED_COLUMN(10239,
@@ -426,7 +424,6 @@ public enum ErrorMsg {
"Grouping sets aggregations (with rollups or cubes) are not allowed when " +
"HIVEMULTIGROUPBYSINGLEREDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"),
CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table metadata"),
- CANNOT_DROP_INDEX(10317, "Error while dropping index"),
INVALID_AST_TREE(10318, "Internal error : Invalid AST"),
ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"),
IO_ERROR(10320, "Error while performing IO operation "),
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 227f6ae..802349f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -51,7 +51,6 @@ import java.util.TreeSet;
import java.util.concurrent.ExecutionException;
import com.google.common.collect.ImmutableSet;
-import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -61,7 +60,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.conf.Constants;
@@ -76,7 +74,6 @@ import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
@@ -85,7 +82,6 @@ import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -108,7 +104,6 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.TxnInfo;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
-import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
@@ -150,7 +145,6 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -172,7 +166,6 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
import org.apache.hadoop.hive.ql.plan.AlterMaterializedViewDesc;
import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
@@ -184,7 +177,6 @@ import org.apache.hadoop.hive.ql.plan.AlterWMTriggerDesc;
import org.apache.hadoop.hive.ql.plan.CacheMetadataDesc;
import org.apache.hadoop.hive.ql.plan.ColStatistics;
import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMMappingDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
@@ -198,7 +190,6 @@ import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc;
import org.apache.hadoop.hive.ql.plan.DescFunctionDesc;
import org.apache.hadoop.hive.ql.plan.DescTableDesc;
import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc;
-import org.apache.hadoop.hive.ql.plan.DropIndexDesc;
import org.apache.hadoop.hive.ql.plan.DropResourcePlanDesc;
import org.apache.hadoop.hive.ql.plan.DropTableDesc;
import org.apache.hadoop.hive.ql.plan.DropWMMappingDesc;
@@ -232,7 +223,6 @@ import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowGrantDesc;
-import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
import org.apache.hadoop.hive.ql.plan.ShowLocksDesc;
import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;
import org.apache.hadoop.hive.ql.plan.ShowResourcePlanDesc;
@@ -394,21 +384,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return createTable(db, crtTbl);
}
- CreateIndexDesc crtIndex = work.getCreateIndexDesc();
- if (crtIndex != null) {
- return createIndex(db, crtIndex);
- }
-
- AlterIndexDesc alterIndex = work.getAlterIndexDesc();
- if (alterIndex != null) {
- return alterIndex(db, alterIndex);
- }
-
- DropIndexDesc dropIdx = work.getDropIdxDesc();
- if (dropIdx != null) {
- return dropIndex(db, dropIdx);
- }
-
CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc();
if (crtTblLike != null) {
return createTableLike(db, crtTblLike);
@@ -589,11 +564,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return grantOrRevokeRole(db, grantOrRevokeRoleDDL);
}
- ShowIndexesDesc showIndexes = work.getShowIndexesDesc();
- if (showIndexes != null) {
- return showIndexes(db, showIndexes);
- }
-
AlterTablePartMergeFilesDesc mergeFilesDesc = work.getMergeFilesDesc();
if (mergeFilesDesc != null) {
return mergeFiles(db, mergeFilesDesc, driverContext);
@@ -743,8 +713,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
if (!mustHaveAppliedChange && !desc.isReplace()) {
return 0; // The modification cannot affect an active plan.
}
- if (appliedRp == null && !mustHaveAppliedChange) return 0; // Replacing an inactive plan.
- if (wm == null && isInTest) return 0; // Skip for tests if WM is not present.
+ if (appliedRp == null && !mustHaveAppliedChange) {
+ return 0; // Replacing an inactive plan.
+ }
+ if (wm == null && isInTest) {
+ return 0; // Skip for tests if WM is not present.
+ }
if ((appliedRp == null) != desc.isForceDeactivate()) {
throw new HiveException("Cannot get a resource plan to apply; or non-null plan on disable");
@@ -1250,134 +1224,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return 0;
}
- private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
-
- if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
- throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine");
- }
-
- db.dropIndex(dropIdx.getTableName(), dropIdx.getIndexName(), dropIdx.isThrowException(), true);
- return 0;
- }
-
- private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException {
-
- if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
- throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine");
- }
-
- if( crtIndex.getSerde() != null) {
- validateSerDe(crtIndex.getSerde());
- }
-
- String indexTableName = crtIndex.getIndexTableName();
- // If location is specified - ensure that it is a full qualified name
- makeLocationQualified(crtIndex, indexTableName);
-
- db
- .createIndex(
- crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(),
- crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(),
- crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(),
- crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(),
- crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(),
- crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment()
- );
- if (HiveUtils.getIndexHandler(conf, crtIndex.getIndexTypeHandlerClass()).usesIndexTable()) {
- Table indexTable = db.getTable(indexTableName);
- addIfAbsentByName(new WriteEntity(indexTable, WriteEntity.WriteType.DDL_NO_LOCK));
- }
- return 0;
- }
-
- private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
-
- if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
- throw new UnsupportedOperationException("Indexes unsupported for Tez execution engine");
- }
-
- String baseTableName = alterIndex.getBaseTableName();
- String indexName = alterIndex.getIndexName();
- Index idx = db.getIndex(baseTableName, indexName);
-
- switch(alterIndex.getOp()) {
- case ADDPROPS:
- idx.getParameters().putAll(alterIndex.getProps());
- break;
- case UPDATETIMESTAMP:
- try {
- Map<String, String> props = new HashMap<String, String>();
- Map<Map<String, String>, Long> basePartTs = new HashMap<Map<String, String>, Long>();
-
- Table baseTbl = db.getTable(baseTableName);
-
- if (baseTbl.isPartitioned()) {
- List<Partition> baseParts;
- if (alterIndex.getSpec() != null) {
- baseParts = db.getPartitions(baseTbl, alterIndex.getSpec());
- } else {
- baseParts = db.getPartitions(baseTbl);
- }
- if (baseParts != null) {
- for (Partition p : baseParts) {
- Path dataLocation = p.getDataLocation();
- FileSystem fs = dataLocation.getFileSystem(db.getConf());
- FileStatus fss = fs.getFileStatus(dataLocation);
- long lastModificationTime = fss.getModificationTime();
-
- FileStatus[] parts = fs.listStatus(dataLocation, FileUtils.HIDDEN_FILES_PATH_FILTER);
- if (parts != null && parts.length > 0) {
- for (FileStatus status : parts) {
- if (status.getModificationTime() > lastModificationTime) {
- lastModificationTime = status.getModificationTime();
- }
- }
- }
- basePartTs.put(p.getSpec(), lastModificationTime);
- }
- }
- } else {
- FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf());
- FileStatus fss = fs.getFileStatus(baseTbl.getPath());
- basePartTs.put(null, fss.getModificationTime());
- }
- for (Map<String, String> spec : basePartTs.keySet()) {
- if (spec != null) {
- props.put(spec.toString(), basePartTs.get(spec).toString());
- } else {
- props.put("base_timestamp", basePartTs.get(null).toString());
- }
- }
- idx.getParameters().putAll(props);
- } catch (HiveException e) {
- throw new HiveException("ERROR: Failed to update index timestamps");
- } catch (IOException e) {
- throw new HiveException("ERROR: Failed to look up timestamps on filesystem");
- }
-
- break;
- default:
- console.printError("Unsupported Alter command");
- return 1;
- }
-
- // set last modified by properties
- if (!updateModifiedParameters(idx.getParameters(), conf)) {
- return 1;
- }
-
- try {
- db.alterIndex(baseTableName, indexName, idx);
- } catch (InvalidOperationException e) {
- console.printError("Invalid alter operation: " + e.getMessage());
- LOG.info("alter index: ", e);
- return 1;
- } catch (HiveException e) {
- console.printError("Invalid alter operation: " + e.getMessage());
- return 1;
- }
- return 0;
- }
/**
* Alters a materialized view.
@@ -2782,57 +2628,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
return builder;
}
- /**
- * Write a list of indexes to a file.
- *
- * @param db
- * The database in question.
- * @param showIndexes
- * These are the indexes we're interested in.
- * @return Returns 0 when execution succeeds and above 0 if it fails.
- * @throws HiveException
- * Throws this exception if an unexpected error occurs.
- */
- private int showIndexes(Hive db, ShowIndexesDesc showIndexes) throws HiveException {
- // get the indexes for the table and populate the output
- String tableName = showIndexes.getTableName();
- Table tbl = null;
- List<Index> indexes = null;
-
- tbl = db.getTable(tableName);
-
- indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1);
-
- // In case the query is served by HiveServer2, don't pad it with spaces,
- // as HiveServer2 output is consumed by JDBC/ODBC clients.
- boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
-
- // write the results in the file
- DataOutputStream outStream = getOutputStream(showIndexes.getResFile());
- try {
- if (showIndexes.isFormatted()) {
- // column headers
- outStream.write(MetaDataFormatUtils.getIndexColumnsHeader().getBytes(StandardCharsets.UTF_8));
- }
-
- for (Index index : indexes)
- {
- outStream.write(MetaDataFormatUtils.getIndexInformation(index, isOutputPadded).getBytes(StandardCharsets.UTF_8));
- }
- } catch (FileNotFoundException e) {
- LOG.info("show indexes: ", e);
- throw new HiveException(e.toString());
- } catch (IOException e) {
- LOG.info("show indexes: ", e);
- throw new HiveException(e.toString());
- } catch (Exception e) {
- throw new HiveException(e.toString());
- } finally {
- IOUtils.closeStream(outStream);
- }
-
- return 0;
- }
/**
* Write a list of the available databases to a file.
@@ -5294,37 +5089,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
}
/**
- * Make qualified location for an index .
- *
- * @param crtIndex
- * Create index descriptor.
- * @param name
- * Object name.
- */
- private void makeLocationQualified(CreateIndexDesc crtIndex, String name) throws HiveException
- {
- Path path = null;
- if (crtIndex.getLocation() == null) {
- // Location is not set, leave it as-is if index doesn't belong to default DB
- // Currently all indexes are created in current DB only
- if (Utilities.getDatabaseName(name).equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) {
- // Default database name path is always ignored, use METASTOREWAREHOUSE and object name
- // instead
- String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE);
- String tableName = Utilities.getTableName(name);
- path = new Path(warehouse, tableName.toLowerCase());
- }
- }
- else {
- path = new Path(crtIndex.getLocation());
- }
-
- if (path != null) {
- crtIndex.setLocation(Utilities.getQualifiedPath(conf, path));
- }
- }
-
- /**
* Make qualified location for a database .
*
* @param database
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index f7801bb..32fc257 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -383,10 +383,6 @@ public final class FunctionRegistry {
system.registerGenericUDF("between", GenericUDFBetween.class);
system.registerGenericUDF("in_bloom_filter", GenericUDFInBloomFilter.class);
- system.registerGenericUDF("ewah_bitmap_and", GenericUDFEWAHBitmapAnd.class);
- system.registerGenericUDF("ewah_bitmap_or", GenericUDFEWAHBitmapOr.class);
- system.registerGenericUDF("ewah_bitmap_empty", GenericUDFEWAHBitmapEmpty.class);
-
// Utility UDFs
system.registerUDF("version", UDFVersion.class, false);
@@ -447,8 +443,6 @@ public final class FunctionRegistry {
system.registerGenericUDAF("ngrams", new GenericUDAFnGrams());
system.registerGenericUDAF("context_ngrams", new GenericUDAFContextNGrams());
- system.registerGenericUDAF("ewah_bitmap", new GenericUDAFEWAHBitmap());
-
system.registerGenericUDAF("compute_stats", new GenericUDAFComputeStats());
system.registerGenericUDAF("bloom_filter", new GenericUDAFBloomFilter());
system.registerUDAF("percentile", UDAFPercentile.class);
@@ -1661,7 +1655,9 @@ public final class FunctionRegistry {
public static boolean isPermanentFunction(ExprNodeGenericFuncDesc fnExpr) {
GenericUDF udf = fnExpr.getGenericUDF();
- if (udf == null) return false;
+ if (udf == null) {
+ return false;
+ }
Class<?> clazz = udf.getClass();
if (udf instanceof GenericUDFBridge) {
@@ -1787,7 +1783,9 @@ public final class FunctionRegistry {
*/
public static boolean isBuiltInFuncExpr(ExprNodeGenericFuncDesc fnExpr) {
GenericUDF udf = fnExpr.getGenericUDF();
- if (udf == null) return false;
+ if (udf == null) {
+ return false;
+ }
Class clazz = udf.getClass();
if (udf instanceof GenericUDFBridge) {
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
index 5d2c759..c2959d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
@@ -197,33 +197,6 @@ public class OperatorUtils {
return lastOp;
}
- /**
- * Starting at the input operator, finds the last operator upstream that is
- * an instance of the input class.
- *
- * @param op the starting operator
- * @param clazz the class that the operator that we are looking for instantiates
- * @return null if no such operator exists or multiple branches are found in
- * the stream, the last operator otherwise
- */
- @SuppressWarnings("unchecked")
- public static <T> T findLastOperatorUpstream(Operator<?> op, Class<T> clazz) {
- Operator<?> currentOp = op;
- T lastOp = null;
- while (currentOp != null) {
- if (clazz.isInstance(currentOp)) {
- lastOp = (T) currentOp;
- }
- if (currentOp.getParentOperators().size() == 1) {
- currentOp = currentOp.getParentOperators().get(0);
- }
- else {
- currentOp = null;
- }
- }
- return lastOp;
- }
-
public static void iterateParents(Operator<?> operator, Function<Operator<?>> function) {
iterateParents(operator, function, new HashSet<Operator<?>>());
}
@@ -240,10 +213,6 @@ public class OperatorUtils {
}
}
- public static boolean sameRowSchema(Operator<?> operator1, Operator<?> operator2) {
- return operator1.getSchema().equals(operator2.getSchema());
- }
-
/**
* Given an operator and a set of classes, it classifies the operators it finds
* in the stream depending on the classes they instantiate.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
index 85cef86..83590e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadTask;
import org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadWork;
import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork;
import org.apache.hadoop.hive.ql.io.merge.MergeFileTask;
import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
@@ -104,8 +102,6 @@ public final class TaskFactory {
DependencyCollectionTask.class));
taskvec.add(new TaskTuple<ImportCommitWork>(ImportCommitWork.class,
ImportCommitTask.class));
- taskvec.add(new TaskTuple<IndexMetadataChangeWork>(IndexMetadataChangeWork.class,
- IndexMetadataChangeTask.class));
taskvec.add(new TaskTuple<TezWork>(TezWork.class, TezTask.class));
taskvec.add(new TaskTuple<SparkWork>(SparkWork.class, SparkTask.class));
taskvec.add(new TaskTuple<>(ReplDumpWork.class, ReplDumpTask.class));
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8f44c94..8248442 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3567,11 +3567,6 @@ public final class Utilities {
if (mWork.getInputformat() != null) {
HiveConf.setVar(conf, var, mWork.getInputformat());
}
- if (mWork.getIndexIntermediateFile() != null) {
- conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile());
- conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile());
- }
-
// Intentionally overwrites anything the user may have put here
conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index b436e80..e7fe4a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -574,11 +574,6 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
if (mWork.getInputformat() != null) {
HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat());
}
- if (mWork.getIndexIntermediateFile() != null) {
- conf.set(ConfVars.HIVE_INDEX_COMPACT_FILE.varname, mWork.getIndexIntermediateFile());
- conf.set(ConfVars.HIVE_INDEX_BLOCKFILTER_FILE.varname, mWork.getIndexIntermediateFile());
- }
-
// Intentionally overwrites anything the user may have put here
conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java
deleted file mode 100644
index 3424600..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/AbstractIndexHandler.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-
-/**
- * Abstract base class for index handlers. This is provided as insulation
- * so that as HiveIndexHandler evolves, default implementations of new
- * methods can be added here in order to avoid breaking existing
- * plugin implementations.
- */
-public abstract class AbstractIndexHandler implements HiveIndexHandler {
-
- public static String getColumnNames(List<FieldSchema> fieldSchemas) {
- StringBuilder sb = new StringBuilder();
- for (int i = 0; i < fieldSchemas.size(); i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(HiveUtils.unparseIdentifier(fieldSchemas.get(i).getName()));
- }
- return sb.toString();
- }
-
- public void generateIndexQuery(Index index, ExprNodeDesc predicate,
- ParseContext pctx, HiveIndexQueryContext queryContext) {
- queryContext.setQueryTasks(null);
- return;
- }
-
- public boolean checkQuerySize(long inputSize, HiveConf conf) {
- return false;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
deleted file mode 100644
index fb77096..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-/**
- * Index handler for indexes that have aggregate functions on indexed columns.
- *
- */
-public class AggregateIndexHandler extends CompactIndexHandler {
-
- @Override
- public void analyzeIndexDefinition(Table baseTable, Index index,
- Table indexTable) throws HiveException {
- StorageDescriptor storageDesc = index.getSd();
- if (this.usesIndexTable() && indexTable != null) {
- StorageDescriptor indexTableSd = storageDesc.deepCopy();
- List<FieldSchema> indexTblCols = indexTableSd.getCols();
- FieldSchema bucketFileName = new FieldSchema("_bucketname", "string", "");
- indexTblCols.add(bucketFileName);
- FieldSchema offSets = new FieldSchema("_offsets", "array<bigint>", "");
- indexTblCols.add(offSets);
- Map<String, String> paraList = index.getParameters();
-
- if(paraList != null && paraList.containsKey("AGGREGATES")){
- String propValue = paraList.get("AGGREGATES");
- if(propValue.contains(",")){
- String[] aggFuncs = propValue.split(",");
- for (int i = 0; i < aggFuncs.length; i++) {
- createAggregationFunction(indexTblCols, aggFuncs[i]);
- }
- }else{
- createAggregationFunction(indexTblCols, propValue);
- }
- }
- indexTable.setSd(indexTableSd);
- }
- }
-
- private void createAggregationFunction(List<FieldSchema> indexTblCols, String property){
- String[] aggFuncCol = property.split("\\(");
- String funcName = aggFuncCol[0];
- String colName = aggFuncCol[1].substring(0, aggFuncCol[1].length() - 1);
- if(colName.contains("*")){
- colName = colName.replace("*", "all");
- }
- FieldSchema aggregationFunction =
- new FieldSchema("_" + funcName + "_of_" + colName + "", "bigint", "");
- indexTblCols.add(aggregationFunction);
- }
-
- @Override
- protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs,
- Set<WriteEntity> outputs,
- Index index, boolean partitioned,
- PartitionDesc indexTblPartDesc, String indexTableName,
- PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
- LineageState lineageState) {
-
- List<FieldSchema> indexField = index.getSd().getCols();
- String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField);
-
- //form a new insert overwrite query.
- StringBuilder command= new StringBuilder();
- Map<String, String> partSpec = indexTblPartDesc.getPartSpec();
-
- command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName));
- if (partitioned && indexTblPartDesc != null) {
- command.append(" PARTITION ( ");
- List<String> ret = getPartKVPairStringArray((LinkedHashMap<String, String>) partSpec);
- for (int i = 0; i < ret.size(); i++) {
- String partKV = ret.get(i);
- command.append(partKV);
- if (i < ret.size() - 1) {
- command.append(",");
- }
- }
- command.append(" ) ");
- }
-
- command.append(" SELECT ");
- command.append(indexCols);
- command.append(",");
-
- command.append(VirtualColumn.FILENAME.getName());
- command.append(",");
- command.append(" collect_set (");
- command.append(VirtualColumn.BLOCKOFFSET.getName());
- command.append(") ");
- command.append(",");
-
- assert indexField.size()==1;
-
- Map<String, String> paraList = index.getParameters();
- if(paraList != null && paraList.containsKey("AGGREGATES")){
- command.append(paraList.get("AGGREGATES") + " ");
- }
-
- command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName));
- Map<String, String> basePartSpec = baseTablePartDesc.getPartSpec();
- if(basePartSpec != null) {
- command.append(" WHERE ");
- List<String> pkv = getPartKVPairStringArray((LinkedHashMap<String, String>) basePartSpec);
- for (int i = 0; i < pkv.size(); i++) {
- String partKV = pkv.get(i);
- command.append(partKV);
- if (i < pkv.size() - 1) {
- command.append(" AND ");
- }
- }
- }
- command.append(" GROUP BY ");
- command.append(indexCols + ", " + VirtualColumn.FILENAME.getName());
-
- HiveConf builderConf = new HiveConf(getConf(), AggregateIndexHandler.class);
- builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false);
- builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false);
- builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
- Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
- command, (LinkedHashMap<String, String>) partSpec, indexTableName, dbName, lineageState);
- return rootTask;
- }
- }
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
deleted file mode 100644
index 30ae484..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler;
-import org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Holds index related constants
- */
-public class HiveIndex {
- public static final Logger l4j = LoggerFactory.getLogger("HiveIndex");
- public static final String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime";
-
- public static enum IndexType {
- AGGREGATE_TABLE("aggregate", AggregateIndexHandler.class.getName()),
- COMPACT_SUMMARY_TABLE("compact", CompactIndexHandler.class.getName()),
- BITMAP_TABLE("bitmap", BitmapIndexHandler.class.getName());
-
- private IndexType(String indexType, String className) {
- indexTypeName = indexType;
- this.handlerClsName = className;
- }
-
- private final String indexTypeName;
- private final String handlerClsName;
-
- public String getName() {
- return indexTypeName;
- }
-
- public String getHandlerClsName() {
- return handlerClsName;
- }
- }
-
- public static IndexType getIndexType(String name) {
- IndexType[] types = IndexType.values();
- for (IndexType type : types) {
- if(type.getName().equals(name.toLowerCase())) {
- return type;
- }
- }
- return null;
- }
-
- public static IndexType getIndexTypeByClassName(String className) {
- IndexType[] types = IndexType.values();
- for (IndexType type : types) {
- if(type.getHandlerClsName().equals(className)) {
- return type;
- }
- }
- return null;
- }
-
-}
-
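For readers tracking what the DDL loses here: the IndexType enum above was the only mapping from the keyword in CREATE INDEX ... AS 'bitmap'/'compact'/'aggregate' to the handler class that implemented it. A minimal, illustrative lookup against the now-deleted class (handler name copied from the enum above; this fragment naturally no longer compiles after this patch):

    // Illustrative fragment only: resolve the handler class behind CREATE INDEX ... AS 'BITMAP'.
    HiveIndex.IndexType type = HiveIndex.getIndexType("BITMAP");      // matching is case-insensitive
    String handlerClass = (type != null) ? type.getHandlerClsName() : null;
    // handlerClass -> "org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler"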
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
deleted file mode 100644
index 8facd91..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.index;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-/**
- * HiveIndexHandler defines a pluggable interface for adding new index handlers
- * to Hive.
- */
-public interface HiveIndexHandler extends Configurable {
- /**
- * Determines whether this handler implements indexes by creating an index
- * table.
- *
- * @return true if index creation implies creation of an index table in Hive;
- * false if the index representation is not stored in a Hive table
- */
- boolean usesIndexTable();
-
- /**
- * Requests that the handler validate an index definition and fill in
- * additional information about its stored representation.
- *
- * @param baseTable
- * the definition of the table being indexed
- *
- * @param index
- * the definition of the index being created
- *
- * @param indexTable
- * a partial definition of the index table to be used for storing the
- * index representation, or null if usesIndexTable() returns false;
- * the handler can augment the index's storage descriptor (e.g. with
- * information about input/output format) and/or the index table's
- * definition (typically with additional columns containing the index
- * representation, e.g. pointers into HDFS).
- *
- * @throws HiveException if the index definition is invalid with respect to
- * either the base table or the supplied index table definition
- */
- void analyzeIndexDefinition(
- org.apache.hadoop.hive.metastore.api.Table baseTable,
- org.apache.hadoop.hive.metastore.api.Index index,
- org.apache.hadoop.hive.metastore.api.Table indexTable)
- throws HiveException;
-
- /**
- * Requests that the handler generate a plan for building the index; the plan
- * should read the base table and write out the index representation.
- *
- * @param baseTbl
- * the definition of the table being indexed
- *
- * @param index
- * the definition of the index
- *
- * @param baseTblPartitions
- * list of base table partitions with each element mirrors to the
- * corresponding one in indexTblPartitions
- *
- * @param indexTbl
- * the definition of the index table, or null if usesIndexTable()
- * returns null
- *
- * @param inputs
- * inputs for hooks, supplemental outputs going
- * along with the return value
- *
- * @param outputs
- * outputs for hooks, supplemental outputs going
- * along with the return value
- *
- * @param lineageState
- * tracks Lineage for the query
- *
- * @return list of tasks to be executed in parallel for building the index
- *
- * @throws HiveException if plan generation fails
- */
- List<Task<?>> generateIndexBuildTaskList(
- org.apache.hadoop.hive.ql.metadata.Table baseTbl,
- org.apache.hadoop.hive.metastore.api.Index index,
- List<Partition> indexTblPartitions, List<Partition> baseTblPartitions,
- org.apache.hadoop.hive.ql.metadata.Table indexTbl,
- Set<ReadEntity> inputs, Set<WriteEntity> outputs, LineageState lineageState)
- throws HiveException;
-
- /**
- * Generate the list of tasks required to run an index optimized sub-query for the
- * given predicate, using the given indexes. If multiple indexes are
- * provided, it is up to the handler whether to use none, one, some or all of
- * them. The supplied predicate may reference any of the columns from any of
- * the indexes. If the handler decides to use more than one index, it is
- * responsible for generating tasks to combine their search results
- * (e.g. performing a JOIN on the result).
- * @param indexes
- * @param predicate
- * @param pctx
- * @param queryContext contains results, such as query tasks and input configuration
- */
- void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
- ParseContext pctx, HiveIndexQueryContext queryContext);
-
- /**
- * Check the size of an input query to make sure it fits within the bounds
- *
- * @param inputSize size (in bytes) of the query in question
- * @param conf
- * @return true if query is within the bounds
- */
- boolean checkQuerySize(long inputSize, HiveConf conf);
-}
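Since this patch removes the whole plugin SPI, a compact reminder of what a third-party handler had to provide may help anyone auditing their own index handlers before upgrading. The skeleton below is illustrative only -- the class name NoopIndexHandler is made up -- but every method signature is copied from the interface deleted above (plus setConf/getConf from Hadoop's Configurable):

    package org.apache.hadoop.hive.ql.index;

    import java.util.Collections;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.Index;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.parse.ParseContext;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
    import org.apache.hadoop.hive.ql.session.LineageState;

    /** Hypothetical do-nothing handler; shows the contract a plugin had to satisfy. */
    public class NoopIndexHandler implements HiveIndexHandler {

      private Configuration conf;

      @Override
      public void setConf(Configuration conf) { this.conf = conf; }   // from Configurable

      @Override
      public Configuration getConf() { return conf; }

      @Override
      public boolean usesIndexTable() {
        return false;   // the index representation is not stored in a Hive table
      }

      @Override
      public void analyzeIndexDefinition(
          org.apache.hadoop.hive.metastore.api.Table baseTable, Index index,
          org.apache.hadoop.hive.metastore.api.Table indexTable) throws HiveException {
        // validate the definition here; nothing to augment for a do-nothing handler
      }

      @Override
      public List<Task<?>> generateIndexBuildTaskList(
          org.apache.hadoop.hive.ql.metadata.Table baseTbl, Index index,
          List<Partition> indexTblPartitions, List<Partition> baseTblPartitions,
          org.apache.hadoop.hive.ql.metadata.Table indexTbl,
          Set<ReadEntity> inputs, Set<WriteEntity> outputs, LineageState lineageState)
          throws HiveException {
        return Collections.emptyList();   // no tasks: nothing to build
      }

      @Override
      public void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
          ParseContext pctx, HiveIndexQueryContext queryContext) {
        queryContext.setQueryTasks(null); // null tasks = the index cannot serve this predicate
      }

      @Override
      public boolean checkQuerySize(long inputSize, HiveConf conf) {
        return false;                     // never claim a query for index optimization
      }
    }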
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java
deleted file mode 100644
index b736541..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexQueryContext.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.Serializable;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-
-/**
- * Used to pass information between the IndexProcessor and the plugin
- * IndexHandler during query processing
- *
- */
-public class HiveIndexQueryContext {
-
- private Set<ReadEntity> additionalSemanticInputs; // additional inputs to add to the parse context when
- // merging the index query tasks
- private String indexInputFormat; // input format to set on the TableScanOperator to activate indexing
- private String indexIntermediateFile; // name of intermediate file written by the index query for the
- // TableScanOperator to use
- private List<Task<? extends Serializable>> queryTasks; // list of tasks that will execute the index query and write
- // results to a temporary file
- private ExprNodeDesc residualPredicate; // predicate that could not be processed by an index handler
- // and should be used on the base table scan (see HIVE-2115)
- private Set<Partition> queryPartitions; // partitions accessed by the original query
-
- public HiveIndexQueryContext() {
- this.additionalSemanticInputs = null;
- this.indexInputFormat = null;
- this.indexIntermediateFile = null;
- this.queryTasks = null;
- }
-
- public Set<ReadEntity> getAdditionalSemanticInputs() {
- return additionalSemanticInputs;
- }
- public void addAdditionalSemanticInputs(Set<ReadEntity> additionalParseInputs) {
- if (this.additionalSemanticInputs == null) {
- this.additionalSemanticInputs = new LinkedHashSet<ReadEntity>();
- }
- this.additionalSemanticInputs.addAll(additionalParseInputs);
- }
-
- public String getIndexInputFormat() {
- return indexInputFormat;
- }
- public void setIndexInputFormat(String indexInputFormat) {
- this.indexInputFormat = indexInputFormat;
- }
-
- public String getIndexIntermediateFile() {
- return indexIntermediateFile;
- }
- public void setIndexIntermediateFile(String indexIntermediateFile) {
- this.indexIntermediateFile = indexIntermediateFile;
- }
-
- public List<Task<? extends Serializable>> getQueryTasks() {
- return queryTasks;
- }
- public void setQueryTasks(List<Task<? extends Serializable>> indexQueryTasks) {
- this.queryTasks = indexQueryTasks;
- }
-
- public void setResidualPredicate(ExprNodeDesc residualPredicate) {
- this.residualPredicate = residualPredicate;
- }
-
- public ExprNodeDesc getResidualPredicate() {
- return residualPredicate;
- }
-
- public Set<Partition> getQueryPartitions() {
- return queryPartitions;
- }
-
- public void setQueryPartitions(Set<Partition> queryPartitions) {
- this.queryPartitions = queryPartitions;
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
deleted file mode 100644
index 6697066..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.index;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
-import org.apache.hadoop.hive.serde2.lazy.LazySerDeParameters;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.LineRecordReader.LineReader;
-
-/**
- * HiveIndexResult parses the input stream from an index query
- * to generate a list of file splits to query.
- */
-public class HiveIndexResult implements IndexResult {
-
- public static final Logger l4j =
- LoggerFactory.getLogger(HiveIndexResult.class.getSimpleName());
-
- // IndexBucket
- static class IBucket {
- private String name = null;
- private final SortedSet<Long> offsets = new TreeSet<Long>();
-
- public IBucket(String n) {
- name = n;
- }
-
- public void add(Long offset) {
- offsets.add(offset);
- }
-
- public String getName() {
- return name;
- }
-
- public SortedSet<Long> getOffsets() {
- return offsets;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (obj.getClass() != this.getClass()) {
- return false;
- }
- return (((IBucket) obj).name.compareToIgnoreCase(this.name) == 0);
- }
- }
-
- JobConf job = null;
- BytesRefWritable[] bytesRef = new BytesRefWritable[2];
- boolean ignoreHdfsLoc = false;
-
- public HiveIndexResult(List<String> indexFiles, JobConf conf) throws IOException,
- HiveException {
- job = conf;
-
- bytesRef[0] = new BytesRefWritable();
- bytesRef[1] = new BytesRefWritable();
- ignoreHdfsLoc = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INDEX_IGNORE_HDFS_LOC);
-
- if (indexFiles != null && indexFiles.size() > 0) {
- List<Path> paths = new ArrayList<Path>();
- for (String indexFile : indexFiles) {
- Path indexFilePath = new Path(indexFile);
- FileSystem fs = indexFilePath.getFileSystem(conf);
- FileStatus indexStat = fs.getFileStatus(indexFilePath);
- if (indexStat.isDir()) {
- FileStatus[] fss = fs.listStatus(indexFilePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
- for (FileStatus f : fss) {
- paths.add(f.getPath());
- }
- } else {
- paths.add(indexFilePath);
- }
- }
-
- long maxEntriesToLoad = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES);
- if (maxEntriesToLoad < 0) {
- maxEntriesToLoad=Long.MAX_VALUE;
- }
-
- long lineCounter = 0;
- for (Path indexFinalPath : paths) {
- FileSystem fs = indexFinalPath.getFileSystem(conf);
- FSDataInputStream ifile = fs.open(indexFinalPath);
- LineReader lr = new LineReader(ifile, conf);
- try {
- Text line = new Text();
- while (lr.readLine(line) > 0) {
- if (++lineCounter > maxEntriesToLoad) {
- throw new HiveException("Number of compact index entries loaded during the query exceeded the maximum of " + maxEntriesToLoad
- + " set in " + HiveConf.ConfVars.HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES.varname);
- }
- add(line);
- }
- }
- finally {
- // this will close the input stream
- lr.close();
- }
- }
- }
- }
-
- Map<String, IBucket> buckets = new HashMap<String, IBucket>();
-
- private void add(Text line) throws HiveException {
- String l = line.toString();
- byte[] bytes = l.getBytes();
- int firstEnd = 0;
- int i = 0;
- for (int index = 0; index < bytes.length; index++) {
- if (bytes[index] == LazySerDeParameters.DefaultSeparators[0]) {
- i++;
- firstEnd = index;
- }
- }
- if (i > 1) {
- throw new HiveException(
- "Bad index file row (index file should only contain two columns: bucket_file_name and offset lists.) ."
- + line.toString());
- }
- String bucketFileName = new String(bytes, 0, firstEnd);
-
- if (ignoreHdfsLoc) {
- Path tmpPath = new Path(bucketFileName);
- bucketFileName = tmpPath.toUri().getPath();
- }
- IBucket bucket = buckets.get(bucketFileName);
- if (bucket == null) {
- bucket = new IBucket(bucketFileName);
- buckets.put(bucketFileName, bucket);
- }
-
- int currentStart = firstEnd + 1;
- int currentEnd = firstEnd + 1;
- for (; currentEnd < bytes.length; currentEnd++) {
- if (bytes[currentEnd] == LazySerDeParameters.DefaultSeparators[1]) {
- String one_offset = new String(bytes, currentStart, currentEnd
- - currentStart);
- Long offset = Long.parseLong(one_offset);
- bucket.getOffsets().add(offset);
- currentStart = currentEnd + 1;
- }
- }
- String one_offset = new String(bytes, currentStart, currentEnd
- - currentStart);
- bucket.getOffsets().add(Long.parseLong(one_offset));
- }
-
- @Override
- public boolean contains(FileSplit split) throws HiveException {
-
- if (buckets == null) {
- return false;
- }
- String bucketName = split.getPath().toString();
- IBucket bucket = buckets.get(bucketName);
- if (bucket == null) {
- bucketName = split.getPath().toUri().getPath();
- bucket = buckets.get(bucketName);
- if (bucket == null) {
- return false;
- }
- }
-
- for (Long offset : bucket.getOffsets()) {
- if ((offset >= split.getStart())
- && (offset <= split.getStart() + split.getLength())) {
- return true;
- }
- }
- return false;
- }
-}
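A note for anyone migrating off compact indexes: add() above parsed each index row as a bucket file name, then Hive's default field separator, then the block offsets joined by the default collection separator; rows containing more than one field separator were rejected as bad index rows. Assuming the standard defaults (\u0001 between fields and \u0002 inside the offset list, which is what LazySerDeParameters.DefaultSeparators normally carries), a row looked roughly like this illustrative fragment (file name and offsets are made up):

    // Illustrative only: the row layout HiveIndexResult.add() expected.
    String row = "hdfs://nn/warehouse/src/000000_0" + "\u0001" + "2088" + "\u0002" + "2632";
    String[] cols = row.split("\u0001");          // [bucket file name, offset list]
    String bucketFile = cols[0];
    String[] offsets = cols[1].split("\u0002");   // {"2088", "2632"}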
[06/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap3.q.out b/ql/src/test/results/clientpositive/index_bitmap3.q.out
deleted file mode 100644
index 87c9c36..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap3.q.out
+++ /dev/null
@@ -1,1262 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-PREHOOK: query: ALTER INDEX src1_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src1_index__
-POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src2_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src2_index__
-POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__src_src1_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src1_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0]
-0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0]
-0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0]
-10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0]
-100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0]
-100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0]
-103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0]
-103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0]
-104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0]
-104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0]
-105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0]
-11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0]
-111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0]
-113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0]
-113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0]
-114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0]
-116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0]
-118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0]
-118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0]
-119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0]
-119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0]
-119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0]
-12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0]
-12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0]
-120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0]
-120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0]
-125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0]
-125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0]
-126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0]
-128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0]
-128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0]
-128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0]
-129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0]
-129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0]
-131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0]
-133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0]
-134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0]
-134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0]
-136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0]
-137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0]
-137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0]
-143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0]
-145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0]
-146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0]
-146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0]
-149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0]
-149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0]
-15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0]
-15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0]
-150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0]
-152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0]
-152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0]
-153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0]
-155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0]
-156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0]
-157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0]
-158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0]
-160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0]
-162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0]
-163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0]
-164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0]
-164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0]
-165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0]
-165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0]
-166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0]
-167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0]
-167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0]
-167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0]
-168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0]
-17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0]
-170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0]
-172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0]
-172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0]
-174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0]
-174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0]
-175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0]
-175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0]
-176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0]
-176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0]
-177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0]
-178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0]
-179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0]
-179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0]
-18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0]
-18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0]
-180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0]
-181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0]
-183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0]
-186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0]
-187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0]
-187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0]
-187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0]
-189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0]
-19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0]
-190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0]
-191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0]
-191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0]
-192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0]
-193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0]
-193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0]
-193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0]
-194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0]
-195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0]
-195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0]
-196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0]
-197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0]
-197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0]
-199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0]
-199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0]
-199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0]
-2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0]
-20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0]
-200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0]
-200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0]
-201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0]
-202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0]
-203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0]
-203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0]
-205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0]
-205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0]
-207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0]
-207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0]
-208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0]
-208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0]
-208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0]
-209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0]
-209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0]
-213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0]
-213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0]
-214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0]
-216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0]
-216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0]
-217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0]
-217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0]
-218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0]
-219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0]
-219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0]
-221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0]
-221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0]
-222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0]
-223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0]
-223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0]
-224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0]
-224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0]
-226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0]
-228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0]
-229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0]
-229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0]
-233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0]
-233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0]
-235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0]
-237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0]
-237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0]
-238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0]
-238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0]
-239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0]
-239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0]
-24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0]
-24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0]
-241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0]
-242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0]
-242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0]
-244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0]
-247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0]
-248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0]
-249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0]
-252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0]
-255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0]
-255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0]
-256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0]
-256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0]
-257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0]
-258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0]
-26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0]
-26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0]
-260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0]
-262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0]
-263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0]
-265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0]
-265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0]
-266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0]
-27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0]
-272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0]
-272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0]
-273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0]
-273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0]
-273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0]
-274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0]
-275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0]
-278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0]
-278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0]
-28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0]
-280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0]
-280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0]
-281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0]
-281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0]
-282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0]
-282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0]
-283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0]
-284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0]
-285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0]
-286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0]
-287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0]
-288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0]
-288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0]
-289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0]
-291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0]
-292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0]
-296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0]
-298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0]
-298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0]
-298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0]
-30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0]
-302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0]
-305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0]
-306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0]
-307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0]
-307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0]
-308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0]
-309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0]
-309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0]
-310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0]
-311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0]
-311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0]
-311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0]
-315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0]
-316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0]
-316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0]
-316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0]
-317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0]
-317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0]
-318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0]
-318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0]
-318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0]
-321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0]
-321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0]
-322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0]
-322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0]
-323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0]
-325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0]
-325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0]
-327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0]
-327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0]
-327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0]
-33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0]
-331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0]
-331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0]
-332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0]
-333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0]
-333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0]
-335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0]
-336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0]
-338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0]
-339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0]
-34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0]
-341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0]
-342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0]
-342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0]
-344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0]
-344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0]
-345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0]
-35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0]
-35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0]
-35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0]
-351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0]
-353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0]
-353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0]
-356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0]
-360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0]
-362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0]
-364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0]
-365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0]
-366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0]
-367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0]
-367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0]
-368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0]
-369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0]
-369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0]
-369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0]
-37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0]
-37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0]
-373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0]
-374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0]
-375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0]
-377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0]
-378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0]
-379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0]
-382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0]
-382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0]
-384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0]
-384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0]
-384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0]
-386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0]
-389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0]
-392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0]
-393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0]
-394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0]
-395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0]
-395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0]
-396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0]
-396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0]
-396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0]
-397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0]
-397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0]
-399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0]
-399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0]
-4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0]
-400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0]
-402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0]
-403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0]
-403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0]
-403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0]
-404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0]
-404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0]
-407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0]
-409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0]
-409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0]
-409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0]
-41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0]
-411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0]
-413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0]
-413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0]
-414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0]
-414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0]
-417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0]
-417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0]
-417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0]
-418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0]
-419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0]
-42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0]
-42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0]
-421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0]
-424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0]
-424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0]
-427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0]
-429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0]
-429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0]
-43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0]
-430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0]
-430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0]
-430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0]
-431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0]
-431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0]
-431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0]
-432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0]
-435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0]
-436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0]
-437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0]
-438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0]
-438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0]
-438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0]
-439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0]
-439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0]
-44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0]
-443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0]
-444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0]
-446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0]
-448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0]
-449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0]
-452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0]
-453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0]
-454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0]
-454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0]
-454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0]
-455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0]
-457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0]
-458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0]
-458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0]
-459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0]
-459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0]
-460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0]
-462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0]
-462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0]
-463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0]
-463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0]
-466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0]
-466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0]
-466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0]
-467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0]
-47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0]
-470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0]
-472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0]
-475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0]
-477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0]
-478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0]
-478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0]
-479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0]
-480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0]
-480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0]
-480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0]
-481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0]
-482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0]
-483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0]
-484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0]
-485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0]
-487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0]
-490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0]
-491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0]
-492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0]
-492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0]
-493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0]
-494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0]
-495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0]
-496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0]
-497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0]
-498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0]
-498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0]
-498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0]
-5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0]
-5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0]
-5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0]
-51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0]
-51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0]
-53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0]
-54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0]
-57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0]
-58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0]
-58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0]
-64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0]
-65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0]
-66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0]
-67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0]
-67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0]
-69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0]
-70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0]
-70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0]
-70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0]
-72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0]
-72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0]
-74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0]
-76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0]
-76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0]
-77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0]
-78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0]
-8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0]
-80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0]
-82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0]
-83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0]
-83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0]
-84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0]
-84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0]
-85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0]
-86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0]
-87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0]
-9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0]
-90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0]
-90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0]
-90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0]
-92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0]
-95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0]
-95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0]
-96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0]
-97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0]
-97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0]
-98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0]
-98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0]
-PREHOOK: query: SELECT * FROM default__src_src2_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src2_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src2_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src2_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-val_0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0]
-val_0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0]
-val_0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0]
-val_10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0]
-val_100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0]
-val_100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0]
-val_103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0]
-val_103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0]
-val_104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0]
-val_104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0]
-val_105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0]
-val_11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0]
-val_111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0]
-val_113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0]
-val_113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0]
-val_114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0]
-val_116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0]
-val_118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0]
-val_118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0]
-val_119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0]
-val_119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0]
-val_119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0]
-val_12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0]
-val_12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0]
-val_120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0]
-val_120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0]
-val_125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0]
-val_125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0]
-val_126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0]
-val_128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0]
-val_128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0]
-val_128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0]
-val_129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0]
-val_129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0]
-val_131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0]
-val_133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0]
-val_134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0]
-val_134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0]
-val_136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0]
-val_137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0]
-val_137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0]
-val_143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0]
-val_145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0]
-val_146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0]
-val_146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0]
-val_149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0]
-val_149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0]
-val_15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0]
-val_15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0]
-val_150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0]
-val_152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0]
-val_152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0]
-val_153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0]
-val_155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0]
-val_156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0]
-val_157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0]
-val_158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0]
-val_160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0]
-val_162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0]
-val_163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0]
-val_164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0]
-val_164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0]
-val_165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0]
-val_165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0]
-val_166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0]
-val_167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0]
-val_167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0]
-val_167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0]
-val_168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0]
-val_17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0]
-val_170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0]
-val_172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0]
-val_172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0]
-val_174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0]
-val_174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0]
-val_175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0]
-val_175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0]
-val_176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0]
-val_176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0]
-val_177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0]
-val_178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0]
-val_179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0]
-val_179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0]
-val_18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0]
-val_18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0]
-val_180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0]
-val_181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0]
-val_183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0]
-val_186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0]
-val_187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0]
-val_187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0]
-val_187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0]
-val_189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0]
-val_19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0]
-val_190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0]
-val_191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0]
-val_191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0]
-val_192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0]
-val_193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0]
-val_193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0]
-val_193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0]
-val_194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0]
-val_195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0]
-val_195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0]
-val_196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0]
-val_197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0]
-val_197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0]
-val_199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0]
-val_199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0]
-val_199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0]
-val_2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0]
-val_20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0]
-val_200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0]
-val_200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0]
-val_201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0]
-val_202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0]
-val_203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0]
-val_203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0]
-val_205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0]
-val_205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0]
-val_207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0]
-val_207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0]
-val_208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0]
-val_208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0]
-val_208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0]
-val_209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0]
-val_209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0]
-val_213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0]
-val_213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0]
-val_214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0]
-val_216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0]
-val_216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0]
-val_217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0]
-val_217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0]
-val_218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0]
-val_219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0]
-val_219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0]
-val_221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0]
-val_221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0]
-val_222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0]
-val_223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0]
-val_223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0]
-val_224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0]
-val_224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0]
-val_226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0]
-val_228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0]
-val_229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0]
-val_229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0]
-val_233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0]
-val_233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0]
-val_235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0]
-val_237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0]
-val_237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0]
-val_238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0]
-val_238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0]
-val_239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0]
-val_239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0]
-val_24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0]
-val_24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0]
-val_241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0]
-val_242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0]
-val_242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0]
-val_244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0]
-val_247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0]
-val_248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0]
-val_249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0]
-val_252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0]
-val_255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0]
-val_255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0]
-val_256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0]
-val_256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0]
-val_257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0]
-val_258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0]
-val_26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0]
-val_26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0]
-val_260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0]
-val_262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0]
-val_263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0]
-val_265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0]
-val_265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0]
-val_266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0]
-val_27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0]
-val_272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0]
-val_272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0]
-val_273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0]
-val_273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0]
-val_273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0]
-val_274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0]
-val_275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0]
-val_278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0]
-val_278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0]
-val_28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0]
-val_280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0]
-val_280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0]
-val_281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0]
-val_281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0]
-val_282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0]
-val_282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0]
-val_283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0]
-val_284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0]
-val_285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0]
-val_286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0]
-val_287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0]
-val_288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0]
-val_288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0]
-val_289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0]
-val_291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0]
-val_292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0]
-val_296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0]
-val_298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0]
-val_298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0]
-val_298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0]
-val_30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0]
-val_302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0]
-val_305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0]
-val_306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0]
-val_307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0]
-val_307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0]
-val_308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0]
-val_309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0]
-val_309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0]
-val_310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0]
-val_311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0]
-val_311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0]
-val_311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0]
-val_315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0]
-val_316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0]
-val_316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0]
-val_316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0]
-val_317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0]
-val_317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0]
-val_318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0]
-val_318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0]
-val_318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0]
-val_321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0]
-val_321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0]
-val_322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0]
-val_322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0]
-val_323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0]
-val_325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0]
-val_325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0]
-val_327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0]
-val_327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0]
-val_327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0]
-val_33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0]
-val_331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0]
-val_331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0]
-val_332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0]
-val_333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0]
-val_333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0]
-val_335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0]
-val_336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0]
-val_338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0]
-val_339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0]
-val_34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0]
-val_341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0]
-val_342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0]
-val_342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0]
-val_344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0]
-val_344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0]
-val_345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0]
-val_35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0]
-val_35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0]
-val_35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0]
-val_351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0]
-val_353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0]
-val_353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0]
-val_356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0]
-val_360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0]
-val_362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0]
-val_364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0]
-val_365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0]
-val_366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0]
-val_367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0]
-val_367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0]
-val_368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0]
-val_369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0]
-val_369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0]
-val_369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0]
-val_37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0]
-val_37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0]
-val_373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0]
-val_374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0]
-val_375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0]
-val_377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0]
-val_378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0]
-val_379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0]
-val_382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0]
-val_382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0]
-val_384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0]
-val_384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0]
-val_384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0]
-val_386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0]
-val_389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0]
-val_392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0]
-val_393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0]
-val_394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0]
-val_395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0]
-val_395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0]
-val_396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0]
-val_396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0]
-val_396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0]
-val_397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0]
-val_397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0]
-val_399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0]
-val_399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0]
-val_4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0]
-val_400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0]
-val_402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0]
-val_403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0]
-val_403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0]
-val_403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0]
-val_404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0]
-val_404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0]
-val_407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0]
-val_409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0]
-val_409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0]
-val_409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0]
-val_41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0]
-val_411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0]
-val_413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0]
-val_413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0]
-val_414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0]
-val_414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0]
-val_417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0]
-val_417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0]
-val_417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0]
-val_418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0]
-val_419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0]
-val_42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0]
-val_42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0]
-val_421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0]
-val_424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0]
-val_424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0]
-val_427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0]
-val_429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0]
-val_429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0]
-val_43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0]
-val_430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0]
-val_430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0]
-val_430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0]
-val_431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0]
-val_431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0]
-val_431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0]
-val_432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0]
-val_435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0]
-val_436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0]
-val_437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0]
-val_438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0]
-val_438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0]
-val_438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0]
-val_439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0]
-val_439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0]
-val_44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0]
-val_443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0]
-val_444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0]
-val_446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0]
-val_448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0]
-val_449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0]
-val_452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0]
-val_453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0]
-val_454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0]
-val_454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0]
-val_454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0]
-val_455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0]
-val_457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0]
-val_458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0]
-val_458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0]
-val_459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0]
-val_459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0]
-val_460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0]
-val_462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0]
-val_462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0]
-val_463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0]
-val_463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0]
-val_466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0]
-val_466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0]
-val_466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0]
-val_467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0]
-val_47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0]
-val_470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0]
-val_472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0]
-val_475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0]
-val_477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0]
-val_478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0]
-val_478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0]
-val_479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0]
-val_480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0]
-val_480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0]
-val_480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0]
-val_481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0]
-val_482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0]
-val_483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0]
-val_484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0]
-val_485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0]
-val_487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0]
-val_490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0]
-val_491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0]
-val_492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0]
-val_492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0]
-val_493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0]
-val_494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0]
-val_495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0]
-val_496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0]
-val_497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0]
-val_498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0]
-val_498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0]
-val_498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0]
-val_5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0]
-val_5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0]
-val_5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0]
-val_51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0]
-val_51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0]
-val_53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0]
-val_54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0]
-val_57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0]
-val_58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0]
-val_58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0]
-val_64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0]
-val_65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0]
-val_66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0]
-val_67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0]
-val_67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0]
-val_69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0]
-val_70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0]
-val_70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0]
-val_70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0]
-val_72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0]
-val_72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0]
-val_74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0]
-val_76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0]
-val_76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0]
-val_77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0]
-val_78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0]
-val_8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0]
-val_80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0]
-val_82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0]
-val_83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0]
-val_83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0]
-val_84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0]
-val_84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0]
-val_85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0]
-val_86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0]
-val_87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0]
-val_9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0]
-val_92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0]
-val_95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0]
-val_95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0]
-val_96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0]
-val_97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0]
-val_97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0]
-val_98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0]
-val_98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0]
-PREHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-2 depends on stages: Stage-1
- Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src1_index__
- Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: bigint)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: array<bigint>)
- TableScan
- alias: default__src_src2_index__
- Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: bigint)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: array<bigint>)
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string), _col1 (type: bigint)
- 1 _col0 (type: string), _col1 (type: bigint)
- outputColumnNames: _col0, _col1, _col2, _col5
- Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col1 (type: bigint)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: collect_set(_col1)
- keys: _col0 (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
- Stage: Stage-2
- Map Reduce
- Map Operator Tree:
- TableScan
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: DROP INDEX src1_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src1_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src2_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src2_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
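For reference, the bitmap-index workflow that the deleted q.out files above and below exercise followed this shape. This is only a sketch assembled from statements already visible in the removed test output; `src`, `src1_index`, `src2_index`, and the `default__src_*_index__` tables are the test fixtures, not a general-purpose recipe:

-- Create deferred bitmap indexes on src, then materialize them.
CREATE INDEX src1_index ON TABLE src(key) AS 'BITMAP' WITH DEFERRED REBUILD;
CREATE INDEX src2_index ON TABLE src(value) AS 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src1_index ON src REBUILD;
ALTER INDEX src2_index ON src REBUILD;

-- Each index table stores (indexed column, _bucketname, _offset, _bitmaps).
-- Two index tables can be joined and their bitmaps intersected to collect
-- the matching block offsets per file, as in the EXPLAIN output above:
SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) AS `_offsets`
FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps
      FROM default__src_src1_index__ WHERE key = 0) a
JOIN (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps
      FROM default__src_src2_index__ WHERE value = "val_0") b
  ON a.bucketname = b.bucketname AND a.offset = b.offset
WHERE NOT EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps))
GROUP BY a.bucketname;

-- HIVE-18448 removes index support, so statements like these are rejected by
-- Hive versions that contain this commit; the corresponding test outputs are
-- therefore deleted in the hunks shown here.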
[05/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_auto.q.out b/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
deleted file mode 100644
index 3d4c394..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
+++ /dev/null
@@ -1,1273 +0,0 @@
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-PREHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src2_index ON TABLE src(value) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-PREHOOK: query: ALTER INDEX src1_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src1_index__
-POSTHOOK: query: ALTER INDEX src1_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src1_index__
-POSTHOOK: Lineage: default__src_src1_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src1_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: ALTER INDEX src2_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src2_index__
-POSTHOOK: query: ALTER INDEX src2_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src2_index__
-POSTHOOK: Lineage: default__src_src2_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src2_index__.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM default__src_src1_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src1_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0]
-0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0]
-0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0]
-10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0]
-100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0]
-100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0]
-103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0]
-103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0]
-104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0]
-104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0]
-105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0]
-11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0]
-111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0]
-113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0]
-113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0]
-114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0]
-116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0]
-118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0]
-118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0]
-119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0]
-119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0]
-119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0]
-12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0]
-12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0]
-120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0]
-120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0]
-125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0]
-125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0]
-126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0]
-128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0]
-128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0]
-128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0]
-129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0]
-129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0]
-131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0]
-133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0]
-134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0]
-134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0]
-136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0]
-137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0]
-137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0]
-138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0]
-143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0]
-145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0]
-146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0]
-146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0]
-149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0]
-149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0]
-15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0]
-15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0]
-150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0]
-152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0]
-152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0]
-153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0]
-155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0]
-156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0]
-157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0]
-158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0]
-160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0]
-162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0]
-163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0]
-164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0]
-164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0]
-165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0]
-165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0]
-166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0]
-167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0]
-167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0]
-167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0]
-168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0]
-169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0]
-17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0]
-170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0]
-172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0]
-172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0]
-174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0]
-174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0]
-175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0]
-175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0]
-176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0]
-176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0]
-177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0]
-178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0]
-179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0]
-179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0]
-18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0]
-18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0]
-180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0]
-181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0]
-183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0]
-186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0]
-187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0]
-187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0]
-187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0]
-189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0]
-19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0]
-190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0]
-191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0]
-191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0]
-192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0]
-193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0]
-193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0]
-193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0]
-194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0]
-195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0]
-195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0]
-196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0]
-197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0]
-197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0]
-199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0]
-199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0]
-199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0]
-2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0]
-20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0]
-200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0]
-200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0]
-201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0]
-202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0]
-203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0]
-203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0]
-205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0]
-205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0]
-207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0]
-207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0]
-208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0]
-208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0]
-208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0]
-209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0]
-209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0]
-213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0]
-213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0]
-214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0]
-216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0]
-216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0]
-217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0]
-217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0]
-218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0]
-219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0]
-219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0]
-221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0]
-221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0]
-222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0]
-223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0]
-223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0]
-224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0]
-224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0]
-226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0]
-228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0]
-229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0]
-229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0]
-230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0]
-233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0]
-233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0]
-235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0]
-237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0]
-237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0]
-238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0]
-238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0]
-239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0]
-239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0]
-24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0]
-24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0]
-241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0]
-242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0]
-242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0]
-244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0]
-247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0]
-248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0]
-249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0]
-252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0]
-255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0]
-255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0]
-256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0]
-256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0]
-257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0]
-258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0]
-26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0]
-26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0]
-260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0]
-262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0]
-263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0]
-265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0]
-265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0]
-266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0]
-27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0]
-272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0]
-272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0]
-273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0]
-273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0]
-273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0]
-274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0]
-275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0]
-277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0]
-278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0]
-278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0]
-28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0]
-280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0]
-280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0]
-281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0]
-281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0]
-282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0]
-282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0]
-283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0]
-284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0]
-285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0]
-286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0]
-287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0]
-288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0]
-288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0]
-289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0]
-291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0]
-292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0]
-296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0]
-298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0]
-298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0]
-298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0]
-30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0]
-302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0]
-305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0]
-306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0]
-307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0]
-307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0]
-308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0]
-309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0]
-309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0]
-310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0]
-311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0]
-311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0]
-311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0]
-315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0]
-316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0]
-316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0]
-316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0]
-317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0]
-317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0]
-318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0]
-318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0]
-318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0]
-321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0]
-321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0]
-322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0]
-322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0]
-323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0]
-325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0]
-325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0]
-327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0]
-327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0]
-327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0]
-33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0]
-331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0]
-331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0]
-332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0]
-333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0]
-333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0]
-335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0]
-336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0]
-338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0]
-339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0]
-34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0]
-341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0]
-342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0]
-342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0]
-344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0]
-344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0]
-345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0]
-348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0]
-35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0]
-35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0]
-35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0]
-351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0]
-353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0]
-353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0]
-356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0]
-360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0]
-362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0]
-364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0]
-365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0]
-366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0]
-367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0]
-367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0]
-368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0]
-369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0]
-369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0]
-369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0]
-37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0]
-37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0]
-373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0]
-374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0]
-375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0]
-377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0]
-378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0]
-379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0]
-382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0]
-382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0]
-384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0]
-384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0]
-384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0]
-386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0]
-389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0]
-392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0]
-393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0]
-394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0]
-395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0]
-395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0]
-396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0]
-396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0]
-396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0]
-397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0]
-397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0]
-399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0]
-399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0]
-4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0]
-400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0]
-401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0]
-402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0]
-403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0]
-403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0]
-403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0]
-404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0]
-404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0]
-406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0]
-407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0]
-409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0]
-409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0]
-409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0]
-41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0]
-411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0]
-413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0]
-413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0]
-414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0]
-414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0]
-417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0]
-417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0]
-417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0]
-418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0]
-419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0]
-42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0]
-42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0]
-421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0]
-424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0]
-424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0]
-427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0]
-429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0]
-429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0]
-43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0]
-430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0]
-430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0]
-430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0]
-431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0]
-431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0]
-431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0]
-432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0]
-435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0]
-436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0]
-437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0]
-438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0]
-438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0]
-438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0]
-439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0]
-439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0]
-44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0]
-443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0]
-444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0]
-446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0]
-448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0]
-449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0]
-452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0]
-453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0]
-454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0]
-454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0]
-454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0]
-455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0]
-457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0]
-458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0]
-458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0]
-459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0]
-459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0]
-460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0]
-462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0]
-462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0]
-463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0]
-463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0]
-466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0]
-466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0]
-466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0]
-467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0]
-468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0]
-469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0]
-47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0]
-470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0]
-472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0]
-475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0]
-477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0]
-478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0]
-478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0]
-479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0]
-480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0]
-480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0]
-480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0]
-481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0]
-482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0]
-483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0]
-484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0]
-485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0]
-487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0]
-489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0]
-490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0]
-491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0]
-492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0]
-492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0]
-493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0]
-494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0]
-495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0]
-496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0]
-497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0]
-498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0]
-498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0]
-498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0]
-5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0]
-5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0]
-5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0]
-51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0]
-51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0]
-53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0]
-54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0]
-57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0]
-58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0]
-58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0]
-64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0]
-65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0]
-66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0]
-67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0]
-67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0]
-69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0]
-70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0]
-70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0]
-70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0]
-72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0]
-72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0]
-74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0]
-76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0]
-76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0]
-77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0]
-78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0]
-8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0]
-80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0]
-82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0]
-83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0]
-83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0]
-84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0]
-84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0]
-85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0]
-86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0]
-87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0]
-9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0]
-90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0]
-90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0]
-90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0]
-92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0]
-95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0]
-95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0]
-96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0]
-97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0]
-97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0]
-98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0]
-98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0]
-PREHOOK: query: SELECT * FROM default__src_src2_index__
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src2_index__
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT * FROM default__src_src2_index__
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src2_index__
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-val_0 hdfs://### HDFS PATH ### 2088 [1,2,4,8589934592,1,0]
-val_0 hdfs://### HDFS PATH ### 2632 [1,2,4,8589934592,1,0]
-val_0 hdfs://### HDFS PATH ### 968 [1,2,4,8589934592,1,0]
-val_10 hdfs://### HDFS PATH ### 2846 [1,2,4,8589934592,1,0]
-val_100 hdfs://### HDFS PATH ### 2156 [1,2,4,8589934592,1,0]
-val_100 hdfs://### HDFS PATH ### 5374 [1,2,4,8589934592,1,0]
-val_103 hdfs://### HDFS PATH ### 1484 [1,2,4,8589934592,1,0]
-val_103 hdfs://### HDFS PATH ### 3614 [1,2,4,8589934592,1,0]
-val_104 hdfs://### HDFS PATH ### 4114 [1,2,4,8589934592,1,0]
-val_104 hdfs://### HDFS PATH ### 4628 [1,2,4,8589934592,1,0]
-val_105 hdfs://### HDFS PATH ### 4196 [1,2,4,8589934592,1,0]
-val_11 hdfs://### HDFS PATH ### 3170 [1,2,4,8589934592,1,0]
-val_111 hdfs://### HDFS PATH ### 1186 [1,2,4,8589934592,1,0]
-val_113 hdfs://### HDFS PATH ### 3638 [1,2,4,8589934592,1,0]
-val_113 hdfs://### HDFS PATH ### 920 [1,2,4,8589934592,1,0]
-val_114 hdfs://### HDFS PATH ### 4280 [1,2,4,8589934592,1,0]
-val_116 hdfs://### HDFS PATH ### 3746 [1,2,4,8589934592,1,0]
-val_118 hdfs://### HDFS PATH ### 2686 [1,2,4,8589934592,1,0]
-val_118 hdfs://### HDFS PATH ### 2780 [1,2,4,8589934592,1,0]
-val_119 hdfs://### HDFS PATH ### 2064 [1,2,4,8589934592,1,0]
-val_119 hdfs://### HDFS PATH ### 3332 [1,2,4,8589934592,1,0]
-val_119 hdfs://### HDFS PATH ### 4674 [1,2,4,8589934592,1,0]
-val_12 hdfs://### HDFS PATH ### 1720 [1,2,4,8589934592,1,0]
-val_12 hdfs://### HDFS PATH ### 4362 [1,2,4,8589934592,1,0]
-val_120 hdfs://### HDFS PATH ### 2284 [1,2,4,8589934592,1,0]
-val_120 hdfs://### HDFS PATH ### 4830 [1,2,4,8589934592,1,0]
-val_125 hdfs://### HDFS PATH ### 1344 [1,2,4,8589934592,1,0]
-val_125 hdfs://### HDFS PATH ### 4468 [1,2,4,8589934592,1,0]
-val_126 hdfs://### HDFS PATH ### 5732 [1,2,4,8589934592,1,0]
-val_128 hdfs://### HDFS PATH ### 208 [1,2,4,8589934592,1,0]
-val_128 hdfs://### HDFS PATH ### 3896 [1,2,4,8589934592,1,0]
-val_128 hdfs://### HDFS PATH ### 988 [1,2,4,8589934592,1,0]
-val_129 hdfs://### HDFS PATH ### 1094 [1,2,4,8589934592,1,0]
-val_129 hdfs://### HDFS PATH ### 2040 [1,2,4,8589934592,1,0]
-val_131 hdfs://### HDFS PATH ### 2296 [1,2,4,8589934592,1,0]
-val_133 hdfs://### HDFS PATH ### 5164 [1,2,4,8589934592,1,0]
-val_134 hdfs://### HDFS PATH ### 2698 [1,2,4,8589934592,1,0]
-val_134 hdfs://### HDFS PATH ### 5294 [1,2,4,8589934592,1,0]
-val_136 hdfs://### HDFS PATH ### 5080 [1,2,4,8589934592,1,0]
-val_137 hdfs://### HDFS PATH ### 1650 [1,2,4,8589934592,1,0]
-val_137 hdfs://### HDFS PATH ### 2552 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 1472 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 1848 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 2734 [1,2,4,8589934592,1,0]
-val_138 hdfs://### HDFS PATH ### 3470 [1,2,4,8589934592,1,0]
-val_143 hdfs://### HDFS PATH ### 3226 [1,2,4,8589934592,1,0]
-val_145 hdfs://### HDFS PATH ### 304 [1,2,4,8589934592,1,0]
-val_146 hdfs://### HDFS PATH ### 232 [1,2,4,8589934592,1,0]
-val_146 hdfs://### HDFS PATH ### 5430 [1,2,4,8589934592,1,0]
-val_149 hdfs://### HDFS PATH ### 1058 [1,2,4,8589934592,1,0]
-val_149 hdfs://### HDFS PATH ### 3422 [1,2,4,8589934592,1,0]
-val_15 hdfs://### HDFS PATH ### 2770 [1,2,4,8589934592,1,0]
-val_15 hdfs://### HDFS PATH ### 386 [1,2,4,8589934592,1,0]
-val_150 hdfs://### HDFS PATH ### 150 [1,2,4,8589934592,1,0]
-val_152 hdfs://### HDFS PATH ### 280 [1,2,4,8589934592,1,0]
-val_152 hdfs://### HDFS PATH ### 5648 [1,2,4,8589934592,1,0]
-val_153 hdfs://### HDFS PATH ### 502 [1,2,4,8589934592,1,0]
-val_155 hdfs://### HDFS PATH ### 932 [1,2,4,8589934592,1,0]
-val_156 hdfs://### HDFS PATH ### 2352 [1,2,4,8589934592,1,0]
-val_157 hdfs://### HDFS PATH ### 1140 [1,2,4,8589934592,1,0]
-val_158 hdfs://### HDFS PATH ### 2052 [1,2,4,8589934592,1,0]
-val_160 hdfs://### HDFS PATH ### 3274 [1,2,4,8589934592,1,0]
-val_162 hdfs://### HDFS PATH ### 754 [1,2,4,8589934592,1,0]
-val_163 hdfs://### HDFS PATH ### 4650 [1,2,4,8589934592,1,0]
-val_164 hdfs://### HDFS PATH ### 4408 [1,2,4,8589934592,1,0]
-val_164 hdfs://### HDFS PATH ### 4492 [1,2,4,8589934592,1,0]
-val_165 hdfs://### HDFS PATH ### 2236 [1,2,4,8589934592,1,0]
-val_165 hdfs://### HDFS PATH ### 44 [1,2,4,8589934592,1,0]
-val_166 hdfs://### HDFS PATH ### 418 [1,2,4,8589934592,1,0]
-val_167 hdfs://### HDFS PATH ### 3686 [1,2,4,8589934592,1,0]
-val_167 hdfs://### HDFS PATH ### 5502 [1,2,4,8589934592,1,0]
-val_167 hdfs://### HDFS PATH ### 874 [1,2,4,8589934592,1,0]
-val_168 hdfs://### HDFS PATH ### 3180 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 1308 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 2588 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 4854 [1,2,4,8589934592,1,0]
-val_169 hdfs://### HDFS PATH ### 5754 [1,2,4,8589934592,1,0]
-val_17 hdfs://### HDFS PATH ### 910 [1,2,4,8589934592,1,0]
-val_170 hdfs://### HDFS PATH ### 1106 [1,2,4,8589934592,1,0]
-val_172 hdfs://### HDFS PATH ### 2018 [1,2,4,8589934592,1,0]
-val_172 hdfs://### HDFS PATH ### 5104 [1,2,4,8589934592,1,0]
-val_174 hdfs://### HDFS PATH ### 598 [1,2,4,8589934592,1,0]
-val_174 hdfs://### HDFS PATH ### 682 [1,2,4,8589934592,1,0]
-val_175 hdfs://### HDFS PATH ### 4150 [1,2,4,8589934592,1,0]
-val_175 hdfs://### HDFS PATH ### 5176 [1,2,4,8589934592,1,0]
-val_176 hdfs://### HDFS PATH ### 1428 [1,2,4,8589934592,1,0]
-val_176 hdfs://### HDFS PATH ### 1556 [1,2,4,8589934592,1,0]
-val_177 hdfs://### HDFS PATH ### 3036 [1,2,4,8589934592,1,0]
-val_178 hdfs://### HDFS PATH ### 4938 [1,2,4,8589934592,1,0]
-val_179 hdfs://### HDFS PATH ### 2006 [1,2,4,8589934592,1,0]
-val_179 hdfs://### HDFS PATH ### 2674 [1,2,4,8589934592,1,0]
-val_18 hdfs://### HDFS PATH ### 5340 [1,2,4,8589934592,1,0]
-val_18 hdfs://### HDFS PATH ### 5514 [1,2,4,8589934592,1,0]
-val_180 hdfs://### HDFS PATH ### 1696 [1,2,4,8589934592,1,0]
-val_181 hdfs://### HDFS PATH ### 1742 [1,2,4,8589934592,1,0]
-val_183 hdfs://### HDFS PATH ### 5536 [1,2,4,8589934592,1,0]
-val_186 hdfs://### HDFS PATH ### 5466 [1,2,4,8589934592,1,0]
-val_187 hdfs://### HDFS PATH ### 1416 [1,2,4,8589934592,1,0]
-val_187 hdfs://### HDFS PATH ### 2492 [1,2,4,8589934592,1,0]
-val_187 hdfs://### HDFS PATH ### 4516 [1,2,4,8589934592,1,0]
-val_189 hdfs://### HDFS PATH ### 5188 [1,2,4,8589934592,1,0]
-val_19 hdfs://### HDFS PATH ### 2824 [1,2,4,8589934592,1,0]
-val_190 hdfs://### HDFS PATH ### 4244 [1,2,4,8589934592,1,0]
-val_191 hdfs://### HDFS PATH ### 2192 [1,2,4,8589934592,1,0]
-val_191 hdfs://### HDFS PATH ### 3852 [1,2,4,8589934592,1,0]
-val_192 hdfs://### HDFS PATH ### 1392 [1,2,4,8589934592,1,0]
-val_193 hdfs://### HDFS PATH ### 126 [1,2,4,8589934592,1,0]
-val_193 hdfs://### HDFS PATH ### 4078 [1,2,4,8589934592,1,0]
-val_193 hdfs://### HDFS PATH ### 514 [1,2,4,8589934592,1,0]
-val_194 hdfs://### HDFS PATH ### 5684 [1,2,4,8589934592,1,0]
-val_195 hdfs://### HDFS PATH ### 3286 [1,2,4,8589934592,1,0]
-val_195 hdfs://### HDFS PATH ### 886 [1,2,4,8589934592,1,0]
-val_196 hdfs://### HDFS PATH ### 2410 [1,2,4,8589934592,1,0]
-val_197 hdfs://### HDFS PATH ### 2108 [1,2,4,8589934592,1,0]
-val_197 hdfs://### HDFS PATH ### 2480 [1,2,4,8589934592,1,0]
-val_199 hdfs://### HDFS PATH ### 2180 [1,2,4,8589934592,1,0]
-val_199 hdfs://### HDFS PATH ### 4806 [1,2,4,8589934592,1,0]
-val_199 hdfs://### HDFS PATH ### 646 [1,2,4,8589934592,1,0]
-val_2 hdfs://### HDFS PATH ### 4004 [1,2,4,8589934592,1,0]
-val_20 hdfs://### HDFS PATH ### 1118 [1,2,4,8589934592,1,0]
-val_200 hdfs://### HDFS PATH ### 4698 [1,2,4,8589934592,1,0]
-val_200 hdfs://### HDFS PATH ### 5790 [1,2,4,8589934592,1,0]
-val_201 hdfs://### HDFS PATH ### 4384 [1,2,4,8589934592,1,0]
-val_202 hdfs://### HDFS PATH ### 3932 [1,2,4,8589934592,1,0]
-val_203 hdfs://### HDFS PATH ### 4314 [1,2,4,8589934592,1,0]
-val_203 hdfs://### HDFS PATH ### 944 [1,2,4,8589934592,1,0]
-val_205 hdfs://### HDFS PATH ### 1046 [1,2,4,8589934592,1,0]
-val_205 hdfs://### HDFS PATH ### 2272 [1,2,4,8589934592,1,0]
-val_207 hdfs://### HDFS PATH ### 5022 [1,2,4,8589934592,1,0]
-val_207 hdfs://### HDFS PATH ### 634 [1,2,4,8589934592,1,0]
-val_208 hdfs://### HDFS PATH ### 1272 [1,2,4,8589934592,1,0]
-val_208 hdfs://### HDFS PATH ### 1948 [1,2,4,8589934592,1,0]
-val_208 hdfs://### HDFS PATH ### 670 [1,2,4,8589934592,1,0]
-val_209 hdfs://### HDFS PATH ### 3504 [1,2,4,8589934592,1,0]
-val_209 hdfs://### HDFS PATH ### 374 [1,2,4,8589934592,1,0]
-val_213 hdfs://### HDFS PATH ### 1508 [1,2,4,8589934592,1,0]
-val_213 hdfs://### HDFS PATH ### 220 [1,2,4,8589934592,1,0]
-val_214 hdfs://### HDFS PATH ### 5116 [1,2,4,8589934592,1,0]
-val_216 hdfs://### HDFS PATH ### 1520 [1,2,4,8589934592,1,0]
-val_216 hdfs://### HDFS PATH ### 3650 [1,2,4,8589934592,1,0]
-val_217 hdfs://### HDFS PATH ### 1860 [1,2,4,8589934592,1,0]
-val_217 hdfs://### HDFS PATH ### 4396 [1,2,4,8589934592,1,0]
-val_218 hdfs://### HDFS PATH ### 3446 [1,2,4,8589934592,1,0]
-val_219 hdfs://### HDFS PATH ### 3710 [1,2,4,8589934592,1,0]
-val_219 hdfs://### HDFS PATH ### 478 [1,2,4,8589934592,1,0]
-val_221 hdfs://### HDFS PATH ### 1164 [1,2,4,8589934592,1,0]
-val_221 hdfs://### HDFS PATH ### 1580 [1,2,4,8589934592,1,0]
-val_222 hdfs://### HDFS PATH ### 5720 [1,2,4,8589934592,1,0]
-val_223 hdfs://### HDFS PATH ### 3398 [1,2,4,8589934592,1,0]
-val_223 hdfs://### HDFS PATH ### 3758 [1,2,4,8589934592,1,0]
-val_224 hdfs://### HDFS PATH ### 174 [1,2,4,8589934592,1,0]
-val_224 hdfs://### HDFS PATH ### 2892 [1,2,4,8589934592,1,0]
-val_226 hdfs://### HDFS PATH ### 3048 [1,2,4,8589934592,1,0]
-val_228 hdfs://### HDFS PATH ### 3458 [1,2,4,8589934592,1,0]
-val_229 hdfs://### HDFS PATH ### 3202 [1,2,4,8589934592,1,0]
-val_229 hdfs://### HDFS PATH ### 3956 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 1730 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 1936 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 2260 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 3580 [1,2,4,8589934592,1,0]
-val_230 hdfs://### HDFS PATH ### 4914 [1,2,4,8589934592,1,0]
-val_233 hdfs://### HDFS PATH ### 3214 [1,2,4,8589934592,1,0]
-val_233 hdfs://### HDFS PATH ### 5140 [1,2,4,8589934592,1,0]
-val_235 hdfs://### HDFS PATH ### 4046 [1,2,4,8589934592,1,0]
-val_237 hdfs://### HDFS PATH ### 4722 [1,2,4,8589934592,1,0]
-val_237 hdfs://### HDFS PATH ### 574 [1,2,4,8589934592,1,0]
-val_238 hdfs://### HDFS PATH ### 0 [1,2,4,8589934592,1,0]
-val_238 hdfs://### HDFS PATH ### 2746 [1,2,4,8589934592,1,0]
-val_239 hdfs://### HDFS PATH ### 1496 [1,2,4,8589934592,1,0]
-val_239 hdfs://### HDFS PATH ### 3722 [1,2,4,8589934592,1,0]
-val_24 hdfs://### HDFS PATH ### 1972 [1,2,4,8589934592,1,0]
-val_24 hdfs://### HDFS PATH ### 4594 [1,2,4,8589934592,1,0]
-val_241 hdfs://### HDFS PATH ### 1662 [1,2,4,8589934592,1,0]
-val_242 hdfs://### HDFS PATH ### 2940 [1,2,4,8589934592,1,0]
-val_242 hdfs://### HDFS PATH ### 3012 [1,2,4,8589934592,1,0]
-val_244 hdfs://### HDFS PATH ### 3872 [1,2,4,8589934592,1,0]
-val_247 hdfs://### HDFS PATH ### 718 [1,2,4,8589934592,1,0]
-val_248 hdfs://### HDFS PATH ### 4758 [1,2,4,8589934592,1,0]
-val_249 hdfs://### HDFS PATH ### 5034 [1,2,4,8589934592,1,0]
-val_252 hdfs://### HDFS PATH ### 454 [1,2,4,8589934592,1,0]
-val_255 hdfs://### HDFS PATH ### 4616 [1,2,4,8589934592,1,0]
-val_255 hdfs://### HDFS PATH ### 68 [1,2,4,8589934592,1,0]
-val_256 hdfs://### HDFS PATH ### 3770 [1,2,4,8589934592,1,0]
-val_256 hdfs://### HDFS PATH ### 5272 [1,2,4,8589934592,1,0]
-val_257 hdfs://### HDFS PATH ### 4208 [1,2,4,8589934592,1,0]
-val_258 hdfs://### HDFS PATH ### 4292 [1,2,4,8589934592,1,0]
-val_26 hdfs://### HDFS PATH ### 2226 [1,2,4,8589934592,1,0]
-val_26 hdfs://### HDFS PATH ### 5284 [1,2,4,8589934592,1,0]
-val_260 hdfs://### HDFS PATH ### 1764 [1,2,4,8589934592,1,0]
-val_262 hdfs://### HDFS PATH ### 4326 [1,2,4,8589934592,1,0]
-val_263 hdfs://### HDFS PATH ### 3782 [1,2,4,8589934592,1,0]
-val_265 hdfs://### HDFS PATH ### 114 [1,2,4,8589934592,1,0]
-val_265 hdfs://### HDFS PATH ### 5046 [1,2,4,8589934592,1,0]
-val_266 hdfs://### HDFS PATH ### 814 [1,2,4,8589934592,1,0]
-val_27 hdfs://### HDFS PATH ### 34 [1,2,4,8589934592,1,0]
-val_272 hdfs://### HDFS PATH ### 1836 [1,2,4,8589934592,1,0]
-val_272 hdfs://### HDFS PATH ### 2976 [1,2,4,8589934592,1,0]
-val_273 hdfs://### HDFS PATH ### 162 [1,2,4,8589934592,1,0]
-val_273 hdfs://### HDFS PATH ### 2868 [1,2,4,8589934592,1,0]
-val_273 hdfs://### HDFS PATH ### 5524 [1,2,4,8589934592,1,0]
-val_274 hdfs://### HDFS PATH ### 3698 [1,2,4,8589934592,1,0]
-val_275 hdfs://### HDFS PATH ### 1638 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 1260 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 2856 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 362 [1,2,4,8589934592,1,0]
-val_277 hdfs://### HDFS PATH ### 4902 [1,2,4,8589934592,1,0]
-val_278 hdfs://### HDFS PATH ### 1544 [1,2,4,8589934592,1,0]
-val_278 hdfs://### HDFS PATH ### 80 [1,2,4,8589934592,1,0]
-val_28 hdfs://### HDFS PATH ### 5616 [1,2,4,8589934592,1,0]
-val_280 hdfs://### HDFS PATH ### 1226 [1,2,4,8589934592,1,0]
-val_280 hdfs://### HDFS PATH ### 3992 [1,2,4,8589934592,1,0]
-val_281 hdfs://### HDFS PATH ### 350 [1,2,4,8589934592,1,0]
-val_281 hdfs://### HDFS PATH ### 5548 [1,2,4,8589934592,1,0]
-val_282 hdfs://### HDFS PATH ### 2468 [1,2,4,8589934592,1,0]
-val_282 hdfs://### HDFS PATH ### 2722 [1,2,4,8589934592,1,0]
-val_283 hdfs://### HDFS PATH ### 4022 [1,2,4,8589934592,1,0]
-val_284 hdfs://### HDFS PATH ### 1708 [1,2,4,8589934592,1,0]
-val_285 hdfs://### HDFS PATH ### 5478 [1,2,4,8589934592,1,0]
-val_286 hdfs://### HDFS PATH ### 1404 [1,2,4,8589934592,1,0]
-val_287 hdfs://### HDFS PATH ### 490 [1,2,4,8589934592,1,0]
-val_288 hdfs://### HDFS PATH ### 2422 [1,2,4,8589934592,1,0]
-val_288 hdfs://### HDFS PATH ### 3840 [1,2,4,8589934592,1,0]
-val_289 hdfs://### HDFS PATH ### 1568 [1,2,4,8589934592,1,0]
-val_291 hdfs://### HDFS PATH ### 4582 [1,2,4,8589934592,1,0]
-val_292 hdfs://### HDFS PATH ### 466 [1,2,4,8589934592,1,0]
-val_296 hdfs://### HDFS PATH ### 3626 [1,2,4,8589934592,1,0]
-val_298 hdfs://### HDFS PATH ### 2168 [1,2,4,8589934592,1,0]
-val_298 hdfs://### HDFS PATH ### 4456 [1,2,4,8589934592,1,0]
-val_298 hdfs://### HDFS PATH ### 5386 [1,2,4,8589934592,1,0]
-val_30 hdfs://### HDFS PATH ### 3494 [1,2,4,8589934592,1,0]
-val_302 hdfs://### HDFS PATH ### 1034 [1,2,4,8589934592,1,0]
-val_305 hdfs://### HDFS PATH ### 4782 [1,2,4,8589934592,1,0]
-val_306 hdfs://### HDFS PATH ### 2880 [1,2,4,8589934592,1,0]
-val_307 hdfs://### HDFS PATH ### 2812 [1,2,4,8589934592,1,0]
-val_307 hdfs://### HDFS PATH ### 5672 [1,2,4,8589934592,1,0]
-val_308 hdfs://### HDFS PATH ### 2388 [1,2,4,8589934592,1,0]
-val_309 hdfs://### HDFS PATH ### 2904 [1,2,4,8589934592,1,0]
-val_309 hdfs://### HDFS PATH ### 790 [1,2,4,8589934592,1,0]
-val_310 hdfs://### HDFS PATH ### 4962 [1,2,4,8589934592,1,0]
-val_311 hdfs://### HDFS PATH ### 1000 [1,2,4,8589934592,1,0]
-val_311 hdfs://### HDFS PATH ### 1626 [1,2,4,8589934592,1,0]
-val_311 hdfs://### HDFS PATH ### 22 [1,2,4,8589934592,1,0]
-val_315 hdfs://### HDFS PATH ### 5594 [1,2,4,8589934592,1,0]
-val_316 hdfs://### HDFS PATH ### 1012 [1,2,4,8589934592,1,0]
-val_316 hdfs://### HDFS PATH ### 2576 [1,2,4,8589934592,1,0]
-val_316 hdfs://### HDFS PATH ### 3944 [1,2,4,8589934592,1,0]
-val_317 hdfs://### HDFS PATH ### 3104 [1,2,4,8589934592,1,0]
-val_317 hdfs://### HDFS PATH ### 4974 [1,2,4,8589934592,1,0]
-val_318 hdfs://### HDFS PATH ### 1602 [1,2,4,8589934592,1,0]
-val_318 hdfs://### HDFS PATH ### 2504 [1,2,4,8589934592,1,0]
-val_318 hdfs://### HDFS PATH ### 2516 [1,2,4,8589934592,1,0]
-val_321 hdfs://### HDFS PATH ### 3308 [1,2,4,8589934592,1,0]
-val_321 hdfs://### HDFS PATH ### 4090 [1,2,4,8589934592,1,0]
-val_322 hdfs://### HDFS PATH ### 2096 [1,2,4,8589934592,1,0]
-val_322 hdfs://### HDFS PATH ### 3250 [1,2,4,8589934592,1,0]
-val_323 hdfs://### HDFS PATH ### 4878 [1,2,4,8589934592,1,0]
-val_325 hdfs://### HDFS PATH ### 4890 [1,2,4,8589934592,1,0]
-val_325 hdfs://### HDFS PATH ### 862 [1,2,4,8589934592,1,0]
-val_327 hdfs://### HDFS PATH ### 2248 [1,2,4,8589934592,1,0]
-val_327 hdfs://### HDFS PATH ### 2928 [1,2,4,8589934592,1,0]
-val_327 hdfs://### HDFS PATH ### 338 [1,2,4,8589934592,1,0]
-val_33 hdfs://### HDFS PATH ### 3592 [1,2,4,8589934592,1,0]
-val_331 hdfs://### HDFS PATH ### 2988 [1,2,4,8589934592,1,0]
-val_331 hdfs://### HDFS PATH ### 4034 [1,2,4,8589934592,1,0]
-val_332 hdfs://### HDFS PATH ### 1614 [1,2,4,8589934592,1,0]
-val_333 hdfs://### HDFS PATH ### 1684 [1,2,4,8589934592,1,0]
-val_333 hdfs://### HDFS PATH ### 4986 [1,2,4,8589934592,1,0]
-val_335 hdfs://### HDFS PATH ### 4102 [1,2,4,8589934592,1,0]
-val_336 hdfs://### HDFS PATH ### 3148 [1,2,4,8589934592,1,0]
-val_338 hdfs://### HDFS PATH ### 526 [1,2,4,8589934592,1,0]
-val_339 hdfs://### HDFS PATH ### 956 [1,2,4,8589934592,1,0]
-val_34 hdfs://### HDFS PATH ### 3192 [1,2,4,8589934592,1,0]
-val_341 hdfs://### HDFS PATH ### 5406 [1,2,4,8589934592,1,0]
-val_342 hdfs://### HDFS PATH ### 3558 [1,2,4,8589934592,1,0]
-val_342 hdfs://### HDFS PATH ### 838 [1,2,4,8589934592,1,0]
-val_344 hdfs://### HDFS PATH ### 3674 [1,2,4,8589934592,1,0]
-val_344 hdfs://### HDFS PATH ### 5560 [1,2,4,8589934592,1,0]
-val_345 hdfs://### HDFS PATH ### 1082 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 1882 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 1960 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 4338 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 5490 [1,2,4,8589934592,1,0]
-val_348 hdfs://### HDFS PATH ### 5660 [1,2,4,8589934592,1,0]
-val_35 hdfs://### HDFS PATH ### 1238 [1,2,4,8589934592,1,0]
-val_35 hdfs://### HDFS PATH ### 3138 [1,2,4,8589934592,1,0]
-val_35 hdfs://### HDFS PATH ### 4012 [1,2,4,8589934592,1,0]
-val_351 hdfs://### HDFS PATH ### 4604 [1,2,4,8589934592,1,0]
-val_353 hdfs://### HDFS PATH ### 1812 [1,2,4,8589934592,1,0]
-val_353 hdfs://### HDFS PATH ### 5092 [1,2,4,8589934592,1,0]
-val_356 hdfs://### HDFS PATH ### 1284 [1,2,4,8589934592,1,0]
-val_360 hdfs://### HDFS PATH ### 4746 [1,2,4,8589934592,1,0]
-val_362 hdfs://### HDFS PATH ### 5454 [1,2,4,8589934592,1,0]
-val_364 hdfs://### HDFS PATH ### 2662 [1,2,4,8589934592,1,0]
-val_365 hdfs://### HDFS PATH ### 802 [1,2,4,8589934592,1,0]
-val_366 hdfs://### HDFS PATH ### 4138 [1,2,4,8589934592,1,0]
-val_367 hdfs://### HDFS PATH ### 3662 [1,2,4,8589934592,1,0]
-val_367 hdfs://### HDFS PATH ### 850 [1,2,4,8589934592,1,0]
-val_368 hdfs://### HDFS PATH ### 3602 [1,2,4,8589934592,1,0]
-val_369 hdfs://### HDFS PATH ### 186 [1,2,4,8589934592,1,0]
-val_369 hdfs://### HDFS PATH ### 2564 [1,2,4,8589934592,1,0]
-val_369 hdfs://### HDFS PATH ### 2952 [1,2,4,8589934592,1,0]
-val_37 hdfs://### HDFS PATH ### 328 [1,2,4,8589934592,1,0]
-val_37 hdfs://### HDFS PATH ### 5626 [1,2,4,8589934592,1,0]
-val_373 hdfs://### HDFS PATH ### 1824 [1,2,4,8589934592,1,0]
-val_374 hdfs://### HDFS PATH ### 268 [1,2,4,8589934592,1,0]
-val_375 hdfs://### HDFS PATH ### 5212 [1,2,4,8589934592,1,0]
-val_377 hdfs://### HDFS PATH ### 766 [1,2,4,8589934592,1,0]
-val_378 hdfs://### HDFS PATH ### 1152 [1,2,4,8589934592,1,0]
-val_379 hdfs://### HDFS PATH ### 5328 [1,2,4,8589934592,1,0]
-val_382 hdfs://### HDFS PATH ### 1320 [1,2,4,8589934592,1,0]
-val_382 hdfs://### HDFS PATH ### 4528 [1,2,4,8589934592,1,0]
-val_384 hdfs://### HDFS PATH ### 1788 [1,2,4,8589934592,1,0]
-val_384 hdfs://### HDFS PATH ### 5260 [1,2,4,8589934592,1,0]
-val_384 hdfs://### HDFS PATH ### 5316 [1,2,4,8589934592,1,0]
-val_386 hdfs://### HDFS PATH ### 1356 [1,2,4,8589934592,1,0]
-val_389 hdfs://### HDFS PATH ### 2916 [1,2,4,8589934592,1,0]
-val_392 hdfs://### HDFS PATH ### 2964 [1,2,4,8589934592,1,0]
-val_393 hdfs://### HDFS PATH ### 2132 [1,2,4,8589934592,1,0]
-val_394 hdfs://### HDFS PATH ### 562 [1,2,4,8589934592,1,0]
-val_395 hdfs://### HDFS PATH ### 2710 [1,2,4,8589934592,1,0]
-val_395 hdfs://### HDFS PATH ### 3116 [1,2,4,8589934592,1,0]
-val_396 hdfs://### HDFS PATH ### 3092 [1,2,4,8589934592,1,0]
-val_396 hdfs://### HDFS PATH ### 4372 [1,2,4,8589934592,1,0]
-val_396 hdfs://### HDFS PATH ### 706 [1,2,4,8589934592,1,0]
-val_397 hdfs://### HDFS PATH ### 4558 [1,2,4,8589934592,1,0]
-val_397 hdfs://### HDFS PATH ### 778 [1,2,4,8589934592,1,0]
-val_399 hdfs://### HDFS PATH ### 1296 [1,2,4,8589934592,1,0]
-val_399 hdfs://### HDFS PATH ### 694 [1,2,4,8589934592,1,0]
-val_4 hdfs://### HDFS PATH ### 1218 [1,2,4,8589934592,1,0]
-val_400 hdfs://### HDFS PATH ### 5778 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 138 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 3000 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 3828 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 4268 [1,2,4,8589934592,1,0]
-val_401 hdfs://### HDFS PATH ### 5224 [1,2,4,8589934592,1,0]
-val_402 hdfs://### HDFS PATH ### 3080 [1,2,4,8589934592,1,0]
-val_403 hdfs://### HDFS PATH ### 406 [1,2,4,8589934592,1,0]
-val_403 hdfs://### HDFS PATH ### 4162 [1,2,4,8589934592,1,0]
-val_403 hdfs://### HDFS PATH ### 5766 [1,2,4,8589934592,1,0]
-val_404 hdfs://### HDFS PATH ### 1776 [1,2,4,8589934592,1,0]
-val_404 hdfs://### HDFS PATH ### 2318 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 244 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 4220 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 4256 [1,2,4,8589934592,1,0]
-val_406 hdfs://### HDFS PATH ### 5152 [1,2,4,8589934592,1,0]
-val_407 hdfs://### HDFS PATH ### 5248 [1,2,4,8589934592,1,0]
-val_409 hdfs://### HDFS PATH ### 2528 [1,2,4,8589934592,1,0]
-val_409 hdfs://### HDFS PATH ### 4232 [1,2,4,8589934592,1,0]
-val_409 hdfs://### HDFS PATH ### 56 [1,2,4,8589934592,1,0]
-val_41 hdfs://### HDFS PATH ### 3388 [1,2,4,8589934592,1,0]
-val_411 hdfs://### HDFS PATH ### 1924 [1,2,4,8589934592,1,0]
-val_413 hdfs://### HDFS PATH ### 2600 [1,2,4,8589934592,1,0]
-val_413 hdfs://### HDFS PATH ### 610 [1,2,4,8589934592,1,0]
-val_414 hdfs://### HDFS PATH ### 4686 [1,2,4,8589934592,1,0]
-val_414 hdfs://### HDFS PATH ### 5696 [1,2,4,8589934592,1,0]
-val_417 hdfs://### HDFS PATH ### 430 [1,2,4,8589934592,1,0]
-val_417 hdfs://### HDFS PATH ### 4794 [1,2,4,8589934592,1,0]
-val_417 hdfs://### HDFS PATH ### 730 [1,2,4,8589934592,1,0]
-val_418 hdfs://### HDFS PATH ### 2204 [1,2,4,8589934592,1,0]
-val_419 hdfs://### HDFS PATH ### 2758 [1,2,4,8589934592,1,0]
-val_42 hdfs://### HDFS PATH ### 2030 [1,2,4,8589934592,1,0]
-val_42 hdfs://### HDFS PATH ### 3298 [1,2,4,8589934592,1,0]
-val_421 hdfs://### HDFS PATH ### 5236 [1,2,4,8589934592,1,0]
-val_424 hdfs://### HDFS PATH ### 4350 [1,2,4,8589934592,1,0]
-val_424 hdfs://### HDFS PATH ### 4504 [1,2,4,8589934592,1,0]
-val_427 hdfs://### HDFS PATH ### 1248 [1,2,4,8589934592,1,0]
-val_429 hdfs://### HDFS PATH ### 256 [1,2,4,8589934592,1,0]
-val_429 hdfs://### HDFS PATH ### 4842 [1,2,4,8589934592,1,0]
-val_43 hdfs://### HDFS PATH ### 2330 [1,2,4,8589934592,1,0]
-val_430 hdfs://### HDFS PATH ### 1532 [1,2,4,8589934592,1,0]
-val_430 hdfs://### HDFS PATH ### 3320 [1,2,4,8589934592,1,0]
-val_430 hdfs://### HDFS PATH ### 442 [1,2,4,8589934592,1,0]
-val_431 hdfs://### HDFS PATH ### 1994 [1,2,4,8589934592,1,0]
-val_431 hdfs://### HDFS PATH ### 4420 [1,2,4,8589934592,1,0]
-val_431 hdfs://### HDFS PATH ### 4480 [1,2,4,8589934592,1,0]
-val_432 hdfs://### HDFS PATH ### 3920 [1,2,4,8589934592,1,0]
-val_435 hdfs://### HDFS PATH ### 2834 [1,2,4,8589934592,1,0]
-val_436 hdfs://### HDFS PATH ### 2340 [1,2,4,8589934592,1,0]
-val_437 hdfs://### HDFS PATH ### 1368 [1,2,4,8589934592,1,0]
-val_438 hdfs://### HDFS PATH ### 1070 [1,2,4,8589934592,1,0]
-val_438 hdfs://### HDFS PATH ### 3884 [1,2,4,8589934592,1,0]
-val_438 hdfs://### HDFS PATH ### 4662 [1,2,4,8589934592,1,0]
-val_439 hdfs://### HDFS PATH ### 4734 [1,2,4,8589934592,1,0]
-val_439 hdfs://### HDFS PATH ### 826 [1,2,4,8589934592,1,0]
-val_44 hdfs://### HDFS PATH ### 4068 [1,2,4,8589934592,1,0]
-val_443 hdfs://### HDFS PATH ### 4866 [1,2,4,8589934592,1,0]
-val_444 hdfs://### HDFS PATH ### 4818 [1,2,4,8589934592,1,0]
-val_446 hdfs://### HDFS PATH ### 538 [1,2,4,8589934592,1,0]
-val_448 hdfs://### HDFS PATH ### 5636 [1,2,4,8589934592,1,0]
-val_449 hdfs://### HDFS PATH ### 3434 [1,2,4,8589934592,1,0]
-val_452 hdfs://### HDFS PATH ### 3024 [1,2,4,8589934592,1,0]
-val_453 hdfs://### HDFS PATH ### 3482 [1,2,4,8589934592,1,0]
-val_454 hdfs://### HDFS PATH ### 2144 [1,2,4,8589934592,1,0]
-val_454 hdfs://### HDFS PATH ### 4432 [1,2,4,8589934592,1,0]
-val_454 hdfs://### HDFS PATH ### 5200 [1,2,4,8589934592,1,0]
-val_455 hdfs://### HDFS PATH ### 976 [1,2,4,8589934592,1,0]
-val_457 hdfs://### HDFS PATH ### 2446 [1,2,4,8589934592,1,0]
-val_458 hdfs://### HDFS PATH ### 3356 [1,2,4,8589934592,1,0]
-val_458 hdfs://### HDFS PATH ### 5442 [1,2,4,8589934592,1,0]
-val_459 hdfs://### HDFS PATH ### 1450 [1,2,4,8589934592,1,0]
-val_459 hdfs://### HDFS PATH ### 550 [1,2,4,8589934592,1,0]
-val_460 hdfs://### HDFS PATH ### 5010 [1,2,4,8589934592,1,0]
-val_462 hdfs://### HDFS PATH ### 5128 [1,2,4,8589934592,1,0]
-val_462 hdfs://### HDFS PATH ### 5350 [1,2,4,8589934592,1,0]
-val_463 hdfs://### HDFS PATH ### 1982 [1,2,4,8589934592,1,0]
-val_463 hdfs://### HDFS PATH ### 3980 [1,2,4,8589934592,1,0]
-val_466 hdfs://### HDFS PATH ### 1894 [1,2,4,8589934592,1,0]
-val_466 hdfs://### HDFS PATH ### 4126 [1,2,4,8589934592,1,0]
-val_466 hdfs://### HDFS PATH ### 658 [1,2,4,8589934592,1,0]
-val_467 hdfs://### HDFS PATH ### 3908 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 2120 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 2376 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 3526 [1,2,4,8589934592,1,0]
-val_468 hdfs://### HDFS PATH ### 4950 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 1380 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 2364 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 292 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 3968 [1,2,4,8589934592,1,0]
-val_469 hdfs://### HDFS PATH ### 5582 [1,2,4,8589934592,1,0]
-val_47 hdfs://### HDFS PATH ### 1198 [1,2,4,8589934592,1,0]
-val_470 hdfs://### HDFS PATH ### 2540 [1,2,4,8589934592,1,0]
-val_472 hdfs://### HDFS PATH ### 3238 [1,2,4,8589934592,1,0]
-val_475 hdfs://### HDFS PATH ### 898 [1,2,4,8589934592,1,0]
-val_477 hdfs://### HDFS PATH ### 5708 [1,2,4,8589934592,1,0]
-val_478 hdfs://### HDFS PATH ### 4444 [1,2,4,8589934592,1,0]
-val_478 hdfs://### HDFS PATH ### 4926 [1,2,4,8589934592,1,0]
-val_479 hdfs://### HDFS PATH ### 4770 [1,2,4,8589934592,1,0]
-val_480 hdfs://### HDFS PATH ### 3816 [1,2,4,8589934592,1,0]
-val_480 hdfs://### HDFS PATH ### 4570 [1,2,4,8589934592,1,0]
-val_480 hdfs://### HDFS PATH ### 5058 [1,2,4,8589934592,1,0]
-val_481 hdfs://### HDFS PATH ### 2434 [1,2,4,8589934592,1,0]
-val_482 hdfs://### HDFS PATH ### 586 [1,2,4,8589934592,1,0]
-val_483 hdfs://### HDFS PATH ### 4174 [1,2,4,8589934592,1,0]
-val_484 hdfs://### HDFS PATH ### 102 [1,2,4,8589934592,1,0]
-val_485 hdfs://### HDFS PATH ### 3734 [1,2,4,8589934592,1,0]
-val_487 hdfs://### HDFS PATH ### 3804 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 1128 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 1800 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 3344 [1,2,4,8589934592,1,0]
-val_489 hdfs://### HDFS PATH ### 742 [1,2,4,8589934592,1,0]
-val_490 hdfs://### HDFS PATH ### 2640 [1,2,4,8589934592,1,0]
-val_491 hdfs://### HDFS PATH ### 4710 [1,2,4,8589934592,1,0]
-val_492 hdfs://### HDFS PATH ### 3410 [1,2,4,8589934592,1,0]
-val_492 hdfs://### HDFS PATH ### 5362 [1,2,4,8589934592,1,0]
-val_493 hdfs://### HDFS PATH ### 4998 [1,2,4,8589934592,1,0]
-val_494 hdfs://### HDFS PATH ### 622 [1,2,4,8589934592,1,0]
-val_495 hdfs://### HDFS PATH ### 316 [1,2,4,8589934592,1,0]
-val_496 hdfs://### HDFS PATH ### 2076 [1,2,4,8589934592,1,0]
-val_497 hdfs://### HDFS PATH ### 3068 [1,2,4,8589934592,1,0]
-val_498 hdfs://### HDFS PATH ### 1332 [1,2,4,8589934592,1,0]
-val_498 hdfs://### HDFS PATH ### 3262 [1,2,4,8589934592,1,0]
-val_498 hdfs://### HDFS PATH ### 5418 [1,2,4,8589934592,1,0]
-val_5 hdfs://### HDFS PATH ### 3060 [1,2,4,8589934592,1,0]
-val_5 hdfs://### HDFS PATH ### 3864 [1,2,4,8589934592,1,0]
-val_5 hdfs://### HDFS PATH ### 4540 [1,2,4,8589934592,1,0]
-val_51 hdfs://### HDFS PATH ### 1462 [1,2,4,8589934592,1,0]
-val_51 hdfs://### HDFS PATH ### 2308 [1,2,4,8589934592,1,0]
-val_53 hdfs://### HDFS PATH ### 4186 [1,2,4,8589934592,1,0]
-val_54 hdfs://### HDFS PATH ### 1440 [1,2,4,8589934592,1,0]
-val_57 hdfs://### HDFS PATH ### 1024 [1,2,4,8589934592,1,0]
-val_58 hdfs://### HDFS PATH ### 1906 [1,2,4,8589934592,1,0]
-val_58 hdfs://### HDFS PATH ### 3128 [1,2,4,8589934592,1,0]
-val_64 hdfs://### HDFS PATH ### 3516 [1,2,4,8589934592,1,0]
-val_65 hdfs://### HDFS PATH ### 1592 [1,2,4,8589934592,1,0]
-val_66 hdfs://### HDFS PATH ### 198 [1,2,4,8589934592,1,0]
-val_67 hdfs://### HDFS PATH ### 1754 [1,2,4,8589934592,1,0]
-val_67 hdfs://### HDFS PATH ### 5306 [1,2,4,8589934592,1,0]
-val_69 hdfs://### HDFS PATH ### 3570 [1,2,4,8589934592,1,0]
-val_70 hdfs://### HDFS PATH ### 3794 [1,2,4,8589934592,1,0]
-val_70 hdfs://### HDFS PATH ### 4548 [1,2,4,8589934592,1,0]
-val_70 hdfs://### HDFS PATH ### 4640 [1,2,4,8589934592,1,0]
-val_72 hdfs://### HDFS PATH ### 1208 [1,2,4,8589934592,1,0]
-val_72 hdfs://### HDFS PATH ### 2792 [1,2,4,8589934592,1,0]
-val_74 hdfs://### HDFS PATH ### 3548 [1,2,4,8589934592,1,0]
-val_76 hdfs://### HDFS PATH ### 3378 [1,2,4,8589934592,1,0]
-val_76 hdfs://### HDFS PATH ### 3538 [1,2,4,8589934592,1,0]
-val_77 hdfs://### HDFS PATH ### 2622 [1,2,4,8589934592,1,0]
-val_78 hdfs://### HDFS PATH ### 3368 [1,2,4,8589934592,1,0]
-val_8 hdfs://### HDFS PATH ### 1916 [1,2,4,8589934592,1,0]
-val_80 hdfs://### HDFS PATH ### 4058 [1,2,4,8589934592,1,0]
-val_82 hdfs://### HDFS PATH ### 396 [1,2,4,8589934592,1,0]
-val_83 hdfs://### HDFS PATH ### 1674 [1,2,4,8589934592,1,0]
-val_83 hdfs://### HDFS PATH ### 5070 [1,2,4,8589934592,1,0]
-val_84 hdfs://### HDFS PATH ### 1872 [1,2,4,8589934592,1,0]
-val_84 hdfs://### HDFS PATH ### 5606 [1,2,4,8589934592,1,0]
-val_85 hdfs://### HDFS PATH ### 2612 [1,2,4,8589934592,1,0]
-val_86 hdfs://### HDFS PATH ### 12 [1,2,4,8589934592,1,0]
-val_87 hdfs://### HDFS PATH ### 2652 [1,2,4,8589934592,1,0]
-val_9 hdfs://### HDFS PATH ### 5398 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 2802 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 4304 [1,2,4,8589934592,1,0]
-val_90 hdfs://### HDFS PATH ### 5744 [1,2,4,8589934592,1,0]
-val_92 hdfs://### HDFS PATH ### 1176 [1,2,4,8589934592,1,0]
-val_95 hdfs://### HDFS PATH ### 2400 [1,2,4,8589934592,1,0]
-val_95 hdfs://### HDFS PATH ### 3160 [1,2,4,8589934592,1,0]
-val_96 hdfs://### HDFS PATH ### 2216 [1,2,4,8589934592,1,0]
-val_97 hdfs://### HDFS PATH ### 5572 [1,2,4,8589934592,1,0]
-val_97 hdfs://### HDFS PATH ### 5802 [1,2,4,8589934592,1,0]
-val_98 hdfs://### HDFS PATH ### 2458 [1,2,4,8589934592,1,0]
-val_98 hdfs://### HDFS PATH ### 92 [1,2,4,8589934592,1,0]
-PREHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-2 depends on stages: Stage-1
- Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src1_index__
- Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: bigint)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
- Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: array<bigint>)
- TableScan
- alias: default__src_src2_index__
- Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
- outputColumnNames: _col0, _col1, _col2
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: bigint)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
- Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: array<bigint>)
- Reduce Operator Tree:
- Join Operator
- condition map:
- Inner Join 0 to 1
- keys:
- 0 _col0 (type: string), _col1 (type: bigint)
- 1 _col0 (type: string), _col1 (type: bigint)
- outputColumnNames: _col0, _col1, _col2, _col5
- Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col1 (type: bigint)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- Group By Operator
- aggregations: collect_set(_col1)
- keys: _col0 (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
- Stage: Stage-2
- Map Reduce
- Map Operator Tree:
- TableScan
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src1_index__
-PREHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
-FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
-EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src1_index__
-POSTHOOK: Input: default@default__src_src2_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: hdfs://### HDFS PATH ###
-POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: hdfs://### HDFS PATH ###
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: DROP INDEX src1_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src1_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
-PREHOOK: query: DROP INDEX src2_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src2_index ON src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out b/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
deleted file mode 100644
index 773e9f1..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
+++ /dev/null
@@ -1,150 +0,0 @@
-PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_src_part_index__
-PREHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX src_part_index ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart)srcpart.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_src_part_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-2 depends on stages: Stage-3
- Stage-1 depends on stages: Stage-2
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__srcpart_src_part_index__
- filterExpr: ((UDFToDouble(key) = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint)
- outputColumnNames: _bucketname, _offset
- Group By Operator
- aggregations: collect_set(_offset)
- keys: _bucketname (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: srcpart
- filterExpr: ((UDFToDouble(key) = 86.0) and (ds = '2008-04-09')) (type: boolean)
- Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: (UDFToDouble(key) = 86.0) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_src_part_index__
-PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=86 AND ds='2008-04-09'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_src_part_index__
-POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_src_part_index__@ds=2008-04-09/hr=12
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-86 val_86
-86 val_86
-PREHOOK: query: DROP INDEX src_part_index ON srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX src_part_index ON srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
deleted file mode 100644
index 1f8e40a..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
+++ /dev/null
@@ -1,133 +0,0 @@
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-2 depends on stages: Stage-3
- Stage-1 depends on stages: Stage-2
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offset (type: bigint)
- outputColumnNames: _bucketname, _offset
- Group By Operator
- aggregations: collect_set(_offset)
- keys: _bucketname (type: string)
- mode: hash
- outputColumnNames: _col0, _col1
- Reduce Output Operator
- key expressions: _col0 (type: string)
- sort order: +
- Map-reduce partition columns: _col0 (type: string)
- value expressions: _col1 (type: array<bigint>)
- Reduce Operator Tree:
- Group By Operator
- aggregations: collect_set(VALUE._col0)
- keys: KEY._col0 (type: string)
- mode: mergepartial
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: true
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
[13/15] hive git commit: HIVE-18448: Drop Support For Indexes From
Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
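For reference, the index DDL that this change removes — the statements exercised by the deleted .q.out files above — looked like the following. This is a minimal sketch assembled from those deleted test files (index_bitmap_compression.q.out and index_bitmap_auto_partitioned.q.out), not an excerpt from this diff:

  -- bitmap index DDL dropped by HIVE-18448, as used in the deleted tests
  CREATE INDEX src_index ON TABLE src(key) AS 'BITMAP' WITH DEFERRED REBUILD;
  ALTER INDEX src_index ON src REBUILD;
  -- queries could then be rewritten against the generated index table default__src_src_index__
  SELECT key, value FROM src WHERE key > 80 AND key < 100;
  DROP INDEX src_index ON src;

After this commit such statements are no longer supported; the diff below removes the corresponding index handling (alterIndex, createIndex, getIndex, dropIndex, getIndexes) from the metadata layer as well.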
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 2152f00..a45cac6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -19,11 +19,6 @@
package org.apache.hadoop.hive.ql.metadata;
import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
-import static org.apache.hadoop.hive.serde.serdeConstants.COLLECTION_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.ESCAPE_CHAR;
-import static org.apache.hadoop.hive.serde.serdeConstants.FIELD_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.LINE_DELIM;
-import static org.apache.hadoop.hive.serde.serdeConstants.MAPKEY_DELIM;
import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
@@ -108,7 +103,6 @@ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
import org.apache.hadoop.hive.metastore.api.HiveObjectType;
-import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.Materialization;
@@ -116,7 +110,6 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
-import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -127,11 +120,9 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
@@ -150,7 +141,6 @@ import org.apache.hadoop.hive.ql.exec.FunctionTask;
import org.apache.hadoop.hive.ql.exec.FunctionUtils;
import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
import org.apache.hadoop.hive.ql.log.PerfLogger;
@@ -668,34 +658,6 @@ public class Hive {
}
}
- public void alterIndex(String baseTableName, String indexName, Index newIdx)
- throws InvalidOperationException, HiveException {
- String[] names = Utilities.getDbTableName(baseTableName);
- alterIndex(names[0], names[1], indexName, newIdx);
- }
-
- /**
- * Updates the existing index metadata with the new metadata.
- *
- * @param idxName
- * name of the existing index
- * @param newIdx
- * new name of the index. could be the old name
- * @throws InvalidOperationException
- * if the changes in metadata is not acceptable
- * @throws TException
- */
- public void alterIndex(String dbName, String baseTblName, String idxName, Index newIdx)
- throws InvalidOperationException, HiveException {
- try {
- getMSC().alter_index(dbName, baseTblName, idxName, newIdx);
- } catch (MetaException e) {
- throw new HiveException("Unable to alter index. " + e.getMessage(), e);
- } catch (TException e) {
- throw new HiveException("Unable to alter index. " + e.getMessage(), e);
- }
- }
-
/**
* Updates the existing partition metadata with the new metadata.
*
@@ -934,243 +896,6 @@ public class Hive {
}
/**
- *
- * @param tableName
- * table name
- * @param indexName
- * index name
- * @param indexHandlerClass
- * index handler class
- * @param indexedCols
- * index columns
- * @param indexTblName
- * index table's name
- * @param deferredRebuild
- * referred build index table's data
- * @param inputFormat
- * input format
- * @param outputFormat
- * output format
- * @param serde
- * @param storageHandler
- * index table's storage handler
- * @param location
- * location
- * @param idxProps
- * idx
- * @param serdeProps
- * serde properties
- * @param collItemDelim
- * @param fieldDelim
- * @param fieldEscape
- * @param lineDelim
- * @param mapKeyDelim
- * @throws HiveException
- */
- public void createIndex(String tableName, String indexName, String indexHandlerClass,
- List<String> indexedCols, String indexTblName, boolean deferredRebuild,
- String inputFormat, String outputFormat, String serde,
- String storageHandler, String location,
- Map<String, String> idxProps, Map<String, String> tblProps, Map<String, String> serdeProps,
- String collItemDelim, String fieldDelim, String fieldEscape,
- String lineDelim, String mapKeyDelim, String indexComment)
- throws HiveException {
-
- try {
- String tdname = Utilities.getDatabaseName(tableName);
- String idname = Utilities.getDatabaseName(indexTblName);
- if (!idname.equals(tdname)) {
- throw new HiveException("Index on different database (" + idname
- + ") from base table (" + tdname + ") is not supported.");
- }
-
- Index old_index = null;
- try {
- old_index = getIndex(tableName, indexName);
- } catch (Exception e) {
- }
- if (old_index != null) {
- throw new HiveException("Index " + indexName + " already exists on table " + tableName);
- }
-
- org.apache.hadoop.hive.metastore.api.Table baseTbl = getTable(tableName).getTTable();
- if (TableType.VIRTUAL_VIEW.toString().equals(baseTbl.getTableType())) {
- throw new HiveException("tableName="+ tableName +" is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported.");
- }
- if (baseTbl.isTemporary()) {
- throw new HiveException("tableName=" + tableName
- + " is a TEMPORARY TABLE. Index on TEMPORARY TABLE is not supported.");
- }
-
- org.apache.hadoop.hive.metastore.api.Table temp = null;
- try {
- temp = getTable(indexTblName).getTTable();
- } catch (Exception e) {
- }
- if (temp != null) {
- throw new HiveException("Table name " + indexTblName + " already exists. Choose another name.");
- }
-
- SerDeInfo serdeInfo = new SerDeInfo();
- serdeInfo.setName(indexTblName);
-
- if(serde != null) {
- serdeInfo.setSerializationLib(serde);
- } else {
- if (storageHandler == null) {
- serdeInfo.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
- } else {
- HiveStorageHandler sh = HiveUtils.getStorageHandler(getConf(), storageHandler);
- String serDeClassName = sh.getSerDeClass().getName();
- serdeInfo.setSerializationLib(serDeClassName);
- }
- }
-
- serdeInfo.setParameters(new HashMap<String, String>());
- if (fieldDelim != null) {
- serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim);
- serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim);
- }
- if (fieldEscape != null) {
- serdeInfo.getParameters().put(ESCAPE_CHAR, fieldEscape);
- }
- if (collItemDelim != null) {
- serdeInfo.getParameters().put(COLLECTION_DELIM, collItemDelim);
- }
- if (mapKeyDelim != null) {
- serdeInfo.getParameters().put(MAPKEY_DELIM, mapKeyDelim);
- }
- if (lineDelim != null) {
- serdeInfo.getParameters().put(LINE_DELIM, lineDelim);
- }
-
- if (serdeProps != null) {
- Iterator<Entry<String, String>> iter = serdeProps.entrySet()
- .iterator();
- while (iter.hasNext()) {
- Entry<String, String> m = iter.next();
- serdeInfo.getParameters().put(m.getKey(), m.getValue());
- }
- }
-
- List<FieldSchema> indexTblCols = new ArrayList<FieldSchema>();
- List<Order> sortCols = new ArrayList<Order>();
- int k = 0;
- Table metaBaseTbl = new Table(baseTbl);
- // Even though we are storing these in metastore, get regular columns. Indexes on lengthy
- // types from e.g. Avro schema will just fail to create the index table (by design).
- List<FieldSchema> cols = metaBaseTbl.getCols();
- for (int i = 0; i < cols.size(); i++) {
- FieldSchema col = cols.get(i);
- if (indexedCols.contains(col.getName())) {
- indexTblCols.add(col);
- sortCols.add(new Order(col.getName(), 1));
- k++;
- }
- }
- if (k != indexedCols.size()) {
- throw new RuntimeException(
- "Check the index columns, they should appear in the table being indexed.");
- }
-
- int time = (int) (System.currentTimeMillis() / 1000);
- org.apache.hadoop.hive.metastore.api.Table tt = null;
- HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass);
-
- String itname = Utilities.getTableName(indexTblName);
- if (indexHandler.usesIndexTable()) {
- tt = new org.apache.hadoop.hive.ql.metadata.Table(idname, itname).getTTable();
- List<FieldSchema> partKeys = baseTbl.getPartitionKeys();
- tt.setPartitionKeys(partKeys);
- tt.setTableType(TableType.INDEX_TABLE.toString());
- if (tblProps != null) {
- for (Entry<String, String> prop : tblProps.entrySet()) {
- tt.putToParameters(prop.getKey(), prop.getValue());
- }
- }
- SessionState ss = SessionState.get();
- CreateTableAutomaticGrant grants;
- if (ss != null && ((grants = ss.getCreateTableGrants()) != null)) {
- PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet();
- principalPrivs.setUserPrivileges(grants.getUserGrants());
- principalPrivs.setGroupPrivileges(grants.getGroupGrants());
- principalPrivs.setRolePrivileges(grants.getRoleGrants());
- tt.setPrivileges(principalPrivs);
- }
- }
-
- if(!deferredRebuild) {
- throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \".");
- }
-
- StorageDescriptor indexSd = new StorageDescriptor(
- indexTblCols,
- location,
- inputFormat,
- outputFormat,
- false/*compressed - not used*/,
- -1/*numBuckets - default is -1 when the table has no buckets*/,
- serdeInfo,
- null/*bucketCols*/,
- sortCols,
- null/*parameters*/);
-
- String ttname = Utilities.getTableName(tableName);
- Index indexDesc = new Index(indexName, indexHandlerClass, tdname, ttname, time, time, itname,
- indexSd, new HashMap<String,String>(), deferredRebuild);
- if (indexComment != null) {
- indexDesc.getParameters().put("comment", indexComment);
- }
-
- if (idxProps != null)
- {
- indexDesc.getParameters().putAll(idxProps);
- }
-
- indexHandler.analyzeIndexDefinition(baseTbl, indexDesc, tt);
-
- this.getMSC().createIndex(indexDesc, tt);
-
- } catch (Exception e) {
- throw new HiveException(e);
- }
- }
-
- public Index getIndex(String baseTableName, String indexName) throws HiveException {
- String[] names = Utilities.getDbTableName(baseTableName);
- return this.getIndex(names[0], names[1], indexName);
- }
-
- public Index getIndex(String dbName, String baseTableName,
- String indexName) throws HiveException {
- try {
- return this.getMSC().getIndex(dbName, baseTableName, indexName);
- } catch (Exception e) {
- throw new HiveException(e);
- }
- }
-
- public boolean dropIndex(String baseTableName, String index_name,
- boolean throwException, boolean deleteData) throws HiveException {
- String[] names = Utilities.getDbTableName(baseTableName);
- return dropIndex(names[0], names[1], index_name, throwException, deleteData);
- }
-
- public boolean dropIndex(String db_name, String tbl_name, String index_name,
- boolean throwException, boolean deleteData) throws HiveException {
- try {
- return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData);
- } catch (NoSuchObjectException e) {
- if (throwException) {
- throw new HiveException("Index " + index_name + " doesn't exist. ", e);
- }
- return false;
- } catch (Exception e) {
- throw new HiveException(e.getMessage(), e);
- }
- }
-
- /**
* Drops table along with the data in it. If the table doesn't exist then it
* is a no-op. If ifPurge option is specified it is passed to the
* hdfs command that removes table data from warehouse to make it skip trash.
@@ -1522,8 +1247,9 @@ public class Hive {
*/
public List<String> getTablesByType(String dbName, String pattern, TableType type)
throws HiveException {
- if (dbName == null)
+ if (dbName == null) {
dbName = SessionState.get().getCurrentDatabase();
+ }
try {
if (type != null) {
@@ -2779,13 +2505,17 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
addInsertNonDirectoryInformation(p, fileSystem, insertData);
}
- if (directories == null) return;
+ if (directories == null) {
+ return;
+ }
// We don't expect any nesting in most cases, or a lot of it if it is present; union and LB
// are some examples where we would have 1, or few, levels respectively.
while (!directories.isEmpty()) {
Path dir = directories.poll();
FileStatus[] contents = fileSystem.listStatus(dir);
- if (contents == null) continue;
+ if (contents == null) {
+ continue;
+ }
for (FileStatus status : contents) {
if (status.isDirectory()) {
directories.add(status.getPath());
@@ -3775,13 +3505,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
ErrorMsg errorMsg = ErrorMsg.getErrorMsg(e);
- if (logMsg != null)
+ if (logMsg != null) {
LOG.info(String.format(logMsg, e.getMessage()));
+ }
- if (errorMsg != ErrorMsg.UNRESOLVED_RT_EXCEPTION)
+ if (errorMsg != ErrorMsg.UNRESOLVED_RT_EXCEPTION) {
return new HiveException(e, e.getMessage(), errorMsg, hiveErrMsg);
- else
+ } else {
return new HiveException(msg, e);
+ }
}
/**
@@ -3979,7 +3711,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
bucketDest.toUri().toString());
try {
fs.rename(bucketSrc, bucketDest);
- if (newFiles != null) newFiles.add(bucketDest);
+ if (newFiles != null) {
+ newFiles.add(bucketDest);
+ }
} catch (Exception e) {
throw getHiveException(e, msg);
}
@@ -4118,7 +3852,9 @@ private void constructOneLBLocationMap(FileStatus fSta,
recycleDirToCmPath(path, purge);
}
FileStatus[] statuses = fs.listStatus(path, pathFilter);
- if (statuses == null || statuses.length == 0) return;
+ if (statuses == null || statuses.length == 0) {
+ return;
+ }
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
String s = "Deleting files under " + path + " for replace: ";
for (FileStatus file : statuses) {
@@ -4342,17 +4078,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
- public List<Index> getIndexes(String dbName, String tblName, short max) throws HiveException {
- List<Index> indexes = null;
- try {
- indexes = getMSC().listIndexes(dbName, tblName, max);
- } catch (Exception e) {
- LOG.error(StringUtils.stringifyException(e));
- throw new HiveException(e);
- }
- return indexes;
- }
-
public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException {
try {
return getMSC().setPartitionColumnStatistics(request);
@@ -4440,6 +4165,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
/**
* @deprecated use {@link #compact2(String, String, String, String, Map)}
*/
+ @Deprecated
public void compact(String dbname, String tableName, String partName, String compactType,
Map<String, String> tblproperties) throws HiveException {
compact2(dbname, tableName, partName, compactType, tblproperties);
@@ -4461,9 +4187,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
throws HiveException {
try {
CompactionType cr = null;
- if ("major".equals(compactType)) cr = CompactionType.MAJOR;
- else if ("minor".equals(compactType)) cr = CompactionType.MINOR;
- else throw new RuntimeException("Unknown compaction type " + compactType);
+ if ("major".equals(compactType)) {
+ cr = CompactionType.MAJOR;
+ } else if ("minor".equals(compactType)) {
+ cr = CompactionType.MINOR;
+ } else {
+ throw new RuntimeException("Unknown compaction type " + compactType);
+ }
return getMSC().compact2(dbname, tableName, partName, cr, tblproperties);
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
index 16c9834..dae18fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
@@ -26,10 +26,8 @@ import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.tez.TezContext;
-import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
import org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator;
import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
@@ -315,24 +313,6 @@ public final class HiveUtils {
// prevent instantiation
}
- public static HiveIndexHandler getIndexHandler(HiveConf conf,
- String indexHandlerClass) throws HiveException {
-
- if (indexHandlerClass == null) {
- return null;
- }
- try {
- Class<? extends HiveIndexHandler> handlerClass =
- (Class<? extends HiveIndexHandler>)
- Class.forName(indexHandlerClass, true, Utilities.getSessionSpecifiedClassLoader());
- HiveIndexHandler indexHandler = ReflectionUtils.newInstance(handlerClass, conf);
- return indexHandler;
- } catch (ClassNotFoundException e) {
- throw new HiveException("Error in loading index handler."
- + e.getMessage(), e);
- }
- }
-
@SuppressWarnings("unchecked")
public static List<HiveMetastoreAuthorizationProvider> getMetaStoreAuthorizeProviderManagers(
Configuration conf, HiveConf.ConfVars authorizationProviderConfKey,
@@ -438,22 +418,6 @@ public final class HiveUtils {
return ret;
}
-
- /**
- * Convert FieldSchemas to columnNames with backticks around them.
- */
- public static String getUnparsedColumnNamesFromFieldSchema(
- List<FieldSchema> fieldSchemas) {
- StringBuilder sb = new StringBuilder();
- for (int i = 0; i < fieldSchemas.size(); i++) {
- if (i > 0) {
- sb.append(",");
- }
- sb.append(HiveUtils.unparseIdentifier(fieldSchemas.get(i).getName()));
- }
- return sb.toString();
- }
-
public static String getLocalDirList(Configuration conf) {
if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
TezContext tezContext = (TezContext) TezContext.get();
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index 3b87824..a5b6a4b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.metastore.api.Decimal;
import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
@@ -42,8 +41,6 @@ import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
-import org.apache.hadoop.hive.ql.index.HiveIndex;
-import org.apache.hadoop.hive.ql.index.HiveIndex.IndexType;
import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -55,7 +52,6 @@ import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo.ForeignKeyCol;
import org.apache.hadoop.hive.ql.metadata.NotNullConstraint;
import org.apache.hadoop.hive.ql.plan.DescTableDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hive.common.util.HiveStringUtils;
@@ -136,45 +132,6 @@ public final class MetaDataFormatUtils {
return null;
}
- public static String getIndexInformation(Index index, boolean isOutputPadded) {
- StringBuilder indexInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
-
- List<String> indexColumns = new ArrayList<String>();
-
- indexColumns.add(index.getIndexName());
- indexColumns.add(index.getOrigTableName());
-
- // index key names
- List<FieldSchema> indexKeys = index.getSd().getCols();
- StringBuilder keyString = new StringBuilder();
- boolean first = true;
- for (FieldSchema key : indexKeys)
- {
- if (!first)
- {
- keyString.append(", ");
- }
- keyString.append(key.getName());
- first = false;
- }
-
- indexColumns.add(keyString.toString());
-
- indexColumns.add(index.getIndexTableName());
-
- // index type
- String indexHandlerClass = index.getIndexHandlerClass();
- IndexType indexType = HiveIndex.getIndexTypeByClassName(indexHandlerClass);
- indexColumns.add(indexType.getName());
-
- String comment = HiveStringUtils.escapeJava(index.getParameters().get("comment"));
- indexColumns.add(comment);
-
- formatOutput(indexColumns.toArray(new String[0]), indexInfo, isOutputPadded, true);
-
- return indexInfo.toString();
- }
-
public static String getConstraintsInformation(PrimaryKeyInfo pkInfo, ForeignKeyInfo fkInfo,
UniqueConstraint ukInfo, NotNullConstraint nnInfo) {
StringBuilder constraintsInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
@@ -713,12 +670,6 @@ public final class MetaDataFormatUtils {
return DescTableDesc.getSchema(showColStats).split("#")[0].split(",");
}
- public static String getIndexColumnsHeader() {
- StringBuilder indexCols = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);
- formatOutput(ShowIndexesDesc.getSchema().split("#")[0].split(","), indexCols);
- return indexCols.toString();
- }
-
public static MetaDataFormatter getFormatter(HiveConf conf) {
if ("json".equals(conf.get(HiveConf.ConfVars.HIVE_DDL_OUTPUT_FORMAT.varname, "text"))) {
return new JsonMetaDataFormatter();
@@ -802,7 +753,9 @@ public final class MetaDataFormatUtils {
if (p2.pool == null) {
return (p1.pool == null) ? 0 : -1;
}
- if (p1.pool == null) return 1;
+ if (p1.pool == null) {
+ return 1;
+ }
return Double.compare(p2.pool.getAllocFraction(), p1.pool.getAllocFraction());
});
for (PoolTreeNode child : children) {
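
The getIndexInformation method removed earlier in this file's diff assembled one SHOW INDEXES row per index: index name, base table, the key columns joined with ", ", the index table, the index type, and the comment. A small standalone sketch of that row layout follows; the field widths are assumptions and the sample values are taken from the test output later in this commit.

// Standalone sketch (not Hive code) of the SHOW INDEXES row layout the removed
// getIndexInformation produced. Column widths here are illustrative only.
import java.util.List;

public final class IndexRowFormatSketch {

  static String formatRow(String idxName, String baseTable, List<String> keyCols,
      String idxTable, String idxType, String comment) {
    String keys = String.join(", ", keyCols); // key columns are comma-joined
    return String.format("%-20s%-15s%-10s%-42s%-10s%s",
        idxName, baseTable, keys, idxTable, idxType, comment == null ? "" : comment);
  }

  public static void main(String[] args) {
    System.out.println(formatRow("srcpart_rc_index", "srcpart_rc", List.of("key"),
        "default__srcpart_rc_srcpart_rc_index__", "bitmap", null));
  }
}
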
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
deleted file mode 100644
index 81952bf..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskFactory;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.physical.index.IndexWhereProcessor;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.session.LineageState;
-
-/**
- * Utility class for index support.
- * Currently used for BITMAP and AGGREGATE indexes
- *
- */
-public final class IndexUtils {
-
- private static final Logger LOG = LoggerFactory.getLogger(IndexWhereProcessor.class.getName());
-
- private IndexUtils(){
- }
-
- /**
- * Check the partitions used by the table scan to make sure they also exist in the
- * index table.
- * @param pctx
- * @param indexes
- * @return partitions used by query. null if they do not exist in index table
- * @throws HiveException
- */
- public static Set<Partition> checkPartitionsCoveredByIndex(TableScanOperator tableScan,
- ParseContext pctx, List<Index> indexes) throws HiveException {
- Hive hive = Hive.get(pctx.getConf());
- // make sure each partition exists on the index table
- PrunedPartitionList queryPartitionList = pctx.getOpToPartList().get(tableScan);
- Set<Partition> queryPartitions = queryPartitionList.getPartitions();
- if (queryPartitions == null || queryPartitions.isEmpty()) {
- return null;
- }
-
- for (Partition part : queryPartitions) {
- if (!containsPartition(hive, part, indexes)) {
- return null; // problem if it doesn't contain the partition
- }
- }
-
- return queryPartitions;
- }
-
- /**
- * check that every index table contains the given partition and is fresh
- */
- private static boolean containsPartition(Hive hive, Partition part, List<Index> indexes)
- throws HiveException {
- HashMap<String, String> partSpec = part.getSpec();
- if (partSpec.isEmpty()) {
- // empty specs come from non-partitioned tables
- return isIndexTableFresh(hive, indexes, part.getTable());
- }
-
- for (Index index : indexes) {
- // index.getDbName() is used as a default database, which is database of target table,
- // if index.getIndexTableName() does not contain database name
- String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
- Table indexTable = hive.getTable(qualified[0], qualified[1]);
- // get partitions that match the spec
- Partition matchingPartition = hive.getPartition(indexTable, partSpec, false);
- if (matchingPartition == null) {
- LOG.info("Index table " + indexTable + "did not contain built partition that matched " + partSpec);
- return false;
- } else if (!isIndexPartitionFresh(hive, index, part)) {
- return false;
- }
- }
- return true;
- }
-
- /**
- * Check the index partitions on a partitioned table exist and are fresh
- */
- private static boolean isIndexPartitionFresh(Hive hive, Index index,
- Partition part) throws HiveException {
- LOG.info("checking index staleness...");
- try {
- String indexTs = index.getParameters().get(part.getSpec().toString());
- if (indexTs == null) {
- return false;
- }
-
- FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf());
- FileStatus[] parts = partFs.listStatus(part.getDataLocation(), FileUtils.HIDDEN_FILES_PATH_FILTER);
- for (FileStatus status : parts) {
- if (status.getModificationTime() > Long.parseLong(indexTs)) {
- LOG.info("Index is stale on partition '" + part.getName()
- + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
- + "' is higher than index creation time (" + indexTs + ").");
- return false;
- }
- }
- } catch (IOException e) {
- throw new HiveException("Failed to grab timestamp information from partition '" + part.getName() + "': " + e.getMessage(), e);
- }
- return true;
- }
-
- /**
- * Check that the indexes on the un-partitioned table exist and are fresh
- */
- private static boolean isIndexTableFresh(Hive hive, List<Index> indexes, Table src)
- throws HiveException {
- //check that they exist
- if (indexes == null || indexes.size() == 0) {
- return false;
- }
- //check that they are not stale
- for (Index index : indexes) {
- LOG.info("checking index staleness...");
- try {
- String indexTs = index.getParameters().get("base_timestamp");
- if (indexTs == null) {
- return false;
- }
-
- FileSystem srcFs = src.getPath().getFileSystem(hive.getConf());
- FileStatus[] srcs = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
- for (FileStatus status : srcs) {
- if (status.getModificationTime() > Long.parseLong(indexTs)) {
- LOG.info("Index is stale on table '" + src.getTableName()
- + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
- + "' is higher than index creation time (" + indexTs + ").");
- return false;
- }
- }
- } catch (IOException e) {
- throw new HiveException("Failed to grab timestamp information from table '" + src.getTableName() + "': " + e.getMessage(), e);
- }
- }
- return true;
- }
-
-
- /**
- * Get a list of indexes on a table that match given types.
- */
- public static List<Index> getIndexes(Table baseTableMetaData, List<String> matchIndexTypes)
- throws SemanticException {
- List<Index> matchingIndexes = new ArrayList<Index>();
-
- List<Index> indexesOnTable;
- try {
- indexesOnTable = getAllIndexes(baseTableMetaData, (short) -1); // get all indexes
- } catch (HiveException e) {
- throw new SemanticException("Error accessing metastore", e);
- }
-
- for (Index index : indexesOnTable) {
- String indexType = index.getIndexHandlerClass();
- if (matchIndexTypes.contains(indexType)) {
- matchingIndexes.add(index);
- }
- }
- return matchingIndexes;
- }
-
- /**
- * @return List containing index names if there are indexes on this table
- * @throws HiveException
- **/
- public static List<Index> getAllIndexes(Table table, short max) throws HiveException {
- Hive hive = Hive.get();
- return hive.getIndexes(table.getTTable().getDbName(), table.getTTable().getTableName(), max);
- }
-
- public static Task<?> createRootTask(
- HiveConf builderConf,
- Set<ReadEntity> inputs,
- Set<WriteEntity> outputs,
- StringBuilder command,
- LinkedHashMap<String, String> partSpec,
- String indexTableName,
- String dbName,
- LineageState lineageState){
- // Don't try to index optimize the query to build the index
- HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false);
- Driver driver = new Driver(builderConf, SessionState.get().getUserName(), lineageState);
- driver.compile(command.toString(), false);
-
- Task<?> rootTask = driver.getPlan().getRootTasks().get(0);
- inputs.addAll(driver.getPlan().getInputs());
- outputs.addAll(driver.getPlan().getOutputs());
-
- IndexMetadataChangeWork indexMetaChange = new IndexMetadataChangeWork(partSpec,
- indexTableName, dbName);
- IndexMetadataChangeTask indexMetaChangeTsk =
- (IndexMetadataChangeTask) TaskFactory.get(indexMetaChange, builderConf);
- indexMetaChangeTsk.setWork(indexMetaChange);
- rootTask.addDependentTask(indexMetaChangeTsk);
-
- driver.destroy();
-
- return rootTask;
- }
-
-
-}
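
The freshness checks deleted above (isIndexTableFresh and isIndexPartitionFresh) treat an index as stale whenever any data file under the table or partition has a modification time later than the timestamp recorded when the index was built. A minimal standalone sketch of that rule, assuming a local directory and plain java.nio.file I/O instead of Hadoop's FileSystem API:

// Standalone sketch (not Hive code) of the staleness rule the removed IndexUtils
// applied: an index is stale if any data file was modified after the recorded
// index build timestamp.
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public final class IndexFreshnessSketch {

  /** Returns true if no file directly under dataDir is newer than indexBuildTimeMillis. */
  static boolean isFresh(Path dataDir, long indexBuildTimeMillis) throws IOException {
    try (DirectoryStream<Path> files = Files.newDirectoryStream(dataDir)) {
      for (Path file : files) {
        String name = file.getFileName().toString();
        if (name.startsWith(".") || name.startsWith("_")) {
          continue; // skip hidden files, as Hive's HIDDEN_FILES_PATH_FILTER did
        }
        long modified = Files.getLastModifiedTime(file).toMillis();
        if (modified > indexBuildTimeMillis) {
          return false; // data changed after the index was built -> stale
        }
      }
    }
    return true;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical inputs: a table data directory and the stored build timestamp.
    Path dataDir = Paths.get(args.length > 0 ? args[0] : ".");
    long buildTime = System.currentTimeMillis();
    System.out.println("index fresh: " + isFresh(dataDir, buildTime));
  }
}
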
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 4f6be6d..71f7380 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverterPostProc;
import org.apache.hadoop.hive.ql.optimizer.correlation.CorrelationOptimizer;
import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication;
-import org.apache.hadoop.hive.ql.optimizer.index.RewriteGBUsingIndex;
import org.apache.hadoop.hive.ql.optimizer.lineage.Generator;
import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPruner;
import org.apache.hadoop.hive.ql.optimizer.metainfo.annotation.AnnotateWithOpTraits;
@@ -110,9 +109,9 @@ public class Optimizer {
}
if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) &&
- !pctx.getContext().isCboSucceeded()) {
- // We run constant propagation twice because after predicate pushdown, filter expressions
- // are combined and may become eligible for reduction (like is not null filter).
+ !pctx.getContext().isCboSucceeded()) {
+ // We run constant propagation twice because after predicate pushdown, filter expressions
+ // are combined and may become eligible for reduction (like is not null filter).
transformations.add(new ConstantPropagate());
}
@@ -155,9 +154,6 @@ public class Optimizer {
LOG.warn("Skew join is currently not supported in tez! Disabling the skew join optimization.");
}
}
- if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTGBYUSINGINDEX)) {
- transformations.add(new RewriteGBUsingIndex());
- }
transformations.add(new SamplePruner());
MapJoinProcessor mapJoinProcessor = isSparkExecEngine ? new SparkMapJoinProcessor()
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
index 5f0e842..c0ce684 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/QueryPlanPostProcessor.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hive.ql.exec.OperatorUtils;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork;
import org.apache.hadoop.hive.ql.exec.repl.bootstrap.ReplLoadWork;
-import org.apache.hadoop.hive.ql.index.IndexMetadataChangeWork;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.parse.GenTezProcContext;
import org.apache.hadoop.hive.ql.parse.GenTezWork;
@@ -102,7 +101,6 @@ public class QueryPlanPostProcessor {
}
else if(work instanceof ReplLoadWork ||
work instanceof ReplStateLogWork ||
- work instanceof IndexMetadataChangeWork ||
work instanceof GenTezWork ||
work instanceof GenSparkWork ||
work instanceof ArchiveWork ||
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java
deleted file mode 100644
index 641d877..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.index;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Stack;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.exec.FilterOperator;
-import org.apache.hadoop.hive.ql.exec.GroupByOperator;
-import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
-import org.apache.hadoop.hive.ql.exec.SelectOperator;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
-import org.apache.hadoop.hive.ql.lib.Rule;
-import org.apache.hadoop.hive.ql.lib.RuleRegExp;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-
-/**
- * RewriteCanApplyCtx class stores the context for the {@link RewriteCanApplyProcFactory}
- * to determine if any index can be used and if the input query
- * meets all the criteria for rewrite optimization.
- */
-public final class RewriteCanApplyCtx implements NodeProcessorCtx {
-
- private static final Logger LOG = LoggerFactory.getLogger(RewriteCanApplyCtx.class.getName());
-
- private RewriteCanApplyCtx(ParseContext parseContext) {
- this.parseContext = parseContext;
- }
-
- public static RewriteCanApplyCtx getInstance(ParseContext parseContext){
- return new RewriteCanApplyCtx(parseContext);
- }
-
- // Rewrite Variables
- private boolean selClauseColsFetchException = false;
- private boolean queryHasGroupBy = false;
- private boolean aggFuncIsNotCount = false;
- private boolean aggParameterException = false;
-
- //The most important, indexKey
- private String indexKey;
-
- private final ParseContext parseContext;
- private String alias;
- private String baseTableName;
- private String indexTableName;
- private String aggFunction;
-
- private TableScanOperator tableScanOperator;
- private List<SelectOperator> selectOperators;
- private List<GroupByOperator> groupByOperators;
-
- void resetCanApplyCtx(){
- setQueryHasGroupBy(false);
- setAggFuncIsNotCount(false);
- setSelClauseColsFetchException(false);
- setBaseTableName("");
- setAggFunction("");
- setIndexKey("");
- }
-
- public boolean isQueryHasGroupBy() {
- return queryHasGroupBy;
- }
-
- public void setQueryHasGroupBy(boolean queryHasGroupBy) {
- this.queryHasGroupBy = queryHasGroupBy;
- }
-
- public boolean isAggFuncIsNotCount() {
- return aggFuncIsNotCount;
- }
-
- public void setAggFuncIsNotCount(boolean aggFuncIsNotCount) {
- this.aggFuncIsNotCount = aggFuncIsNotCount;
- }
-
- public Map<String, String> getBaseToIdxTableMap() {
- return baseToIdxTableMap;
- }
-
- public void setAggFunction(String aggFunction) {
- this.aggFunction = aggFunction;
- }
-
- public String getAggFunction() {
- return aggFunction;
- }
-
- public void setSelClauseColsFetchException(boolean selClauseColsFetchException) {
- this.selClauseColsFetchException = selClauseColsFetchException;
- }
-
- public boolean isSelClauseColsFetchException() {
- return selClauseColsFetchException;
- }
-
- public String getAlias() {
- return alias;
- }
-
- public void setAlias(String alias) {
- this.alias = alias;
- }
-
- public String getBaseTableName() {
- return baseTableName;
- }
-
- public void setBaseTableName(String baseTableName) {
- this.baseTableName = baseTableName;
- }
-
- public String getIndexTableName() {
- return indexTableName;
- }
-
- public void setIndexTableName(String indexTableName) {
- this.indexTableName = indexTableName;
- }
-
- public ParseContext getParseContext() {
- return parseContext;
- }
-
- /**
- * This method walks all the nodes starting from the topOp TableScanOperator node
- * and invokes methods from {@link RewriteCanApplyProcFactory} for each of the rules
- * added to the opRules map. We use the {@link PreOrderOnceWalker} for a pre-order
- * traversal of the operator tree.
- *
- * The methods from {@link RewriteCanApplyProcFactory} set appropriate values in
- * {@link RewriteVars} enum.
- *
- * @param topOp
- * @throws SemanticException
- */
- void populateRewriteVars(TableScanOperator topOp)
- throws SemanticException{
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
- //^TS%[(SEL%)|(FIL%)]*GBY%[(FIL%)]*RS%[(FIL%)]*GBY%
- opRules.put(
- new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%[("
- + SelectOperator.getOperatorName() + "%)|(" + FilterOperator.getOperatorName() + "%)]*"
- + GroupByOperator.getOperatorName() + "%[" + FilterOperator.getOperatorName() + "%]*"
- + ReduceSinkOperator.getOperatorName() + "%[" + FilterOperator.getOperatorName()
- + "%]*" + GroupByOperator.getOperatorName() + "%"),
- RewriteCanApplyProcFactory.canApplyOnTableScanOperator(topOp));
-
- // The dispatcher fires the processor corresponding to the closest matching
- // rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, this);
- GraphWalker ogw = new PreOrderOnceWalker(disp);
-
- // Create a list of topop nodes
- List<Node> topNodes = new ArrayList<Node>();
- topNodes.add(topOp);
-
- try {
- ogw.startWalking(topNodes, null);
- } catch (SemanticException e) {
- LOG.error("Exception in walking operator tree. Rewrite variables not populated");
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
- }
-
-
- /**
- * Default procedure for {@link DefaultRuleDispatcher}.
- * @return
- */
- private NodeProcessor getDefaultProc() {
- return new NodeProcessor() {
- @Override
- public Object process(Node nd, Stack<Node> stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
- return null;
- }
- };
- }
-
-
- //Map for base table to index table mapping
- //TableScan operator for base table will be modified to read from index table
- private final Map<String, String> baseToIdxTableMap = new HashMap<String, String>();;
-
- public void addTable(String baseTableName, String indexTableName) {
- baseToIdxTableMap.put(baseTableName, indexTableName);
- }
-
- public String findBaseTable(String baseTableName) {
- return baseToIdxTableMap.get(baseTableName);
- }
-
- public String getIndexKey() {
- return indexKey;
- }
-
- public void setIndexKey(String indexKey) {
- this.indexKey = indexKey;
- }
-
- public TableScanOperator getTableScanOperator() {
- return tableScanOperator;
- }
-
- public void setTableScanOperator(TableScanOperator tableScanOperator) {
- this.tableScanOperator = tableScanOperator;
- }
-
- public List<SelectOperator> getSelectOperators() {
- return selectOperators;
- }
-
- public void setSelectOperators(List<SelectOperator> selectOperators) {
- this.selectOperators = selectOperators;
- }
-
- public List<GroupByOperator> getGroupByOperators() {
- return groupByOperators;
- }
-
- public void setGroupByOperators(List<GroupByOperator> groupByOperators) {
- this.groupByOperators = groupByOperators;
- }
-
- public void setAggParameterException(boolean aggParameterException) {
- this.aggParameterException = aggParameterException;
- }
-
- public boolean isAggParameterException() {
- return aggParameterException;
- }
-}
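
populateRewriteVars, removed above, only attempts the rewrite when the operator tree matches the shape TS [SEL|FIL]* GBY [FIL]* RS [FIL]* GBY. A toy standalone sketch of that shape test, assuming the operator short names TS, SEL, FIL, GBY and RS and using java.util.regex rather than Hive's RuleRegExp machinery:

// Standalone sketch (not Hive code): check that an operator pipeline has the
// shape the removed rewrite expected, using a plain regex over operator short
// names. The abbreviations are assumptions for illustration only.
import java.util.List;
import java.util.regex.Pattern;

public final class OperatorShapeSketch {

  private static final Pattern GROUP_BY_OVER_SCAN =
      Pattern.compile("TS( (SEL|FIL))* GBY( FIL)* RS( FIL)* GBY");

  /** Returns true if the operator pipeline has the shape the rewrite expects. */
  static boolean shapeMatches(List<String> operatorNames) {
    return GROUP_BY_OVER_SCAN.matcher(String.join(" ", operatorNames)).matches();
  }

  public static void main(String[] args) {
    System.out.println(shapeMatches(List.of("TS", "SEL", "GBY", "RS", "GBY"))); // true
    System.out.println(shapeMatches(List.of("TS", "JOIN", "GBY")));             // false
  }
}
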
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java
deleted file mode 100644
index 41d2282..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyProcFactory.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.index;
-
-import org.apache.hadoop.hive.ql.exec.GroupByOperator;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.SelectOperator;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.AggregationDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
-import org.apache.hadoop.hive.ql.plan.GroupByDesc;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Stack;
-
-/**
- * Factory of methods used by {@link RewriteGBUsingIndex}
- * to determine if the rewrite optimization can be applied to the input query.
- *
- */
-public final class RewriteCanApplyProcFactory {
- public static CheckTableScanProc canApplyOnTableScanOperator(TableScanOperator topOp) {
- return new CheckTableScanProc();
- }
-
- private static class CheckTableScanProc implements NodeProcessor {
- public CheckTableScanProc() {
- }
-
- public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx, Object... nodeOutputs)
- throws SemanticException {
- RewriteCanApplyCtx canApplyCtx = (RewriteCanApplyCtx) ctx;
- for (Node node : stack) {
- // For table scan operator,
- // check ReferencedColumns to make sure that only the index column is
- // selected for the following operators.
- if (node instanceof TableScanOperator) {
- TableScanOperator ts = (TableScanOperator) node;
- canApplyCtx.setTableScanOperator(ts);
- List<String> selectColumns = ts.getConf().getReferencedColumns();
- if (selectColumns == null || selectColumns.size() != 1) {
- canApplyCtx.setSelClauseColsFetchException(true);
- return null;
- } else {
- canApplyCtx.setIndexKey(selectColumns.get(0));
- }
- } else if (node instanceof SelectOperator) {
- // For select operators in the stack, we just add them
- if (canApplyCtx.getSelectOperators() == null) {
- canApplyCtx.setSelectOperators(new ArrayList<SelectOperator>());
- }
- canApplyCtx.getSelectOperators().add((SelectOperator) node);
- } else if (node instanceof GroupByOperator) {
- if (canApplyCtx.getGroupByOperators() == null) {
- canApplyCtx.setGroupByOperators(new ArrayList<GroupByOperator>());
- }
- // According to the pre-order,
- // the first GroupbyOperator is the one before RS
- // and the second one is the one after RS
- GroupByOperator operator = (GroupByOperator) node;
- canApplyCtx.getGroupByOperators().add(operator);
- if (!canApplyCtx.isQueryHasGroupBy()) {
- canApplyCtx.setQueryHasGroupBy(true);
- GroupByDesc conf = operator.getConf();
- List<AggregationDesc> aggrList = conf.getAggregators();
- if (aggrList == null || aggrList.size() != 1
- || !("count".equals(aggrList.get(0).getGenericUDAFName()))) {
- // In the current implementation, we make sure that only count is
- // in the function
- canApplyCtx.setAggFuncIsNotCount(true);
- return null;
- } else {
- List<ExprNodeDesc> para = aggrList.get(0).getParameters();
- if (para == null || para.size() == 0 || para.size() > 1) {
- canApplyCtx.setAggParameterException(true);
- return null;
- } else {
- ExprNodeDesc expr = ExprNodeDescUtils.backtrack(para.get(0), operator,
- (Operator<OperatorDesc>) stack.get(0));
- if (!(expr instanceof ExprNodeColumnDesc)) {
- canApplyCtx.setAggParameterException(true);
- return null;
- }
- }
- }
- }
- }
- }
- return null;
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
deleted file mode 100644
index 3cb176e..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.index;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.ql.exec.TableScanOperator;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.index.AggregateIndexHandler;
-import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
-import org.apache.hadoop.hive.ql.optimizer.Transform;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-
-
-/**
- * RewriteGBUsingIndex is implemented as one of the Rule-based Optimizations.
- * Implements optimizations for GroupBy clause rewrite using aggregate index.
- * This optimization rewrites a GroupBy query over the base table into a query over a simple
- * table-scan of the index table, if there is an index on the group-by key(s) or the distinct column(s).
- * E.g.
- * <code>
- * select count(key)
- * from table
- * group by key;
- * </code>
- * to
- * <code>
- * select sum(_count_of_key)
- * from idx_table
- * group by key;
- * </code>
- *
- * The rewrite supports following queries:
- * <ul>
- * <li> Queries having only those col refs that are in the index key.
- * <li> Queries that have index key col refs
- * <ul>
- * <li> in SELECT
- * <li> in WHERE
- * <li> in GROUP BY
- * </ul>
- * <li> Queries with agg func COUNT(index key col ref) in SELECT
- * <li> Queries with SELECT DISTINCT index_key_col_refs
- * <li> Queries having a subquery satisfying above condition (only the subquery is rewritten)
- * </ul>
- *
- * @see AggregateIndexHandler
- * @see IndexUtils
- * @see RewriteCanApplyCtx
- * @see RewriteCanApplyProcFactory
- * @see RewriteParseContextGenerator
- * @see RewriteQueryUsingAggregateIndexCtx
- * @see RewriteQueryUsingAggregateIndex
- * For test cases, @see ql_rewrite_gbtoidx.q
- */
-
-public class RewriteGBUsingIndex extends Transform {
- private ParseContext parseContext;
- // Assumes one instance of this + single-threaded compilation for each query.
- private Hive hiveDb;
- private HiveConf hiveConf;
- private static final Logger LOG = LoggerFactory.getLogger(RewriteGBUsingIndex.class.getName());
-
- /*
- * Stores the list of top TableScanOperator names for which the rewrite
- * can be applied and the action that needs to be performed for operator tree
- * starting from this TableScanOperator
- */
- private final Map<String, RewriteCanApplyCtx> tsOpToProcess =
- new LinkedHashMap<String, RewriteCanApplyCtx>();
-
- //Index Validation Variables
- private static final String IDX_BUCKET_COL = "_bucketname";
- private static final String IDX_OFFSETS_ARRAY_COL = "_offsets";
-
-
- @Override
- public ParseContext transform(ParseContext pctx) throws SemanticException {
- parseContext = pctx;
- hiveConf = parseContext.getConf();
- try {
- hiveDb = Hive.get(hiveConf);
- } catch (HiveException e) {
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
-
- // Don't try to index optimize the query to build the index
- HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false);
-
- /* Check if the input query passes all the tests to be eligible for a rewrite
- * If yes, rewrite original query; else, return the current parseContext
- */
- if (shouldApplyOptimization()) {
- LOG.info("Rewriting Original Query using " + getName() + " optimization.");
- rewriteOriginalQuery();
- }
- return parseContext;
- }
-
- private String getName() {
- return "RewriteGBUsingIndex";
- }
-
- /**
- * We traverse the current operator tree to check for conditions in which the
- * optimization cannot be applied.
- *
- * At the end, we check if all conditions have passed for rewrite. If yes, we
- * determine if the index is usable for rewrite. Else, we log the condition which
- * did not meet the rewrite criterion.
- *
- * @return
- * @throws SemanticException
- */
- boolean shouldApplyOptimization() throws SemanticException {
- Map<Table, List<Index>> tableToIndex = getIndexesForRewrite();
- if (tableToIndex.isEmpty()) {
- LOG.debug("No Valid Index Found to apply Rewrite, " +
- "skipping " + getName() + " optimization");
- return false;
- }
- /*
- * This code iterates over each TableScanOperator from the topOps map from ParseContext.
- * For each operator tree originating from this top TableScanOperator, we determine
- * if the optimization can be applied. If yes, we add the name of the top table to
- * the tsOpToProcess to apply rewrite later on.
- * */
- for (Map.Entry<String, TableScanOperator> entry : parseContext.getTopOps().entrySet()) {
- String alias = entry.getKey();
- TableScanOperator topOp = entry.getValue();
- Table table = topOp.getConf().getTableMetadata();
- List<Index> indexes = tableToIndex.get(table);
- if (indexes.isEmpty()) {
- continue;
- }
- if (table.isPartitioned()) {
- //if base table has partitions, we need to check if index is built for
- //all partitions. If not, then we do not apply the optimization
- if (!checkIfIndexBuiltOnAllTablePartitions(topOp, indexes)) {
- LOG.debug("Index is not built for all table partitions, " +
- "skipping " + getName() + " optimization");
- continue;
- }
- }
- //check if rewrite can be applied for operator tree
- //if there are no partitions on base table
- checkIfRewriteCanBeApplied(alias, topOp, table, indexes);
- }
- return !tsOpToProcess.isEmpty();
- }
-
- /**
- * This method checks if rewrite can be applied using the index and also
- * verifies all conditions of the operator tree.
- *
- * @param topOp - TableScanOperator for a single operator tree branch
- * @param indexes - Map of a table and list of indexes on it
- * @return - true if rewrite can be applied on the current branch; false otherwise
- * @throws SemanticException
- */
- private boolean checkIfRewriteCanBeApplied(String alias, TableScanOperator topOp,
- Table baseTable, List<Index> indexes) throws SemanticException{
- //Context for checking if this optimization can be applied to the input query
- RewriteCanApplyCtx canApplyCtx = RewriteCanApplyCtx.getInstance(parseContext);
- canApplyCtx.setAlias(alias);
- canApplyCtx.setBaseTableName(baseTable.getTableName());
- canApplyCtx.populateRewriteVars(topOp);
- Map<Index, String> indexTableMap = getIndexToKeysMap(indexes);
- for (Map.Entry<Index, String> entry : indexTableMap.entrySet()) {
- //we rewrite the original query using the first valid index encountered
- //this can be changed if we have a better mechanism to
- //decide which index will produce a better rewrite
- Index index = entry.getKey();
- String indexKeyName = entry.getValue();
- //break here if any valid index is found to apply rewrite
- if (canApplyCtx.getIndexKey() != null && canApplyCtx.getIndexKey().equals(indexKeyName)
- && checkIfAllRewriteCriteriaIsMet(canApplyCtx)) {
- canApplyCtx.setAggFunction("_count_of_" + indexKeyName + "");
- canApplyCtx.addTable(canApplyCtx.getBaseTableName(), index.getIndexTableName());
- canApplyCtx.setIndexTableName(index.getIndexTableName());
- tsOpToProcess.put(alias, canApplyCtx);
- return true;
- }
- }
- return false;
- }
-
- /**
- * Get a list of indexes which can be used for rewrite.
- * @return
- * @throws SemanticException
- */
- private Map<Table, List<Index>> getIndexesForRewrite() throws SemanticException{
- List<String> supportedIndexes = new ArrayList<String>();
- supportedIndexes.add(AggregateIndexHandler.class.getName());
-
- // query the metastore to know what columns we have indexed
- Collection<TableScanOperator> topTables = parseContext.getTopOps().values();
- Map<Table, List<Index>> indexes = new HashMap<Table, List<Index>>();
- for (TableScanOperator op : topTables) {
- TableScanOperator tsOP = op;
- List<Index> tblIndexes = IndexUtils.getIndexes(tsOP.getConf().getTableMetadata(),
- supportedIndexes);
- if (tblIndexes.size() > 0) {
- indexes.put(tsOP.getConf().getTableMetadata(), tblIndexes);
- }
- }
-
- return indexes;
- }
-
- /**
- * This method checks if the index is built on all partitions of the base
- * table. If not, then the method returns false as we do not apply optimization
- * for this case.
- * @param tableScan
- * @param indexes
- * @return
- * @throws SemanticException
- */
- private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableScan,
- List<Index> indexes) throws SemanticException {
- // check if we have indexes on all partitions in this table scan
- Set<Partition> queryPartitions;
- try {
- queryPartitions = IndexUtils.checkPartitionsCoveredByIndex(tableScan, parseContext, indexes);
- if (queryPartitions == null) { // partitions not covered
- return false;
- }
- } catch (HiveException e) {
- LOG.error("Fatal Error: problem accessing metastore", e);
- throw new SemanticException(e);
- }
- if (queryPartitions.size() != 0) {
- return true;
- }
- return false;
- }
-
- /**
- * This code block iterates over indexes on the table and populates the indexToKeys map
- * for all the indexes that satisfy the rewrite criteria.
- * @param indexTables
- * @return
- * @throws SemanticException
- */
- Map<Index, String> getIndexToKeysMap(List<Index> indexTables) throws SemanticException{
- Hive hiveInstance = hiveDb;
- Map<Index, String> indexToKeysMap = new LinkedHashMap<Index, String>();
- for (int idxCtr = 0; idxCtr < indexTables.size(); idxCtr++) {
- Index index = indexTables.get(idxCtr);
- //Getting index key columns
- StorageDescriptor sd = index.getSd();
- List<FieldSchema> idxColList = sd.getCols();
- assert idxColList.size()==1;
- String indexKeyName = idxColList.get(0).getName();
- // Check that the index schema is as expected. This code block should
- // catch problems of this rewrite breaking when the AggregateIndexHandler
- // index is changed.
- List<String> idxTblColNames = new ArrayList<String>();
- try {
- String[] qualified = Utilities.getDbTableName(index.getDbName(),
- index.getIndexTableName());
- Table idxTbl = hiveInstance.getTable(qualified[0], qualified[1]);
- for (FieldSchema idxTblCol : idxTbl.getCols()) {
- idxTblColNames.add(idxTblCol.getName());
- }
- } catch (HiveException e) {
- LOG.error("Got exception while locating index table, " +
- "skipping " + getName() + " optimization");
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
- assert(idxTblColNames.contains(IDX_BUCKET_COL));
- assert(idxTblColNames.contains(IDX_OFFSETS_ARRAY_COL));
- // we add all index tables which can be used for rewrite
- // and defer the decision of using a particular index for later
- // this is to allow choosing an index if a better mechanism is
- // designed later to choose a better rewrite
- indexToKeysMap.put(index, indexKeyName);
- }
- return indexToKeysMap;
- }
-
- /**
- * Method to rewrite the input query if all optimization criteria are met.
- * The method iterates over the tsOpToProcess map to apply the rewrites.
- * @throws SemanticException
- *
- */
- private void rewriteOriginalQuery() throws SemanticException {
- for (RewriteCanApplyCtx canApplyCtx : tsOpToProcess.values()) {
- RewriteQueryUsingAggregateIndexCtx rewriteQueryCtx =
- RewriteQueryUsingAggregateIndexCtx.getInstance(parseContext, hiveDb, canApplyCtx);
- rewriteQueryCtx.invokeRewriteQueryProc();
- parseContext = rewriteQueryCtx.getParseContext();
- }
- LOG.info("Finished Rewriting query");
- }
-
-
- /**
- * This method logs the reason for which we cannot apply the rewrite optimization.
- * @return
- */
- boolean checkIfAllRewriteCriteriaIsMet(RewriteCanApplyCtx canApplyCtx) {
- if (canApplyCtx.isSelClauseColsFetchException()) {
- LOG.debug("Got exception while locating child col refs for select list, " + "skipping "
- + getName() + " optimization.");
- return false;
- }
- if (canApplyCtx.isAggFuncIsNotCount()) {
- LOG.debug("Agg func other than count is " + "not supported by " + getName()
- + " optimization.");
- return false;
- }
- if (canApplyCtx.isAggParameterException()) {
- LOG.debug("Got exception while locating parameter refs for aggregation, " + "skipping "
- + getName() + " optimization.");
- return false;
- }
- return true;
- }
-}
-
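
The rewrite described in the class comment above is equivalent because an aggregate index stores a precomputed row count per key (the _count_of_key column), so summing those counts per key over the index table reproduces count(key) grouped by key over the base table. Below is a small standalone check of that equivalence on in-memory data; the values and the record type are illustrative assumptions.

// Standalone sketch (not Hive code): summing the per-key counts stored by an
// aggregate index gives the same result as counting base-table rows per key.
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public final class AggregateIndexEquivalenceSketch {

  /** A row of the hypothetical aggregate index table: key plus a precomputed count. */
  record IndexRow(String key, long countOfKey) {}

  public static void main(String[] args) {
    // Base table rows, spread over two partitions.
    List<String> partition1 = List.of("a", "a", "b");
    List<String> partition2 = List.of("a", "c", "b");

    // count(key) ... group by key, evaluated over the full base table.
    Map<String, Long> countOverBase = Stream.concat(partition1.stream(), partition2.stream())
        .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));

    // The aggregate index stores one precomputed count per key and partition.
    List<IndexRow> indexTable = List.of(
        new IndexRow("a", 2), new IndexRow("b", 1),                          // from partition1
        new IndexRow("a", 1), new IndexRow("c", 1), new IndexRow("b", 1));   // from partition2

    // sum(_count_of_key) ... group by key, evaluated over the index table.
    Map<String, Long> sumOverIndex = indexTable.stream()
        .collect(Collectors.groupingBy(IndexRow::key,
            Collectors.summingLong(IndexRow::countOfKey)));

    System.out.println(countOverBase.equals(sumOverIndex)); // prints true
  }
}
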
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
deleted file mode 100644
index 2a01d29..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.optimizer.index;
-
-import java.io.IOException;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.Context;
-import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.ParseDriver;
-import org.apache.hadoop.hive.ql.parse.ParseException;
-import org.apache.hadoop.hive.ql.parse.ParseUtils;
-import org.apache.hadoop.hive.ql.parse.QB;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-
-
-/**
- * RewriteParseContextGenerator is a class that offers methods to generate operator tree
- * for input queries. It is implemented along the lines of the analyzeInternal(..) method
- * of {@link SemanticAnalyzer} but it creates only the ParseContext for the input query command.
- * It does not optimize or generate map-reduce tasks for the input query.
- * This can be used when you need to create operator tree for an internal query.
- *
- */
-public final class RewriteParseContextGenerator {
-
- private static final Logger LOG = LoggerFactory.getLogger(RewriteParseContextGenerator.class.getName());
-
- /**
- * Parse the input {@link String} command and generate an operator tree.
- * @param conf
- * @param command
- * @throws SemanticException
- */
- public static Operator<? extends OperatorDesc> generateOperatorTree(QueryState queryState,
- String command) throws SemanticException {
- Operator<? extends OperatorDesc> operatorTree;
- try {
- Context ctx = new Context(queryState.getConf());
- ASTNode tree = ParseUtils.parse(command, ctx);
-
- BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
- assert(sem instanceof SemanticAnalyzer);
- operatorTree = doSemanticAnalysis((SemanticAnalyzer) sem, tree, ctx);
- LOG.info("Sub-query Semantic Analysis Completed");
- } catch (IOException e) {
- LOG.error("IOException in generating the operator " +
- "tree for input command - " + command + " " , e);
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- } catch (ParseException e) {
- LOG.error("ParseException in generating the operator " +
- "tree for input command - " + command + " " , e);
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- } catch (SemanticException e) {
- LOG.error("SemanticException in generating the operator " +
- "tree for input command - " + command + " " , e);
- LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
- throw new SemanticException(e.getMessage(), e);
- }
- return operatorTree;
- }
-
- /**
- * For the input ASTNode tree, perform a semantic analysis and check metadata.
- * Generate an operator tree and return it.
- *
- * @param ctx
- * @param sem
- * @param ast
- * @return
- * @throws SemanticException
- */
- private static Operator<?> doSemanticAnalysis(SemanticAnalyzer sem,
- ASTNode ast, Context ctx) throws SemanticException {
- QB qb = new QB(null, null, false);
- ASTNode child = ast;
- ParseContext subPCtx = sem.getParseContext();
- subPCtx.setContext(ctx);
- sem.initParseCtx(subPCtx);
-
- LOG.info("Starting Sub-query Semantic Analysis");
- sem.doPhase1(child, qb, sem.initPhase1Ctx(), null);
- LOG.info("Completed phase 1 of Sub-query Semantic Analysis");
-
- sem.getMetaData(qb);
- LOG.info("Completed getting MetaData in Sub-query Semantic Analysis");
-
- LOG.info("Sub-query Abstract syntax tree: " + ast.toStringTree());
- Operator<?> operator = sem.genPlan(qb);
-
- LOG.info("Sub-query Completed plan generation");
- return operator;
- }
-
-}
[04/15] hive git commit: HIVE-18448: Drop Support For Indexes From Apache Hive (Zoltan Haindrich reviewed by Ashutosh Chauhan)
Posted by kg...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_bitmap_rc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_rc.q.out b/ql/src/test/results/clientpositive/index_bitmap_rc.q.out
deleted file mode 100644
index 046442c..0000000
--- a/ql/src/test/results/clientpositive/index_bitmap_rc.q.out
+++ /dev/null
@@ -1,349 +0,0 @@
-PREHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@srcpart_rc
-POSTHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcpart_rc
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-x WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND x.key=100 AND x.ds = '2008-04-08' and x.hr = 11 GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart_rc
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'BITMAP' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bitmaps EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:ROW__OFFSET__INSIDE__BLOCK, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offset SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key = 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.key = 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-COLLECT_SET(`_offset`) as `_offsets` FROM default__srcpart_rc_srcpart_rc_index__
-WHERE NOT EWAH_BITMAP_EMPTY(`_bitmaps`) AND key=100 GROUP BY `_bucketname`
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart_rc
-PREHOOK: query: DROP TABLE srcpart_rc
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Output: default@srcpart_rc
-POSTHOOK: query: DROP TABLE srcpart_rc
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Output: default@srcpart_rc
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_compact.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_compact.q.out b/ql/src/test/results/clientpositive/index_compact.q.out
deleted file mode 100644
index 97d7bac..0000000
--- a/ql/src/test/results/clientpositive/index_compact.q.out
+++ /dev/null
@@ -1,271 +0,0 @@
-PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__
-PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: CREATE INDEX srcpart_index_proj ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__
-PREHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_index_proj ON srcpart REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_srcpart_index_proj__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_srcpart_index_proj__ x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_srcpart_index_proj__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart
-POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_compact_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_compact_1.q.out b/ql/src/test/results/clientpositive/index_compact_1.q.out
deleted file mode 100644
index 7be9ada..0000000
--- a/ql/src/test/results/clientpositive/index_compact_1.q.out
+++ /dev/null
@@ -1,70 +0,0 @@
-PREHOOK: query: EXPLAIN
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT x.* FROM default__src_src_index__ x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__src_src_index__ x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM src WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_compact_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_compact_2.q.out b/ql/src/test/results/clientpositive/index_compact_2.q.out
deleted file mode 100644
index 28ba095..0000000
--- a/ql/src/test/results/clientpositive/index_compact_2.q.out
+++ /dev/null
@@ -1,317 +0,0 @@
-PREHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@srcpart_rc
-POSTHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@srcpart_rc
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 12
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-PREHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: query: INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-09', hr=12) SELECT key, value FROM srcpart WHERE ds = '2008-04-09' AND hr = 12
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: srcpart_rc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x WHERE x.ds = '2008-04-08' and x.hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100 AND ds = '2008-04-08' and hr = 11
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart_rc
-PREHOOK: query: EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-POSTHOOK: query: EXPLAIN
-CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-STAGE DEPENDENCIES:
- Stage-0 is a root stage
-
-STAGE PLANS:
- Stage: Stage-0
-
-PREHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: CREATE INDEX srcpart_rc_index ON TABLE srcpart_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: query: ALTER INDEX srcpart_rc_index ON srcpart_rc REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Output: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart_rc)srcpart_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__srcpart_rc_srcpart_rc_index__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart_rc)srcpart_rc.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__srcpart_rc_srcpart_rc_index__ x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-PREHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@default__srcpart_rc_srcpart_rc_index__@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM srcpart_rc WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_rc@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@srcpart_rc
-POSTHOOK: query: DROP INDEX srcpart_rc_index on srcpart_rc
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@srcpart_rc
-PREHOOK: query: DROP TABLE srcpart_rc
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@srcpart_rc
-PREHOOK: Output: default@srcpart_rc
-POSTHOOK: query: DROP TABLE srcpart_rc
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@srcpart_rc
-POSTHOOK: Output: default@srcpart_rc
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_compact_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_compact_3.q.out b/ql/src/test/results/clientpositive/index_compact_3.q.out
deleted file mode 100644
index 14a5927..0000000
--- a/ql/src/test/results/clientpositive/index_compact_3.q.out
+++ /dev/null
@@ -1,84 +0,0 @@
-PREHOOK: query: CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_index_test_rc
-POSTHOOK: query: CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_index_test_rc
-PREHOOK: query: INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_index_test_rc
-POSTHOOK: query: INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_index_test_rc
-POSTHOOK: Lineage: src_index_test_rc.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_index_test_rc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src_index_test_rc
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src_index_test_rc(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src_index_test_rc
-POSTHOOK: Output: default@default__src_index_test_rc_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src_index_test_rc REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src_index_test_rc
-PREHOOK: Output: default@default__src_index_test_rc_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src_index_test_rc REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src_index_test_rc
-POSTHOOK: Output: default@default__src_index_test_rc_src_index__
-POSTHOOK: Lineage: default__src_index_test_rc_src_index__._bucketname SIMPLE [(src_index_test_rc)src_index_test_rc.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_index_test_rc_src_index__._offsets EXPRESSION [(src_index_test_rc)src_index_test_rc.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_index_test_rc_src_index__.key SIMPLE [(src_index_test_rc)src_index_test_rc.FieldSchema(name:key, type:int, comment:null), ]
-PREHOOK: query: SELECT x.* FROM default__src_index_test_rc_src_index__ x
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_index_test_rc_src_index__
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT x.* FROM default__src_index_test_rc_src_index__ x
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_index_test_rc_src_index__
-#### A masked pattern was here ####
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_index_test_rc_src_index__
-#### A masked pattern was here ####
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_index_test_rc_src_index__
-#### A masked pattern was here ####
-PREHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_index_test_rc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_index_test_rc
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_index_test_rc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src_index_test_rc WHERE key=100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_index_test_rc
-#### A masked pattern was here ####
-100 val_100
-100 val_100
-PREHOOK: query: DROP INDEX src_index on src_index_test_rc
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src_index_test_rc
-POSTHOOK: query: DROP INDEX src_index on src_index_test_rc
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src_index_test_rc
-PREHOOK: query: DROP TABLE src_index_test_rc
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@src_index_test_rc
-PREHOOK: Output: default@src_index_test_rc
-POSTHOOK: query: DROP TABLE src_index_test_rc
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@src_index_test_rc
-POSTHOOK: Output: default@src_index_test_rc
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_compact_binary_search.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_compact_binary_search.q.out b/ql/src/test/results/clientpositive/index_compact_binary_search.q.out
deleted file mode 100644
index dbbd9ed..0000000
--- a/ql/src/test/results/clientpositive/index_compact_binary_search.q.out
+++ /dev/null
@@ -1,473 +0,0 @@
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT * FROM src WHERE key = '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key < '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key <= '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key > '8'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-98 val_98
-82 val_82
-92 val_92
-83 val_83
-84 val_84
-96 val_96
-95 val_95
-98 val_98
-85 val_85
-87 val_87
-90 val_90
-95 val_95
-80 val_80
-90 val_90
-83 val_83
-9 val_9
-97 val_97
-84 val_84
-90 val_90
-97 val_97
-PREHOOK: query: SELECT * FROM src WHERE key >= '9'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-98 val_98
-92 val_92
-96 val_96
-95 val_95
-98 val_98
-90 val_90
-95 val_95
-90 val_90
-9 val_9
-97 val_97
-90 val_90
-97 val_97
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-PREHOOK: query: SELECT * FROM src WHERE key = '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key < '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key <= '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key > '8'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-98 val_98
-82 val_82
-92 val_92
-83 val_83
-84 val_84
-96 val_96
-95 val_95
-98 val_98
-85 val_85
-87 val_87
-90 val_90
-95 val_95
-80 val_80
-90 val_90
-83 val_83
-9 val_9
-97 val_97
-84 val_84
-90 val_90
-97 val_97
-PREHOOK: query: SELECT * FROM src WHERE key >= '9'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-98 val_98
-92 val_92
-96 val_96
-95 val_95
-98 val_98
-90 val_90
-95 val_95
-90 val_90
-9 val_9
-97 val_97
-90 val_90
-97 val_97
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-PREHOOK: query: SELECT * FROM src WHERE key = '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key < '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key <= '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key > '8'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-98 val_98
-82 val_82
-92 val_92
-83 val_83
-84 val_84
-96 val_96
-95 val_95
-98 val_98
-85 val_85
-87 val_87
-90 val_90
-95 val_95
-80 val_80
-90 val_90
-83 val_83
-9 val_9
-97 val_97
-84 val_84
-90 val_90
-97 val_97
-PREHOOK: query: SELECT * FROM src WHERE key >= '9'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-98 val_98
-92 val_92
-96 val_96
-95 val_95
-98 val_98
-90 val_90
-95 val_95
-90 val_90
-9 val_9
-97 val_97
-90 val_90
-97 val_97
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-PREHOOK: query: SELECT * FROM src WHERE key = '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key < '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key <= '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key > '8'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-98 val_98
-82 val_82
-92 val_92
-83 val_83
-84 val_84
-96 val_96
-95 val_95
-98 val_98
-85 val_85
-87 val_87
-90 val_90
-95 val_95
-80 val_80
-90 val_90
-83 val_83
-9 val_9
-97 val_97
-84 val_84
-90 val_90
-97 val_97
-PREHOOK: query: SELECT * FROM src WHERE key >= '9'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-98 val_98
-92 val_92
-96 val_96
-95 val_95
-98 val_98
-90 val_90
-95 val_95
-90 val_90
-9 val_9
-97 val_97
-90 val_90
-97 val_97
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-PREHOOK: query: SELECT * FROM src WHERE key = '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key < '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key <= '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key > '8'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-98 val_98
-82 val_82
-92 val_92
-83 val_83
-84 val_84
-96 val_96
-95 val_95
-98 val_98
-85 val_85
-87 val_87
-90 val_90
-95 val_95
-80 val_80
-90 val_90
-83 val_83
-9 val_9
-97 val_97
-84 val_84
-90 val_90
-97 val_97
-PREHOOK: query: SELECT * FROM src WHERE key >= '9'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-98 val_98
-92 val_92
-96 val_96
-95 val_95
-98 val_98
-90 val_90
-95 val_95
-90 val_90
-9 val_9
-97 val_97
-90 val_90
-97 val_97
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-PREHOOK: query: SELECT * FROM src WHERE key = '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key < '1'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key <= '0'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-0 val_0
-0 val_0
-0 val_0
-PREHOOK: query: SELECT * FROM src WHERE key > '8'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-86 val_86
-98 val_98
-82 val_82
-92 val_92
-83 val_83
-84 val_84
-96 val_96
-95 val_95
-98 val_98
-85 val_85
-87 val_87
-90 val_90
-95 val_95
-80 val_80
-90 val_90
-83 val_83
-9 val_9
-97 val_97
-84 val_84
-90 val_90
-97 val_97
-PREHOOK: query: SELECT * FROM src WHERE key >= '9'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-98 val_98
-92 val_92
-96 val_96
-95 val_95
-98 val_98
-90 val_90
-95 val_95
-90 val_90
-9 val_9
-97 val_97
-90 val_90
-97 val_97
-PREHOOK: query: DROP INDEX src_index ON src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
http://git-wip-us.apache.org/repos/asf/hive/blob/b0d3cb45/ql/src/test/results/clientpositive/index_compression.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_compression.q.out b/ql/src/test/results/clientpositive/index_compression.q.out
deleted file mode 100644
index a2c0d27..0000000
--- a/ql/src/test/results/clientpositive/index_compression.q.out
+++ /dev/null
@@ -1,158 +0,0 @@
-PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-PREHOOK: type: CREATEINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD
-POSTHOOK: type: CREATEINDEX
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-PREHOOK: query: ALTER INDEX src_index ON src REBUILD
-PREHOOK: type: ALTERINDEX_REBUILD
-PREHOOK: Input: default@src
-PREHOOK: Output: default@default__src_src_index__
-POSTHOOK: query: ALTER INDEX src_index ON src REBUILD
-POSTHOOK: type: ALTERINDEX_REBUILD
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@default__src_src_index__
-POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
-POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
- Stage-3 is a root stage
- Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6
- Stage-5
- Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
- Stage-1 depends on stages: Stage-2
- Stage-4
- Stage-6
- Stage-7 depends on stages: Stage-6
- Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
- Stage: Stage-3
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: default__src_src_index__
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Select Operator
- expressions: _bucketname (type: string), _offsets (type: array<bigint>)
- outputColumnNames: _col0, _col1
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-8
- Conditional Operator
-
- Stage: Stage-5
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-2
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: src
- filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
- Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
- Filter Operator
- predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: key (type: string), value (type: string)
- outputColumnNames: _col0, _col1
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: true
- Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-4
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-6
- Map Reduce
- Map Operator Tree:
- TableScan
- File Output Operator
- compressed: false
- table:
- input format: org.apache.hadoop.mapred.TextInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
- Stage: Stage-7
- Move Operator
- files:
- hdfs directory: true
-#### A masked pattern was here ####
-
- Stage: Stage-0
- Fetch Operator
- limit: -1
- Processor Tree:
- ListSink
-
-PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@default__src_src_index__
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@default__src_src_index__
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-82 val_82
-83 val_83
-83 val_83
-84 val_84
-84 val_84
-85 val_85
-86 val_86
-87 val_87
-90 val_90
-90 val_90
-90 val_90
-92 val_92
-95 val_95
-95 val_95
-96 val_96
-97 val_97
-97 val_97
-98 val_98
-98 val_98
-PREHOOK: query: DROP INDEX src_index on src
-PREHOOK: type: DROPINDEX
-PREHOOK: Input: default@src
-POSTHOOK: query: DROP INDEX src_index on src
-POSTHOOK: type: DROPINDEX
-POSTHOOK: Input: default@src