Posted to commits@hive.apache.org by br...@apache.org on 2014/10/30 17:22:48 UTC
svn commit: r1635536 [14/28] - in /hive/branches/spark: ./ accumulo-handler/
accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/
accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/
accumulo-handler/src/test/org/apache/hado...
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceWork.java Thu Oct 30 16:22:33 2014
@@ -95,8 +95,6 @@ public class ReduceWork extends BaseWork
private ObjectInspector keyObjectInspector = null;
private ObjectInspector valueObjectInspector = null;
- private final Map<String, Integer> reduceColumnNameMap = new LinkedHashMap<String, Integer>();
-
/**
* If the plan has a reducer and correspondingly a reduce-sink, then store the TableDesc pointing
* to keySerializeInfo of the ReduceSink
@@ -142,58 +140,6 @@ public class ReduceWork extends BaseWork
return valueObjectInspector;
}
- private int addToReduceColumnNameMap(StructObjectInspector structObjectInspector, int startIndex, String prefix) {
- List<? extends StructField> fields = structObjectInspector.getAllStructFieldRefs();
- int index = startIndex;
- for (StructField field: fields) {
- reduceColumnNameMap.put(prefix + "." + field.getFieldName(), index);
- index++;
- }
- return index;
- }
-
- public Boolean fillInReduceColumnNameMap() {
- ObjectInspector keyObjectInspector = getKeyObjectInspector();
- if (keyObjectInspector == null || !(keyObjectInspector instanceof StructObjectInspector)) {
- return false;
- }
- StructObjectInspector keyStructObjectInspector = (StructObjectInspector) keyObjectInspector;
-
- ObjectInspector valueObjectInspector = getValueObjectInspector();
- if (valueObjectInspector == null || !(valueObjectInspector instanceof StructObjectInspector)) {
- return false;
- }
- StructObjectInspector valueStructObjectInspector = (StructObjectInspector) valueObjectInspector;
-
- int keyCount = addToReduceColumnNameMap(keyStructObjectInspector, 0, Utilities.ReduceField.KEY.toString());
- addToReduceColumnNameMap(valueStructObjectInspector, keyCount, Utilities.ReduceField.VALUE.toString());
- return true;
- }
-
- public Map<String, Integer> getReduceColumnNameMap() {
- if (needsTagging) {
- return null;
- }
- if (reduceColumnNameMap.size() == 0) {
- if (!fillInReduceColumnNameMap()) {
- return null;
- }
- }
- return reduceColumnNameMap;
- }
-
- public List<String> getReduceColumnNames() {
- if (needsTagging) {
- return null;
- }
- if (reduceColumnNameMap.size() == 0) {
- if (!fillInReduceColumnNameMap()) {
- return null;
- }
- }
- return new ArrayList<String>(reduceColumnNameMap.keySet());
- }
-
public List<TableDesc> getTagToValueDesc() {
return tagToValueDesc;
}
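For readers tracking the removal above: the deleted helpers walked the key and value StructObjectInspectors and assigned consecutive indices to columns named with the Utilities.ReduceField prefixes (KEY, VALUE). A minimal standalone sketch of that naming scheme, with plain string lists standing in for the inspectors and hypothetical field names:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class ReduceColumnNameSketch {
      // Mirrors the removed addToReduceColumnNameMap: prefix each field name
      // and hand out consecutive indices, key fields first, then value fields.
      static int addAll(Map<String, Integer> map, List<String> fields,
          int startIndex, String prefix) {
        int index = startIndex;
        for (String field : fields) {
          map.put(prefix + "." + field, index++);
        }
        return index;
      }

      public static void main(String[] args) {
        Map<String, Integer> m = new LinkedHashMap<String, Integer>();
        int keyCount = addAll(m, Arrays.asList("reducesinkkey0"), 0, "KEY");
        addAll(m, Arrays.asList("_col0", "_col1"), keyCount, "VALUE");
        // {KEY.reducesinkkey0=0, VALUE._col0=1, VALUE._col1=2}
        System.out.println(m);
      }
    }
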
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java Thu Oct 30 16:22:33 2014
@@ -32,6 +32,10 @@ public class ShowFunctionsDesc extends D
String pattern;
String resFile;
/**
+ * whether the LIKE keyword was specified
+ */
+ private boolean isLikePattern = false;
+ /**
* table name for the result of show tables.
*/
private static final String table = "show";
@@ -69,6 +73,18 @@ public class ShowFunctionsDesc extends D
}
/**
+ * @param pattern
+ * pattern of the function names to show
+ * @param isLikePattern
+ * whether the LIKE keyword was used
+ */
+ public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
+ this(resFile, pattern);
+ this.isLikePattern = isLikePattern;
+ }
+
+
+ /**
* @return the pattern
*/
@Explain(displayName = "pattern")
@@ -99,4 +115,11 @@ public class ShowFunctionsDesc extends D
public void setResFile(String resFile) {
this.resFile = resFile;
}
+
+ /**
+ * @return isLikePattern
+ */
+ public boolean getIsLikePattern() {
+ return isLikePattern;
+ }
}
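A hedged usage sketch of the new three-argument constructor, assuming hive-exec on the classpath; the result-file path is made up:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;

    public class ShowFunctionsDescExample {
      public static void main(String[] args) {
        // SHOW FUNCTIONS LIKE 'xpath*' -> the compiler can now record the LIKE form.
        ShowFunctionsDesc desc =
            new ShowFunctionsDesc(new Path("/tmp/show_functions.res"), "xpath*", true);
        System.out.println(desc.getIsLikePattern()); // true
      }
    }
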
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java Thu Oct 30 16:22:33 2014
@@ -24,7 +24,7 @@ import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
-import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -76,12 +76,16 @@ public class TableDesc implements Serial
return inputFileFormatClass;
}
+ public Deserializer getDeserializer() throws Exception {
+ return getDeserializer(null);
+ }
+
/**
* Return a deserializer object corresponding to the tableDesc.
*/
- public Deserializer getDeserializer() throws Exception {
+ public Deserializer getDeserializer(Configuration conf) throws Exception {
Deserializer de = getDeserializerClass().newInstance();
- SerDeUtils.initializeSerDe(de, null, properties, null);
+ SerDeUtils.initializeSerDe(de, conf, properties, null);
return de;
}
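The practical effect of the new overload is that callers can thread a live Configuration into SerDe initialization instead of the previous hard-coded null. A small helper sketch under that assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.plan.TableDesc;
    import org.apache.hadoop.hive.serde2.Deserializer;

    public class DeserializerLookup {
      // Prefer the new overload so the SerDe is initialized against the live
      // Configuration; the no-arg form preserves the old null-Configuration path.
      static Deserializer deserializerFor(TableDesc desc, Configuration conf)
          throws Exception {
        return conf == null ? desc.getDeserializer() : desc.getDeserializer(conf);
      }
    }
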
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java Thu Oct 30 16:22:33 2014
@@ -149,17 +149,17 @@ public final class OpProcFactory {
}
}
-
+
public static class PTFPPD extends ScriptPPD {
-
+
/*
* For WindowingTableFunction if:
- * a. there is a Rank/DenseRank function: if there are unpushedPred of the form
+ * a. there is a Rank/DenseRank function: if there are unpushedPred of the form
* rnkValue < Constant; then use the smallest Constant val as the 'rankLimit'
* on the WindowingTablFn.
- * b. If there are no Wdw Fns with an End Boundary past the current row, the
+ * b. If there are no Wdw Fns with an End Boundary past the current row, the
* condition can be pushed down as a limit pushdown(mapGroupBy=true)
- *
+ *
* (non-Javadoc)
* @see org.apache.hadoop.hive.ql.ppd.OpProcFactory.ScriptPPD#process(org.apache.hadoop.hive.ql.lib.Node, java.util.Stack, org.apache.hadoop.hive.ql.lib.NodeProcessorCtx, java.lang.Object[])
*/
@@ -170,30 +170,30 @@ public final class OpProcFactory {
+ ((Operator) nd).getIdentifier() + ")");
OpWalkerInfo owi = (OpWalkerInfo) procCtx;
PTFOperator ptfOp = (PTFOperator) nd;
-
+
pushRankLimit(ptfOp, owi);
return super.process(nd, stack, procCtx, nodeOutputs);
}
-
+
private void pushRankLimit(PTFOperator ptfOp, OpWalkerInfo owi) throws SemanticException {
PTFDesc conf = ptfOp.getConf();
-
+
if ( !conf.forWindowing() ) {
return;
}
-
+
float threshold = owi.getParseContext().getConf().getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE);
if (threshold <= 0 || threshold >= 1) {
return;
}
-
+
WindowTableFunctionDef wTFn = (WindowTableFunctionDef) conf.getFuncDef();
List<Integer> rFnIdxs = rankingFunctions(wTFn);
-
+
if ( rFnIdxs.size() == 0 ) {
return;
}
-
+
ExprWalkerInfo childInfo = getChildWalkerInfo(ptfOp, owi);
if (childInfo == null) {
@@ -207,7 +207,7 @@ public final class OpProcFactory {
preds = ExprNodeDescUtils.split(pred, preds);
}
}
-
+
int rLimit = -1;
int fnIdx = -1;
for(ExprNodeDesc pred : preds) {
@@ -219,7 +219,7 @@ public final class OpProcFactory {
}
}
}
-
+
if ( rLimit != -1 ) {
wTFn.setRankLimit(rLimit);
wTFn.setRankLimitFunction(fnIdx);
@@ -228,68 +228,68 @@ public final class OpProcFactory {
}
}
}
-
+
private List<Integer> rankingFunctions(WindowTableFunctionDef wTFn) {
List<Integer> rFns = new ArrayList<Integer>();
for(int i=0; i < wTFn.getWindowFunctions().size(); i++ ) {
WindowFunctionDef wFnDef = wTFn.getWindowFunctions().get(i);
- if ( (wFnDef.getWFnEval() instanceof GenericUDAFRankEvaluator) ||
+ if ( (wFnDef.getWFnEval() instanceof GenericUDAFRankEvaluator) ||
(wFnDef.getWFnEval() instanceof GenericUDAFDenseRankEvaluator ) ) {
rFns.add(i);
}
}
return rFns;
}
-
+
/*
* For a predicate check if it is a candidate for pushing down as limit optimization.
* The expression must be of the form rankFn <|<= constant.
*/
private int[] getLimit(WindowTableFunctionDef wTFn, List<Integer> rFnIdxs, ExprNodeDesc expr) {
-
+
if ( !(expr instanceof ExprNodeGenericFuncDesc) ) {
return null;
}
-
+
ExprNodeGenericFuncDesc fExpr = (ExprNodeGenericFuncDesc) expr;
-
- if ( !(fExpr.getGenericUDF() instanceof GenericUDFOPLessThan) &&
+
+ if ( !(fExpr.getGenericUDF() instanceof GenericUDFOPLessThan) &&
!(fExpr.getGenericUDF() instanceof GenericUDFOPEqualOrLessThan) ) {
return null;
}
-
+
if ( !(fExpr.getChildren().get(0) instanceof ExprNodeColumnDesc) ) {
return null;
}
-
+
if ( !(fExpr.getChildren().get(1) instanceof ExprNodeConstantDesc) ) {
return null;
}
-
+
ExprNodeConstantDesc constantExpr = (ExprNodeConstantDesc) fExpr.getChildren().get(1) ;
-
+
if ( constantExpr.getTypeInfo() != TypeInfoFactory.intTypeInfo ) {
return null;
}
-
+
int limit = (Integer) constantExpr.getValue();
if ( fExpr.getGenericUDF() instanceof GenericUDFOPEqualOrLessThan ) {
limit = limit + 1;
}
String colName = ((ExprNodeColumnDesc)fExpr.getChildren().get(0)).getColumn();
-
+
for(int i=0; i < rFnIdxs.size(); i++ ) {
String fAlias = wTFn.getWindowFunctions().get(i).getAlias();
if ( fAlias.equals(colName)) {
return new int[] {limit,i};
}
}
-
+
return null;
}
-
+
/*
- * Limit can be pushed down to Map-side if all Window Functions need access
+ * Limit can be pushed down to Map-side if all Window Functions need access
* to rows before the current row. This is true for:
* 1. Rank, DenseRank and Lead Fns. (the window doesn't matter for lead fn).
* 2. If the Window for the function is Row based and the End Boundary doesn't
@@ -298,8 +298,8 @@ public final class OpProcFactory {
private boolean canPushLimitToReduceSink(WindowTableFunctionDef wTFn) {
for(WindowFunctionDef wFnDef : wTFn.getWindowFunctions() ) {
- if ( (wFnDef.getWFnEval() instanceof GenericUDAFRankEvaluator) ||
- (wFnDef.getWFnEval() instanceof GenericUDAFDenseRankEvaluator ) ||
+ if ( (wFnDef.getWFnEval() instanceof GenericUDAFRankEvaluator) ||
+ (wFnDef.getWFnEval() instanceof GenericUDAFDenseRankEvaluator ) ||
(wFnDef.getWFnEval() instanceof GenericUDAFLeadEvaluator ) ) {
continue;
}
@@ -314,18 +314,18 @@ public final class OpProcFactory {
}
return true;
}
-
+
private void pushRankLimitToRedSink(PTFOperator ptfOp, HiveConf conf, int rLimit) throws SemanticException {
-
+
Operator<? extends OperatorDesc> parent = ptfOp.getParentOperators().get(0);
Operator<? extends OperatorDesc> gP = parent == null ? null : parent.getParentOperators().get(0);
-
+
if ( gP == null || !(gP instanceof ReduceSinkOperator )) {
return;
}
-
+
float threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE);
-
+
ReduceSinkOperator rSink = (ReduceSinkOperator) gP;
ReduceSinkDesc rDesc = rSink.getConf();
rDesc.setTopN(rLimit);
@@ -543,7 +543,7 @@ public final class OpProcFactory {
private void applyFilterTransitivity(JoinOperator nd, OpWalkerInfo owi)
throws SemanticException {
ExprWalkerInfo prunePreds =
- owi.getPrunedPreds((Operator<? extends OperatorDesc>) nd);
+ owi.getPrunedPreds(nd);
if (prunePreds != null) {
// We want to use the row resolvers of the parents of the join op
// because the rowresolver refers to the output columns of an operator
@@ -579,9 +579,6 @@ public final class OpProcFactory {
int numColumns = eqExpressions.size();
int numEqualities = eqExpressions.get(0).size();
- // joins[i] is the join between table i and i+1 in the JoinOperator
- JoinCondDesc[] joins = (nd).getConf().getConds();
-
// oldFilters contains the filters to be pushed down
Map<String, List<ExprNodeDesc>> oldFilters =
prunePreds.getFinalCandidates();
@@ -632,10 +629,32 @@ public final class OpProcFactory {
}
}
}
+ // Push where false filter transitively
+ Map<String,List<ExprNodeDesc>> candidates = prunePreds.getNonFinalCandidates();
+ List<ExprNodeDesc> exprs;
+ // where false is not associated with any alias in candidates
+ if (null != candidates && candidates.get(null) != null && ((exprs = candidates.get(null)) != null)) {
+ Iterator<ExprNodeDesc> itr = exprs.iterator();
+ while (itr.hasNext()) {
+ ExprNodeDesc expr = itr.next();
+ if (expr instanceof ExprNodeConstantDesc && Boolean.FALSE.equals(((ExprNodeConstantDesc)expr).getValue())) {
+ // push this 'where false' expr to all aliases
+ for (String alias : aliasToRR.keySet()) {
+ List<ExprNodeDesc> pushedFilters = newFilters.get(alias);
+ if (null == pushedFilters) {
+ newFilters.put(alias, new ArrayList<ExprNodeDesc>());
+ }
+ newFilters.get(alias).add(expr);
+ }
+ // this filter is pushed, we can remove it from non-final candidates.
+ itr.remove();
+ }
+ }
+ }
for (Entry<String, List<ExprNodeDesc>> aliasToFilters
: newFilters.entrySet()){
- owi.getPrunedPreds((Operator<? extends OperatorDesc>) nd)
+ owi.getPrunedPreds(nd)
.addPushDowns(aliasToFilters.getKey(), aliasToFilters.getValue());
}
}
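To make the getLimit contract above concrete: a predicate qualifies only if it has the shape rankColumn < constant or rankColumn <= constant with an int constant, and '<=' is converted by adding one so the boundary row is retained. A standalone sketch of just that arithmetic:

    public class RankLimitSketch {
      // Mirrors the limit arithmetic in getLimit: '<' keeps the constant,
      // '<=' bumps it by one.
      static int rankLimit(String op, int constant) {
        return "<=".equals(op) ? constant + 1 : constant;
      }

      public static void main(String[] args) {
        System.out.println(rankLimit("<", 5));  // rnk < 5  -> rankLimit 5
        System.out.println(rankLimit("<=", 3)); // rnk <= 3 -> rankLimit 4
      }
    }
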
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java Thu Oct 30 16:22:33 2014
@@ -68,5 +68,5 @@ public interface HiveAccessController {
List<HiveRoleGrant> getRoleGrantInfoForPrincipal(HivePrincipal principal) throws HiveAuthzPluginException,
HiveAccessControlException;
- void applyAuthorizationConfigPolicy(HiveConf hiveConf);
+ void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException;
}
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java Thu Oct 30 16:22:33 2014
@@ -191,8 +191,9 @@ public interface HiveAuthorizer {
* Modify the given HiveConf object to configure authorization related parameters
* or other parameters related to hive security
* @param hiveConf
+ * @throws HiveAuthzPluginException
*/
- public void applyAuthorizationConfigPolicy(HiveConf hiveConf);
+ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException;
}
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerImpl.java Thu Oct 30 16:22:33 2014
@@ -124,7 +124,7 @@ public class HiveAuthorizerImpl implemen
}
@Override
- public void applyAuthorizationConfigPolicy(HiveConf hiveConf) {
+ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException {
accessController.applyAuthorizationConfigPolicy(hiveConf);
}
}
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java Thu Oct 30 16:22:33 2014
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -59,9 +60,9 @@ import org.apache.hadoop.hive.ql.securit
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.SettableConfigUpdater;
import org.apache.thrift.TException;
-import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableSet;
/**
@@ -350,9 +351,9 @@ public class SQLStdHiveAccessController
@Override
public List<HiveRoleGrant> getPrincipalGrantInfoForRole(String roleName) throws HiveAuthzPluginException, HiveAccessControlException {
// only user belonging to admin role can list role
- if (!isUserAdmin()) {
+ if (!isUserAdmin() && !doesUserHasAdminOption(Arrays.asList(roleName))) {
throw new HiveAccessControlException("Current user : " + currentUserName+ " is not"
- + " allowed get principals in a role. " + ADMIN_ONLY_MSG);
+ + " allowed get principals in a role. " + ADMIN_ONLY_MSG + " Otherwise, " + HAS_ADMIN_PRIV_MSG);
}
try {
return getHiveRoleGrants(metastoreClientFactory.getHiveMetastoreClient(), roleName);
@@ -609,72 +610,8 @@ public class SQLStdHiveAccessController
}
}
-
- /**
- * Default list of modifiable config parameters for sql standard authorization
- */
- static final String [] defaultModWhiteListSqlStdAuth = new String [] {
- ConfVars.BYTESPERREDUCER.varname,
- ConfVars.MAXREDUCERS.varname,
- ConfVars.HIVEMAPSIDEAGGREGATE.varname,
- ConfVars.HIVEMAPAGGRHASHMEMORY.varname,
- ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD.varname,
- ConfVars.HIVEMAPAGGRHASHMINREDUCTION.varname,
- ConfVars.HIVEGROUPBYSKEW.varname,
- ConfVars.HIVE_OPTIMIZE_MULTI_GROUPBY_COMMON_DISTINCTS.varname,
- ConfVars.HIVEOPTGBYUSINGINDEX.varname,
- ConfVars.HIVEOPTPPD.varname,
- ConfVars.HIVEOPTPPD_STORAGE.varname,
- ConfVars.HIVEOPTPPD_STORAGE.varname,
- ConfVars.HIVEPPDRECOGNIZETRANSITIVITY.varname,
- ConfVars.HIVEOPTGROUPBY.varname,
- ConfVars.HIVEOPTSORTDYNAMICPARTITION.varname,
- ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME.varname,
- ConfVars.HIVE_OPTIMIZE_UNION_REMOVE.varname,
- ConfVars.HIVEMULTIGROUPBYSINGLEREDUCER.varname,
- ConfVars.HIVE_MAP_GROUPBY_SORT.varname,
- ConfVars.HIVE_MAP_GROUPBY_SORT_TESTMODE.varname,
- ConfVars.HIVESKEWJOIN.varname,
- ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME.varname,
- ConfVars.HIVEMAPREDMODE.varname,
- ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname,
- ConfVars.COMPRESSRESULT.varname,
- ConfVars.COMPRESSINTERMEDIATE.varname,
- ConfVars.EXECPARALLEL.varname,
- ConfVars.EXECPARALLETHREADNUMBER.varname,
- ConfVars.EXECPARALLETHREADNUMBER.varname,
- ConfVars.HIVEROWOFFSET.varname,
- ConfVars.HIVEMERGEMAPFILES.varname,
- ConfVars.HIVEMERGEMAPREDFILES.varname,
- ConfVars.HIVEMERGETEZFILES.varname,
- ConfVars.HIVEIGNOREMAPJOINHINT.varname,
- ConfVars.HIVECONVERTJOIN.varname,
- ConfVars.HIVECONVERTJOINNOCONDITIONALTASK.varname,
- ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD.varname,
- ConfVars.HIVECONVERTJOINUSENONSTAGED.varname,
- ConfVars.HIVECONVERTJOINNOCONDITIONALTASK.varname,
- ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD.varname,
- ConfVars.HIVECONVERTJOINUSENONSTAGED.varname,
- ConfVars.HIVEENFORCEBUCKETING.varname,
- ConfVars.HIVEENFORCESORTING.varname,
- ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname,
- ConfVars.HIVE_AUTO_SORTMERGE_JOIN.varname,
- ConfVars.HIVE_EXECUTION_ENGINE.varname,
- ConfVars.HIVE_VECTORIZATION_ENABLED.varname,
- ConfVars.HIVEMAPJOINUSEOPTIMIZEDKEYS.varname,
- ConfVars.HIVEMAPJOINLAZYHASHTABLE.varname,
- ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname,
- ConfVars.HIVE_COMPAT.varname,
- ConfVars.DYNAMICPARTITIONINGMODE.varname,
- "mapred.reduce.tasks",
- "mapred.output.compression.codec",
- "mapred.map.output.compression.codec",
- "mapreduce.job.reduce.slowstart.completedmaps",
- "mapreduce.job.queuename",
- };
-
@Override
- public void applyAuthorizationConfigPolicy(HiveConf hiveConf) {
+ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException {
// First apply configuration applicable to both Hive Cli and HiveServer2
// Not adding any authorization related restrictions to hive cli
// grant all privileges for table to its owner - set this in cli as well so that owner
@@ -682,28 +619,21 @@ public class SQLStdHiveAccessController
hiveConf.setVar(ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS, "INSERT,SELECT,UPDATE,DELETE");
// Apply rest of the configuration only to HiveServer2
- if(sessionCtx.getClientType() == CLIENT_TYPE.HIVESERVER2) {
+ if (sessionCtx.getClientType() == CLIENT_TYPE.HIVESERVER2
+ && hiveConf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+
// Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries
String hooks = hiveConf.getVar(ConfVars.PREEXECHOOKS).trim();
if (hooks.isEmpty()) {
hooks = DisallowTransformHook.class.getName();
} else {
- hooks = hooks + "," +DisallowTransformHook.class.getName();
+ hooks = hooks + "," + DisallowTransformHook.class.getName();
}
LOG.debug("Configuring hooks : " + hooks);
hiveConf.setVar(ConfVars.PREEXECHOOKS, hooks);
- // restrict the variables that can be set using set command to a list in whitelist
- hiveConf.setIsModWhiteListEnabled(true);
- String whiteListParamsStr = hiveConf.getVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST);
- if (whiteListParamsStr == null || whiteListParamsStr.trim().equals("")){
- // set the default configs in whitelist
- whiteListParamsStr = Joiner.on(",").join(defaultModWhiteListSqlStdAuth);
- hiveConf.setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST, whiteListParamsStr);
- }
- for(String whiteListParam : whiteListParamsStr.split(",")){
- hiveConf.addToModifiableWhiteList(whiteListParam);
- }
+ SettableConfigUpdater.setHiveConfWhiteList(hiveConf);
+
}
}
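The hook wiring that remains in applyAuthorizationConfigPolicy is plain comma-joined appending onto PREEXECHOOKS. A standalone sketch of that step; the hook names passed in are illustrative:

    public class HookChainSketch {
      // Mirrors the PREEXECHOOKS handling: start the chain if empty,
      // otherwise append with a comma separator.
      static String appendHook(String existingHooks, String hookClass) {
        String hooks = existingHooks == null ? "" : existingHooks.trim();
        return hooks.isEmpty() ? hookClass : hooks + "," + hookClass;
      }

      public static void main(String[] args) {
        System.out.println(appendHook("", "DisallowTransformHook"));
        // com.example.MyHook is illustrative only.
        System.out.println(appendHook("com.example.MyHook", "DisallowTransformHook"));
      }
    }
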
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java Thu Oct 30 16:22:33 2014
@@ -174,7 +174,7 @@ public class SQLStdHiveAccessControllerW
}
@Override
- public void applyAuthorizationConfigPolicy(HiveConf hiveConf) {
+ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException {
hiveAccessController.applyAuthorizationConfigPolicy(hiveConf);
}
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java Thu Oct 30 16:22:33 2014
@@ -29,6 +29,7 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -86,6 +87,7 @@ import org.apache.hadoop.hive.serde2.obj
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableStringObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampObjectInspector;
import org.apache.hadoop.io.BytesWritable;
+import org.apache.tez.mapreduce.hadoop.MRJobConfig;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
@@ -175,6 +177,9 @@ public class StatsUtils {
colStats = getTableColumnStats(table, schema, neededColumns);
}
+ // infer if any column can be a primary key based on column statistics
+ inferAndSetPrimaryKey(stats.getNumRows(), colStats);
+
stats.setColumnStatsState(deriveStatType(colStats, neededColumns));
stats.addToColumnStats(colStats);
} else if (partList != null) {
@@ -244,7 +249,7 @@ public class StatsUtils {
List<ColStatistics> emptyStats = Lists.newArrayList();
// add partition column stats
- addParitionColumnStats(neededColumns, referencedColumns, schema, table, partList,
+ addParitionColumnStats(conf, neededColumns, referencedColumns, schema, table, partList,
emptyStats);
stats.addToColumnStats(emptyStats);
@@ -258,9 +263,12 @@ public class StatsUtils {
List<ColStatistics> columnStats = convertColStats(colStats, table.getTableName(),
colToTabAlias);
- addParitionColumnStats(neededColumns, referencedColumns, schema, table, partList,
+ addParitionColumnStats(conf, neededColumns, referencedColumns, schema, table, partList,
columnStats);
+ // infer if any column can be a primary key based on column statistics
+ inferAndSetPrimaryKey(stats.getNumRows(), columnStats);
+
stats.addToColumnStats(columnStats);
State colState = deriveStatType(columnStats, referencedColumns);
if (aggrStats.getPartsFound() != partNames.size() && colState != State.NONE) {
@@ -275,7 +283,59 @@ public class StatsUtils {
return stats;
}
- private static void addParitionColumnStats(List<String> neededColumns,
+
+ /**
+ * Based on the provided column statistics and number of rows, this method infers if the column
+ * can be a primary key. It checks whether the range span, (max - min) + 1, is equal to the
+ * number of rows specified.
+ * @param numRows - number of rows
+ * @param colStats - column statistics
+ */
+ public static void inferAndSetPrimaryKey(long numRows, List<ColStatistics> colStats) {
+ if (colStats != null) {
+ for (ColStatistics cs : colStats) {
+ if (cs != null && cs.getRange() != null && cs.getRange().minValue != null &&
+ cs.getRange().maxValue != null) {
+ if (numRows ==
+ ((cs.getRange().maxValue.longValue() - cs.getRange().minValue.longValue()) + 1)) {
+ cs.setPrimaryKey(true);
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Infer foreign key relationship from given column statistics.
+ * @param csPK - column statistics of primary key
+ * @param csFK - column statistics of potential foreign key
+ * @return true if a foreign key relationship can be inferred
+ */
+ public static boolean inferForeignKey(ColStatistics csPK, ColStatistics csFK) {
+ if (csPK != null && csFK != null) {
+ if (csPK.isPrimaryKey()) {
+ if (csPK.getRange() != null && csFK.getRange() != null) {
+ ColStatistics.Range pkRange = csPK.getRange();
+ ColStatistics.Range fkRange = csFK.getRange();
+ return isWithin(fkRange, pkRange);
+ }
+ }
+ }
+ return false;
+ }
+
+ private static boolean isWithin(ColStatistics.Range range1, ColStatistics.Range range2) {
+ if (range1.minValue != null && range2.minValue != null && range1.maxValue != null &&
+ range2.maxValue != null) {
+ if (range1.minValue.longValue() >= range2.minValue.longValue() &&
+ range1.maxValue.longValue() <= range2.maxValue.longValue()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private static void addParitionColumnStats(HiveConf conf, List<String> neededColumns,
List<String> referencedColumns, List<ColumnInfo> schema, Table table,
PrunedPartitionList partList, List<ColStatistics> colStats)
throws HiveException {
@@ -298,6 +358,8 @@ public class StatsUtils {
long numPartitions = getNDVPartitionColumn(partList.getPartitions(),
ci.getInternalName());
partCS.setCountDistint(numPartitions);
+ partCS.setAvgColLen(StatsUtils.getAvgColLenOfVariableLengthTypes(conf,
+ ci.getObjectInspector(), partCS.getColumnType()));
colStats.add(partCS);
}
}
@@ -531,6 +593,7 @@ public class StatsUtils {
// Columns statistics for complex datatypes are not supported yet
return null;
}
+
return cs;
}
@@ -957,7 +1020,20 @@ public class StatsUtils {
colStat.setColumnName(outColName);
colStat.setTableAlias(outTabAlias);
}
- cs.add(colStat);
+ if (colStat != null) {
+ cs.add(colStat);
+ }
+ }
+
+ return cs;
+ }
+
+ // In cases where column expression map or row schema is missing, just pass on the parent column
+ // stats. This could happen in cases like TS -> FIL where FIL does not map input column names to
+ // internal names.
+ if (colExprMap == null || rowSchema == null) {
+ if (parentStats.getColumnStats() != null) {
+ cs.addAll(parentStats.getColumnStats());
}
}
return cs;
@@ -998,7 +1074,13 @@ public class StatsUtils {
if (encd.getIsPartitionColOrVirtualCol()) {
- // vitual columns
+ ColStatistics colStats = parentStats.getColumnStatisticsFromColName(colName);
+ if (colStats != null) {
+ /* If statistics for the column already exist use it. */
+ return colStats;
+ }
+
+ // virtual columns
colType = encd.getTypeInfo().getTypeName();
countDistincts = numRows;
oi = encd.getWritableObjectInspector();
@@ -1350,4 +1432,11 @@ public class StatsUtils {
}
}
+
+ public static long getAvailableMemory(Configuration conf) {
+ int memory = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0 ?
+ HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) :
+ conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB);
+ return memory;
+ }
}
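A worked check of the two inference rules added above, restated on plain longs so it runs without ColStatistics; the ranges are invented:

    public class KeyInferenceSketch {
      // inferAndSetPrimaryKey rule: a dense range [min, max] with
      // (max - min) + 1 == numRows can be a primary key.
      static boolean canBePrimaryKey(long numRows, long min, long max) {
        return numRows == (max - min) + 1;
      }

      // inferForeignKey rule: the candidate FK range must lie within the PK range.
      static boolean canBeForeignKey(long fkMin, long fkMax, long pkMin, long pkMax) {
        return fkMin >= pkMin && fkMax <= pkMax;
      }

      public static void main(String[] args) {
        System.out.println(canBePrimaryKey(100, 1, 100));    // true: 100 distinct slots
        System.out.println(canBePrimaryKey(100, 1, 150));    // false: range too wide
        System.out.println(canBeForeignKey(10, 90, 1, 100)); // true: contained in PK range
      }
    }
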
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java Thu Oct 30 16:22:33 2014
@@ -24,15 +24,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidTxnListImpl;
-import org.apache.hadoop.hive.metastore.api.LockComponent;
-import org.apache.hadoop.hive.metastore.api.LockLevel;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.LockState;
-import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.UnlockRequest;
import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -41,7 +38,12 @@ import org.apache.hadoop.util.StringUtil
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
/**
* A class to clean directories after compactions. This will run in a separate thread.
@@ -50,35 +52,85 @@ public class Cleaner extends CompactorTh
static final private String CLASS_NAME = Cleaner.class.getName();
static final private Log LOG = LogFactory.getLog(CLASS_NAME);
- private long cleanerCheckInterval = 5000;
+ private long cleanerCheckInterval = 0;
+
+ // List of compactions to clean.
+ private Map<Long, Set<Long>> compactId2LockMap = new HashMap<Long, Set<Long>>();
+ private Map<Long, CompactionInfo> compactId2CompactInfoMap = new HashMap<Long, CompactionInfo>();
@Override
public void run() {
- // Make sure nothing escapes this run method and kills the metastore at large,
- // so wrap it in a big catch Throwable statement.
+ if (cleanerCheckInterval == 0) {
+ cleanerCheckInterval = conf.getTimeVar(
+ HiveConf.ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL, TimeUnit.MILLISECONDS);
+ }
+
do {
+ // This is solely for testing. It checks if the test has set the looped value to false,
+ // and if so remembers that and then sets it to true at the end. We have to check here
+ // first to make sure we go through a complete iteration of the loop before resetting it.
+ boolean setLooped = !looped.boolVal;
+ // Make sure nothing escapes this run method and kills the metastore at large,
+ // so wrap it in a big catch Throwable statement.
try {
long startedAt = System.currentTimeMillis();
- // Now look for new entries ready to be cleaned.
+ // First look for all the compactions that are waiting to be cleaned. If we have not
+ // seen an entry before, look for all the locks held on that table or partition and
+ // record them. We will then only clean the partition once all of those locks have been
+ // released. This way we avoid removing the files while they are in use,
+ // while at the same time avoiding starving the cleaner as new readers come along.
+ // This works because we know that any reader who comes along after the worker thread has
+ // done the compaction will read the more up to date version of the data (either in a
+ // newer delta or in a newer base).
List<CompactionInfo> toClean = txnHandler.findReadyToClean();
- for (CompactionInfo ci : toClean) {
- LockComponent comp = null;
- comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, ci.dbname);
- comp.setTablename(ci.tableName);
- if (ci.partName != null) comp.setPartitionname(ci.partName);
- List<LockComponent> components = new ArrayList<LockComponent>(1);
- components.add(comp);
- LockRequest rqst = new LockRequest(components, System.getProperty("user.name"),
- Worker.hostname());
- LockResponse rsp = txnHandler.lockNoWait(rqst);
+ if (toClean.size() > 0 || compactId2LockMap.size() > 0) {
+ ShowLocksResponse locksResponse = txnHandler.showLocks(new ShowLocksRequest());
+
+ for (CompactionInfo ci : toClean) {
+ // Check to see if we have seen this request before. If so, ignore it. If not,
+ // add it to our queue.
+ if (!compactId2LockMap.containsKey(ci.id)) {
+ compactId2LockMap.put(ci.id, findRelatedLocks(ci, locksResponse));
+ compactId2CompactInfoMap.put(ci.id, ci);
+ }
+ }
+
+ // Now, for each entry in the queue, see if all of the associated locks are clear so we
+ // can clean
+ Set<Long> currentLocks = buildCurrentLockSet(locksResponse);
+ List<Long> expiredLocks = new ArrayList<Long>();
+ List<Long> compactionsCleaned = new ArrayList<Long>();
try {
- if (rsp.getState() == LockState.ACQUIRED) {
- clean(ci);
+ for (Map.Entry<Long, Set<Long>> queueEntry : compactId2LockMap.entrySet()) {
+ boolean sawLock = false;
+ for (Long lockId : queueEntry.getValue()) {
+ if (currentLocks.contains(lockId)) {
+ sawLock = true;
+ break;
+ } else {
+ expiredLocks.add(lockId);
+ }
+ }
+
+ if (!sawLock) {
+ // Remember to remove this when we're out of the loop,
+ // we can't do it in the loop or we'll get a concurrent modification exception.
+ compactionsCleaned.add(queueEntry.getKey());
+ clean(compactId2CompactInfoMap.get(queueEntry.getKey()));
+ } else {
+ // Remove the locks we didn't see so we don't look for them again next time
+ for (Long lockId : expiredLocks) {
+ queueEntry.getValue().remove(lockId);
+ }
+ }
}
} finally {
- if (rsp.getState() == LockState.ACQUIRED) {
- txnHandler.unlock(new UnlockRequest(rsp.getLockid()));
+ if (compactionsCleaned.size() > 0) {
+ for (Long compactId : compactionsCleaned) {
+ compactId2LockMap.remove(compactId);
+ compactId2CompactInfoMap.remove(compactId);
+ }
}
}
}
@@ -91,9 +143,37 @@ public class Cleaner extends CompactorTh
LOG.error("Caught an exception in the main loop of compactor cleaner, " +
StringUtils.stringifyException(t));
}
+ if (setLooped) {
+ looped.boolVal = true;
+ }
} while (!stop.boolVal);
}
+ private Set<Long> findRelatedLocks(CompactionInfo ci, ShowLocksResponse locksResponse) {
+ Set<Long> relatedLocks = new HashSet<Long>();
+ for (ShowLocksResponseElement lock : locksResponse.getLocks()) {
+ if (ci.dbname.equals(lock.getDbname())) {
+ if ((ci.tableName == null && lock.getTablename() == null) ||
+ (ci.tableName != null && ci.tableName.equals(lock.getTablename()))) {
+ if ((ci.partName == null && lock.getPartname() == null) ||
+ (ci.partName != null && ci.partName.equals(lock.getPartname()))) {
+ relatedLocks.add(lock.getLockid());
+ }
+ }
+ }
+ }
+
+ return relatedLocks;
+ }
+
+ private Set<Long> buildCurrentLockSet(ShowLocksResponse locksResponse) {
+ Set<Long> currentLocks = new HashSet<Long>(locksResponse.getLocks().size());
+ for (ShowLocksResponseElement lock : locksResponse.getLocks()) {
+ currentLocks.add(lock.getLockid());
+ }
+ return currentLocks;
+ }
+
private void clean(CompactionInfo ci) throws MetaException {
LOG.info("Starting cleaning for " + ci.getFullPartitionName());
try {
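The new cleaning discipline in a toy trace: record the locks that are open when a compaction becomes ready, then clean only once all of those recorded locks are gone; locks taken later are ignored, so new readers cannot starve the cleaner. A standalone sketch with made-up lock ids:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class CleanerGateSketch {
      public static void main(String[] args) {
        // Locks observed when compaction 42 became ready to clean.
        Set<Long> recorded = new HashSet<Long>(Arrays.asList(5L, 7L));

        // Next cycle: lock 5 released, lock 7 still held, lock 9 is a new reader.
        Set<Long> current = new HashSet<Long>(Arrays.asList(7L, 9L));

        // Drop recorded locks that have disappeared; lock 9 is never considered,
        // which is how new readers avoid starving the cleaner.
        recorded.retainAll(current);
        System.out.println("still blocking: " + recorded);         // [7]
        System.out.println("can clean now: " + recorded.isEmpty()); // false until 7 releases
      }
    }
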
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java Thu Oct 30 16:22:33 2014
@@ -506,13 +506,15 @@ public class CompactorMR {
ValidTxnList txnList =
new ValidTxnListImpl(jobConf.get(ValidTxnList.VALID_TXNS_KEY));
+ boolean isMajor = jobConf.getBoolean(IS_MAJOR, false);
AcidInputFormat.RawReader<V> reader =
- aif.getRawReader(jobConf, jobConf.getBoolean(IS_MAJOR, false), split.getBucket(),
+ aif.getRawReader(jobConf, isMajor, split.getBucket(),
txnList, split.getBaseDir(), split.getDeltaDirs());
RecordIdentifier identifier = reader.createKey();
V value = reader.createValue();
getWriter(reporter, reader.getObjectInspector(), split.getBucket());
while (reader.next(identifier, value)) {
+ if (isMajor && reader.isDelete(value)) continue;
writer.write(value);
reporter.progress();
}
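The one-line behavioral change above: a major compaction rewrites the full base, so rows marked as deletes can simply be omitted from the output rather than carried forward as tombstones, which only a subsequent base would make redundant.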
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java Thu Oct 30 16:22:33 2014
@@ -53,6 +53,7 @@ abstract class CompactorThread extends T
protected RawStore rs;
protected int threadId;
protected BooleanPointer stop;
+ protected BooleanPointer looped;
@Override
public void setHiveConf(HiveConf conf) {
@@ -66,8 +67,9 @@ abstract class CompactorThread extends T
}
@Override
- public void init(BooleanPointer stop) throws MetaException {
+ public void init(BooleanPointer stop, BooleanPointer looped) throws MetaException {
this.stop = stop;
+ this.looped = looped;
setPriority(MIN_PRIORITY);
setDaemon(true); // this means the process will exit without waiting for this thread
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java Thu Oct 30 16:22:33 2014
@@ -137,8 +137,8 @@ public class Initiator extends Compactor
}
@Override
- public void init(BooleanPointer stop) throws MetaException {
- super.init(stop);
+ public void init(BooleanPointer stop, BooleanPointer looped) throws MetaException {
+ super.init(stop, looped);
checkInterval =
conf.getTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CHECK_INTERVAL, TimeUnit.MILLISECONDS) ;
}
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java Thu Oct 30 16:22:33 2014
@@ -168,8 +168,8 @@ public class Worker extends CompactorThr
}
@Override
- public void init(BooleanPointer stop) throws MetaException {
- super.init(stop);
+ public void init(BooleanPointer stop, BooleanPointer looped) throws MetaException {
+ super.init(stop, looped);
StringBuilder name = new StringBuilder(hostname());
name.append("-");
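The extra BooleanPointer threaded through init() is a test hook: each compactor thread sets looped.boolVal back to true only after completing a full pass of its loop, so a test can clear the flag and spin until an iteration has demonstrably finished. A hedged sketch of the waiting side, assuming BooleanPointer is MetaStoreThread's simple mutable-boolean holder:

    import org.apache.hadoop.hive.metastore.MetaStoreThread;

    public class CompactorTestWait {
      // Hypothetical test-side wait built on the new hook.
      static void waitForOneFullLoop(MetaStoreThread.BooleanPointer looped)
          throws InterruptedException {
        looped.boolVal = false;
        while (!looped.boolVal) {
          Thread.sleep(10); // the compactor thread flips this after a complete pass
        }
      }
    }
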
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java Thu Oct 30 16:22:33 2014
@@ -100,6 +100,26 @@ public final class GenericUDFUtils {
* @return false if there is a type mismatch
*/
public boolean update(ObjectInspector oi) throws UDFArgumentTypeException {
+ return update(oi, false);
+ }
+
+ /**
+ * Update returnObjectInspector and valueInspectorsAreTheSame based on the
+ * ObjectInspector seen for UnionAll.
+ *
+ * @return false if there is a type mismatch
+ */
+ public boolean updateForUnionAll(ObjectInspector oi) throws UDFArgumentTypeException {
+ return update(oi, true);
+ }
+
+ /**
+ * Update returnObjectInspector and valueInspectorsAreTheSame based on the
+ * ObjectInspector seen.
+ *
+ * @return false if there is a type mismatch
+ */
+ private boolean update(ObjectInspector oi, boolean isUnionAll) throws UDFArgumentTypeException {
if (oi instanceof VoidObjectInspector) {
return true;
}
@@ -137,8 +157,14 @@ public final class GenericUDFUtils {
// Types are different, we need to check whether we can convert them to
// a common base class or not.
- TypeInfo commonTypeInfo = FunctionRegistry.getCommonClass(oiTypeInfo,
+ TypeInfo commonTypeInfo = null;
+ if (isUnionAll) {
+ commonTypeInfo = FunctionRegistry.getCommonClassForUnionAll(oiTypeInfo,
rTypeInfo);
+ } else {
+ commonTypeInfo = FunctionRegistry.getCommonClass(oiTypeInfo,
+ rTypeInfo);
+ }
if (commonTypeInfo == null) {
return false;
}
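A hedged sketch of driving the new UNION ALL path, assuming these methods live on GenericUDFUtils.ReturnObjectInspectorResolver (the class that holds update()) and that its boolean constructor enables type conversion:

    import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUtils;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class UnionTypeResolution {
      static ObjectInspector commonUnionType() throws UDFArgumentTypeException {
        // One resolver per UNION ALL output column; feed it each branch's inspector.
        GenericUDFUtils.ReturnObjectInspectorResolver r =
            new GenericUDFUtils.ReturnObjectInspectorResolver(true);
        r.updateForUnionAll(PrimitiveObjectInspectorFactory.javaIntObjectInspector);
        r.updateForUnionAll(PrimitiveObjectInspectorFactory.javaLongObjectInspector);
        // Common type chosen via FunctionRegistry.getCommonClassForUnionAll.
        return r.get();
      }
    }
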
Modified: hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java (original)
+++ hive/branches/spark/ql/src/java/org/apache/hadoop/hive/ql/util/ZooKeeperHiveHelper.java Thu Oct 30 16:22:33 2014
@@ -33,6 +33,7 @@ import org.apache.zookeeper.data.ACL;
public class ZooKeeperHiveHelper {
public static final Log LOG = LogFactory.getLog(ZooKeeperHiveHelper.class.getName());
public static final String ZOOKEEPER_PATH_SEPARATOR = "/";
+
/**
* Get the ensemble server addresses from the configuration. The format is: host1:port,
* host2:port..
@@ -90,6 +91,7 @@ public class ZooKeeperHiveHelper {
* A no-op watcher class
*/
public static class DummyWatcher implements Watcher {
+ @Override
public void process(org.apache.zookeeper.WatchedEvent event) {
}
}
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java Thu Oct 30 16:22:33 2014
@@ -256,6 +256,11 @@ public class TestFunctionRegistry extend
comparison(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.dateTypeInfo,
TypeInfoFactory.stringTypeInfo);
+ comparison(TypeInfoFactory.intTypeInfo, TypeInfoFactory.timestampTypeInfo,
+ TypeInfoFactory.doubleTypeInfo);
+ comparison(TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.intTypeInfo,
+ TypeInfoFactory.doubleTypeInfo);
+
comparison(TypeInfoFactory.stringTypeInfo, varchar10, TypeInfoFactory.stringTypeInfo);
comparison(varchar10, TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo);
comparison(varchar5, varchar10, varchar10);
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Thu Oct 30 16:22:33 2014
@@ -18,10 +18,7 @@
package org.apache.hadoop.hive.ql.exec;
-import java.io.File;
import java.io.IOException;
-import java.io.OutputStream;
-import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -32,7 +29,6 @@ import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
@@ -41,7 +37,6 @@ import org.apache.hadoop.hive.ql.parse.T
import org.apache.hadoop.hive.ql.plan.CollectDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.FilterDesc;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
@@ -49,8 +44,6 @@ import org.apache.hadoop.hive.ql.plan.Pl
import org.apache.hadoop.hive.ql.plan.ScriptDesc;
import org.apache.hadoop.hive.ql.plan.SelectDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.ql.processors.CommandProcessor;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
@@ -60,12 +53,9 @@ import org.apache.hadoop.hive.serde2.obj
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
-import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.junit.Test;
@@ -109,55 +99,6 @@ public class TestOperators extends TestC
}
}
- public void testBaseFilterOperator() throws Throwable {
- try {
- System.out.println("Testing Filter Operator");
- ExprNodeDesc col0 = TestExecDriver.getStringColumn("col0");
- ExprNodeDesc col1 = TestExecDriver.getStringColumn("col1");
- ExprNodeDesc col2 = TestExecDriver.getStringColumn("col2");
- ExprNodeDesc zero = new ExprNodeConstantDesc("0");
- ExprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor
- .getFuncExprNodeDesc(">", col2, col1);
- ExprNodeDesc func2 = TypeCheckProcFactory.DefaultExprProcessor
- .getFuncExprNodeDesc("==", col0, zero);
- ExprNodeDesc func3 = TypeCheckProcFactory.DefaultExprProcessor
- .getFuncExprNodeDesc("and", func1, func2);
- assert (func3 != null);
- FilterDesc filterCtx = new FilterDesc(func3, false);
-
- // Configuration
- Operator<FilterDesc> op = OperatorFactory.get(FilterDesc.class);
- op.setConf(filterCtx);
-
- // runtime initialization
- op.initialize(new JobConf(TestOperators.class),
- new ObjectInspector[] {r[0].oi});
-
- for (InspectableObject oner : r) {
- op.processOp(oner.o, 0);
- }
-
- Map<Enum<?>, Long> results = op.getStats();
- System.out.println("filtered = "
- + results.get(FilterOperator.Counter.FILTERED));
- assertEquals(Long.valueOf(4), results
- .get(FilterOperator.Counter.FILTERED));
- System.out.println("passed = "
- + results.get(FilterOperator.Counter.PASSED));
- assertEquals(Long.valueOf(1), results.get(FilterOperator.Counter.PASSED));
-
- /*
- * for(Enum e: results.keySet()) { System.out.println(e.toString() + ":" +
- * results.get(e)); }
- */
- System.out.println("Filter Operator ok");
-
- } catch (Throwable e) {
- e.printStackTrace();
- throw e;
- }
- }
-
private void testTaskIds(String [] taskIds, String expectedAttemptId, String expectedTaskId) {
Configuration conf = new JobConf(TestOperators.class);
for (String one: taskIds) {
@@ -328,7 +269,7 @@ public class TestOperators extends TestC
try {
System.out.println("Testing Map Operator");
// initialize configuration
- Configuration hconf = new JobConf(TestOperators.class);
+ JobConf hconf = new JobConf(TestOperators.class);
HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
"hdfs:///testDir/testFile");
IOContext.get(hconf).setInputPath(
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java Thu Oct 30 16:22:33 2014
@@ -47,117 +47,122 @@ public class TestTezSessionPool {
}
@Before
- public void setUp() {
- conf = new HiveConf();
- }
+ public void setUp() {
+ conf = new HiveConf();
+ }
@Test
- public void testGetNonDefaultSession() {
- poolManager = new TestTezSessionPoolManager();
- try {
- TezSessionState sessionState = poolManager.getSession(null, conf, true);
- TezSessionState sessionState1 = poolManager.getSession(sessionState, conf, true);
- if (sessionState1 != sessionState) {
- fail();
- }
- } catch (Exception e) {
- e.printStackTrace();
+ public void testGetNonDefaultSession() {
+ poolManager = new TestTezSessionPoolManager();
+ try {
+ TezSessionState sessionState = poolManager.getSession(null, conf, true);
+ TezSessionState sessionState1 = poolManager.getSession(sessionState, conf, true);
+ if (sessionState1 != sessionState) {
+ fail();
+ }
+ conf.set("tez.queue.name", "nondefault");
+ TezSessionState sessionState2 = poolManager.getSession(sessionState, conf, true);
+ if (sessionState2 == sessionState) {
fail();
}
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail();
}
+ }
@Test
- public void testSessionPoolGetInOrder() {
- try {
- conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
- conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_DEFAULT_QUEUES, "a,b,c");
- conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE, 2);
-
- poolManager = new TestTezSessionPoolManager();
- poolManager.setupPool(conf);
- poolManager.startPool();
- TezSessionState sessionState = poolManager.getSession(null, conf, true);
- if (sessionState.getQueueName().compareTo("a") != 0) {
- fail();
- }
- poolManager.returnSession(sessionState);
-
- sessionState = poolManager.getSession(null, conf, true);
- if (sessionState.getQueueName().compareTo("b") != 0) {
- fail();
- }
- poolManager.returnSession(sessionState);
+ public void testSessionPoolGetInOrder() {
+ try {
+ conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
+ conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_DEFAULT_QUEUES, "a,b,c");
+ conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE, 2);
- sessionState = poolManager.getSession(null, conf, true);
- if (sessionState.getQueueName().compareTo("c") != 0) {
- fail();
- }
- poolManager.returnSession(sessionState);
+ poolManager = new TestTezSessionPoolManager();
+ poolManager.setupPool(conf);
+ poolManager.startPool();
+ TezSessionState sessionState = poolManager.getSession(null, conf, true);
+ if (sessionState.getQueueName().compareTo("a") != 0) {
+ fail();
+ }
+ poolManager.returnSession(sessionState);
- sessionState = poolManager.getSession(null, conf, true);
- if (sessionState.getQueueName().compareTo("a") != 0) {
- fail();
- }
+ sessionState = poolManager.getSession(null, conf, true);
+ if (sessionState.getQueueName().compareTo("b") != 0) {
+ fail();
+ }
+ poolManager.returnSession(sessionState);
- poolManager.returnSession(sessionState);
+ sessionState = poolManager.getSession(null, conf, true);
+ if (sessionState.getQueueName().compareTo("c") != 0) {
+ fail();
+ }
+ poolManager.returnSession(sessionState);
- } catch (Exception e) {
- e.printStackTrace();
+ sessionState = poolManager.getSession(null, conf, true);
+ if (sessionState.getQueueName().compareTo("a") != 0) {
fail();
}
+
+ poolManager.returnSession(sessionState);
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail();
}
+ }
public class SessionThread implements Runnable {
@Override
- public void run() {
- try {
- HiveConf tmpConf = new HiveConf(conf);
- if (random.nextDouble() > 0.5) {
- tmpConf.set("tez.queue.name", "default");
- } else {
- tmpConf.set("tez.queue.name", "");
- }
-
- TezSessionState session = poolManager.getSession(null, tmpConf, true);
- Thread.sleep((random.nextInt(9) % 10) * 1000);
- poolManager.returnSession(session);
- } catch (Exception e) {
- e.printStackTrace();
+ public void run() {
+ try {
+ HiveConf tmpConf = new HiveConf(conf);
+ if (random.nextDouble() > 0.5) {
+ tmpConf.set("tez.queue.name", "default");
+ } else {
+ tmpConf.set("tez.queue.name", "");
}
+
+ TezSessionState session = poolManager.getSession(null, tmpConf, true);
+ Thread.sleep(random.nextInt(9) * 1000); // nextInt(9) is already 0..8; the % 10 was a no-op
+ poolManager.returnSession(session);
+ } catch (Exception e) {
+ e.printStackTrace();
}
+ }
}
@Test
- public void testReturn() {
- conf.set("tez.queue.name", "");
- random = new Random(1000);
- conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
- conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_DEFAULT_QUEUES, "a,b,c");
- conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE, 2);
+ public void testReturn() {
+ conf.set("tez.queue.name", "");
+ random = new Random(1000);
+ conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
+ conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_DEFAULT_QUEUES, "a,b,c");
+ conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE, 2);
+ try {
+ poolManager = new TestTezSessionPoolManager();
+ poolManager.setupPool(conf);
+ poolManager.startPool();
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail();
+ }
+ List<Thread> threadList = new ArrayList<Thread>();
+ for (int i = 0; i < 15; i++) {
+ Thread t = new Thread(new SessionThread());
+ threadList.add(t);
+ t.start();
+ }
+
+ for (Thread t : threadList) {
try {
- poolManager = new TestTezSessionPoolManager();
- poolManager.setupPool(conf);
- poolManager.startPool();
- } catch (Exception e) {
+ t.join();
+ } catch (InterruptedException e) {
e.printStackTrace();
fail();
}
- List<Thread> threadList = new ArrayList<Thread>();
- for (int i = 0; i < 15; i++) {
- Thread t = new Thread(new SessionThread());
- t.start();
- }
-
- for (Thread t : threadList) {
- try {
- t.join();
- } catch (InterruptedException e) {
- e.printStackTrace();
- fail();
- }
- }
}
+ }
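testReturn drives fifteen SessionThread runnables against the pool and joins them so every borrowed session is returned before the test exits (note the added threadList.add(t), without which the join loop would iterate an empty list and the test would not wait). An alternative sketch using an ExecutorService, which bounds the wait so a hung session cannot stall the suite; this is an illustration, not what the commit does, and assumes the enclosing test method declares throws InterruptedException:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    ExecutorService workers = Executors.newFixedThreadPool(15);
    for (int i = 0; i < 15; i++) {
      workers.submit(new SessionThread());
    }
    workers.shutdown();
    // awaitTermination returns false if the timeout elapses first.
    if (!workers.awaitTermination(60, TimeUnit.SECONDS)) {
      fail("session threads did not finish within 60 seconds");
    }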
@Test
public void testCloseAndOpenDefault() throws Exception {
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionState.java Thu Oct 30 16:22:33 2014
@@ -37,9 +37,10 @@ import org.apache.tez.dag.api.TezExcepti
public class TestTezSessionState extends TezSessionState {
private boolean open;
- private String sessionId;
+ private final String sessionId;
private HiveConf hiveConf;
private String user;
+ private boolean doAsEnabled;
public TestTezSessionState(String sessionId) {
super(sessionId);
@@ -47,38 +48,46 @@ public class TestTezSessionState extends
}
@Override
- public boolean isOpen() {
- return open;
- }
+ public boolean isOpen() {
+ return open;
+ }
public void setOpen(boolean open) {
this.open = open;
}
@Override
- public void open(HiveConf conf) throws IOException,
- LoginException, URISyntaxException, TezException {
- this.hiveConf = conf;
- UserGroupInformation ugi;
- ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
- user = ShimLoader.getHadoopShims().getShortUserName(ugi);
- }
+ public void open(HiveConf conf) throws IOException, LoginException, URISyntaxException,
+ TezException {
+ this.hiveConf = conf;
+ UserGroupInformation ugi;
+ ugi = ShimLoader.getHadoopShims().getUGIForConf(conf);
+ user = ShimLoader.getHadoopShims().getShortUserName(ugi);
+ this.doAsEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS);
+ }
@Override
- public void close(boolean keepTmpDir) throws TezException, IOException {
- open = keepTmpDir;
- }
+ public void close(boolean keepTmpDir) throws TezException, IOException {
+ open = keepTmpDir;
+ }
+ @Override
public HiveConf getConf() {
return this.hiveConf;
}
@Override
- public String getSessionId() {
- return sessionId;
- }
-
+ public String getSessionId() {
+ return sessionId;
+ }
+
+ @Override
public String getUser() {
return user;
}
+
+ @Override
+ public boolean getDoAsEnabled() {
+ return this.doAsEnabled;
+ }
}
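The new doAsEnabled field makes this test double record, at open() time, whether HIVE_SERVER2_ENABLE_DOAS was set in the conf it was opened with, so callers can later query it through the getDoAsEnabled() override. A hedged usage sketch, assuming the Hive test tree on the classpath and an enclosing method that declares the checked exceptions open() throws (IOException, LoginException, URISyntaxException, TezException); the session id "session-1" is arbitrary:

    HiveConf conf = new HiveConf();
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, true);

    TestTezSessionState session = new TestTezSessionState("session-1");
    session.open(conf); // captures doAs from the conf passed to open()
    Assert.assertTrue(session.getDoAsEnabled());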
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorFilterOperator.java Thu Oct 30 16:22:33 2014
@@ -18,7 +18,9 @@
package org.apache.hadoop.hive.ql.exec.vector;
+import java.util.ArrayList;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import junit.framework.Assert;
@@ -83,9 +85,9 @@ public class TestVectorFilterOperator {
private VectorFilterOperator getAVectorFilterOperator() throws HiveException {
ExprNodeColumnDesc col1Expr = new ExprNodeColumnDesc(Long.class, "col1", "table", false);
- Map<String, Integer> columnMap = new HashMap<String, Integer>();
- columnMap.put("col1", 1);
- VectorizationContext vc = new VectorizationContext(columnMap, 1);
+ List<String> columns = new ArrayList<String>();
+ columns.add("col1");
+ VectorizationContext vc = new VectorizationContext(columns);
FilterDesc fdesc = new FilterDesc();
fdesc.setPredicate(col1Expr);
return new VectorFilterOperator(vc, fdesc);
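The VectorizationContext change here, repeated across the vector test files below, swaps the old (Map<String, Integer>, int) constructor for one taking an ordered List<String>: each column's index is now implied by its position in the list rather than stored explicitly, which removes the kind of map/count mismatch visible in the old code above ("col1" mapped to index 1 with a declared count of 1). A minimal before/after sketch of the migration:

    // Before: explicit name -> index map plus a separate column count.
    //   Map<String, Integer> columnMap = new HashMap<String, Integer>();
    //   columnMap.put("col1", 0);
    //   VectorizationContext vc = new VectorizationContext(columnMap, 1);

    // After: an ordered list; "col1" is column 0 by position.
    List<String> columns = new ArrayList<String>();
    columns.add("col1");
    VectorizationContext vc = new VectorizationContext(columns);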
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java Thu Oct 30 16:22:33 2014
@@ -170,10 +170,10 @@ public class TestVectorGroupByOperator {
@Test
public void testMemoryPressureFlush() throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("Key", 0);
- mapColumnNames.put("Value", 1);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 2);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("Key");
+ mapColumnNames.add("Value");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildKeyGroupByDesc (ctx, "max",
"Value", TypeInfoFactory.longTypeInfo,
@@ -1710,7 +1710,7 @@ public class TestVectorGroupByOperator {
mapColumnNames.put("value", i);
outputColumnNames.add("value");
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, i+1);
+ VectorizationContext ctx = new VectorizationContext(outputColumnNames);
ArrayList<AggregationDesc> aggs = new ArrayList(1);
aggs.add(
@@ -1818,10 +1818,10 @@ public class TestVectorGroupByOperator {
FakeVectorRowBatchFromObjectIterables data,
Map<Object, Object> expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("Key", 0);
- mapColumnNames.put("Value", 1);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 2);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("Key");
+ mapColumnNames.add("Value");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
Set<Object> keys = new HashSet<Object>();
AggregationDesc agg = buildAggregationDesc(ctx, aggregateName,
@@ -2233,9 +2233,9 @@ public class TestVectorGroupByOperator {
public void testAggregateCountStarIterable (
Iterable<VectorizedRowBatch> data,
Object expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("A", 0);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("A");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildGroupByDescCountStar (ctx);
@@ -2262,9 +2262,9 @@ public class TestVectorGroupByOperator {
public void testAggregateCountReduceIterable (
Iterable<VectorizedRowBatch> data,
Object expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("A", 0);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("A");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildGroupByDescType(ctx, "count", "A", TypeInfoFactory.longTypeInfo);
VectorGroupByDesc vectorDesc = desc.getVectorDesc();
@@ -2294,9 +2294,9 @@ public class TestVectorGroupByOperator {
String aggregateName,
Iterable<VectorizedRowBatch> data,
Object expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("A", 0);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("A");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildGroupByDescType(ctx, aggregateName, "A",
TypeInfoFactory.stringTypeInfo);
@@ -2325,9 +2325,9 @@ public class TestVectorGroupByOperator {
String aggregateName,
Iterable<VectorizedRowBatch> data,
Object expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("A", 0);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("A");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildGroupByDescType(ctx, aggregateName, "A",
TypeInfoFactory.getDecimalTypeInfo(30, 4));
@@ -2357,9 +2357,9 @@ public class TestVectorGroupByOperator {
String aggregateName,
Iterable<VectorizedRowBatch> data,
Object expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("A", 0);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("A");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildGroupByDescType (ctx, aggregateName, "A",
TypeInfoFactory.doubleTypeInfo);
@@ -2388,9 +2388,9 @@ public class TestVectorGroupByOperator {
String aggregateName,
Iterable<VectorizedRowBatch> data,
Object expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("A", 0);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 1);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("A");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
GroupByDesc desc = buildGroupByDescType(ctx, aggregateName, "A", TypeInfoFactory.longTypeInfo);
@@ -2418,10 +2418,11 @@ public class TestVectorGroupByOperator {
String aggregateName,
Iterable<VectorizedRowBatch> data,
HashMap<Object,Object> expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("Key", 0);
- mapColumnNames.put("Value", 1);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 2);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("Key");
+ mapColumnNames.add("Value");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
+
Set<Object> keys = new HashSet<Object>();
GroupByDesc desc = buildKeyGroupByDesc (ctx, aggregateName, "Value",
@@ -2484,10 +2485,10 @@ public class TestVectorGroupByOperator {
Iterable<VectorizedRowBatch> data,
TypeInfo dataTypeInfo,
HashMap<Object,Object> expected) throws HiveException {
- Map<String, Integer> mapColumnNames = new HashMap<String, Integer>();
- mapColumnNames.put("Key", 0);
- mapColumnNames.put("Value", 1);
- VectorizationContext ctx = new VectorizationContext(mapColumnNames, 2);
+ List<String> mapColumnNames = new ArrayList<String>();
+ mapColumnNames.add("Key");
+ mapColumnNames.add("Value");
+ VectorizationContext ctx = new VectorizationContext(mapColumnNames);
Set<Object> keys = new HashSet<Object>();
GroupByDesc desc = buildKeyGroupByDesc (ctx, aggregateName, "Value",
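TestVectorGroupByOperator repeats the same add-names-then-construct setup in every helper above; a small factory method would collapse that duplication. A hedged sketch under that assumption — the helper name ctxFor is hypothetical and not part of this commit:

    import java.util.ArrayList;
    import java.util.Arrays;

    // Builds a VectorizationContext whose column indexes follow the
    // order of the names passed in.
    private static VectorizationContext ctxFor(String... columnNames) {
      return new VectorizationContext(
          new ArrayList<String>(Arrays.asList(columnNames)));
    }

    // usage: VectorizationContext ctx = ctxFor("Key", "Value");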
Modified: hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java (original)
+++ hive/branches/spark/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorSelectOperator.java Thu Oct 30 16:22:33 2014
@@ -84,9 +84,11 @@ public class TestVectorSelectOperator {
@Test
public void testSelectOperator() throws HiveException {
- Map<String, Integer> columnMap = new HashMap<String, Integer>();
- columnMap.put("a", 0); columnMap.put("b", 1); columnMap.put("c", 2);
- VectorizationContext vc = new VectorizationContext(columnMap, 3);
+ List<String> columns = new ArrayList<String>();
+ columns.add("a");
+ columns.add("b");
+ columns.add("c");
+ VectorizationContext vc = new VectorizationContext(columns);
SelectDesc selDesc = new SelectDesc(false);
List<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();