Posted to commits@hive.apache.org by ha...@apache.org on 2013/04/26 21:16:13 UTC

svn commit: r1476348 [10/29] - in /hive/branches/vectorization: ./ beeline/ beeline/src/java/org/apache/hive/beeline/ beeline/src/test/org/ beeline/src/test/org/apache/ beeline/src/test/org/apache/hive/ beeline/src/test/org/apache/hive/beeline/ beeline...

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Fri Apr 26 19:14:49 2013
@@ -43,6 +43,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -75,8 +76,10 @@ import org.apache.hadoop.hive.ql.plan.Ad
 import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc;
 import org.apache.hadoop.hive.ql.plan.AlterIndexDesc.AlterIndexTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc;
 import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes;
+import org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition;
 import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;
 import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.CreateIndexDesc;
@@ -126,7 +129,6 @@ import org.apache.hadoop.hive.serde.serd
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
-import org.apache.hadoop.hive.ql.plan.AlterTableAlterPartDesc;
 
 /**
  * DDLSemanticAnalyzer.
@@ -405,6 +407,9 @@ public class DDLSemanticAnalyzer extends
     case HiveParser.TOK_ALTERTABLE_SKEWED:
       analyzeAltertableSkewedby(ast);
       break;
+    case HiveParser.TOK_EXCHANGEPARTITION:
+      analyzeExchangePartition(ast);
+      break;
     default:
       throw new SemanticException("Unsupported command.");
     }
@@ -663,6 +668,69 @@ public class DDLSemanticAnalyzer extends
 
   }
 
+  private void analyzeExchangePartition(ASTNode ast) throws SemanticException {
+    Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(0)));
+    Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(2)));
+
+    // Get the partition specs
+    Map<String, String> partSpecs = getPartSpec((ASTNode) ast.getChild(1));
+    validatePartitionValues(partSpecs);
+    boolean sameColumns = MetaStoreUtils.compareFieldColumns(
+        sourceTable.getAllCols(), destTable.getAllCols());
+    boolean samePartitions = MetaStoreUtils.compareFieldColumns(
+        sourceTable.getPartitionKeys(), destTable.getPartitionKeys());
+    if (!sameColumns || !samePartitions) {
+      throw new SemanticException(ErrorMsg.TABLES_INCOMPATIBLE_SCHEMAS.getMsg());
+    }
+    List<Partition> partitions = getPartitions(sourceTable, partSpecs, true);
+
+    // Verify that the partition values specified are continuous:
+    // if a subpartition value is specified without its parent partition's
+    // value also being specified, we throw an exception.
+    if (!isPartitionValueContinuous(sourceTable.getPartitionKeys(), partSpecs)) {
+      throw new SemanticException(
+          ErrorMsg.PARTITION_VALUE_NOT_CONTINUOUS.getMsg(partSpecs.toString()));
+    }
+    List<Partition> destPartitions = null;
+    try {
+      destPartitions = getPartitions(destTable, partSpecs, true);
+    } catch (SemanticException ex) {
+      // A SemanticException is expected to be thrown here, since this
+      // partition should not be present.
+    }
+    if (destPartitions != null) {
+      // If any destination partition is present, throw a SemanticException.
+      throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(destPartitions.toString()));
+    }
+    AlterTableExchangePartition alterTableExchangePartition =
+      new AlterTableExchangePartition(sourceTable, destTable, partSpecs);
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+      alterTableExchangePartition), conf));
+  }
+
+  /**
+   * @param partitionKeys the list of partition keys of the table
+   * @param partSpecs the partition specs given by the user
+   * @return true if no subpartition value is specified without its parent
+   *         partition's value also being specified; false otherwise
+   */
+  private boolean isPartitionValueContinuous(List<FieldSchema> partitionKeys,
+      Map<String, String> partSpecs) {
+    boolean partitionMissing = false;
+    for (FieldSchema partitionKey: partitionKeys) {
+      if (!partSpecs.containsKey(partitionKey.getName())) {
+        partitionMissing = true;
+      } else {
+        if (partitionMissing) {
+          // A subpartition value exists after a missing partition value, so
+          // the partition values specified are not continuous; return false.
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
   private void analyzeCreateDatabase(ASTNode ast) throws SemanticException {
     String dbName = unescapeIdentifier(ast.getChild(0).getText());
     boolean ifNotExists = false;
@@ -781,7 +849,148 @@ public class DDLSemanticAnalyzer extends
     }
 
     TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec);
-    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), truncateTblDesc), conf));
+
+    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
+    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork, conf);
+
+    // Is this a truncate-columns command?
+    List<String> columnNames = null;
+    if (ast.getChildCount() == 2) {
+      try {
+        columnNames = getColumnNames((ASTNode)ast.getChild(1));
+
+        // Throw an error if the table is indexed
+        List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
+        if (indexes != null && indexes.size() > 0) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
+        }
+
+        List<String> bucketCols = null;
+        Class<? extends InputFormat> inputFormatClass = null;
+        boolean isArchived = false;
+        Path newTblPartLoc = null;
+        Path oldTblPartLoc = null;
+        List<FieldSchema> cols = null;
+        ListBucketingCtx lbCtx = null;
+        boolean isListBucketed = false;
+        List<String> listBucketColNames = null;
+
+        if (table.isPartitioned()) {
+          Partition part = db.getPartition(table, partSpec, false);
+
+          Path tabPath = table.getPath();
+          Path partPath = part.getPartitionPath();
+
+          // if the table is in a different dfs than the partition,
+          // replace the partition's dfs with the table's dfs.
+          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
+              .getAuthority(), partPath.toUri().getPath());
+
+          oldTblPartLoc = partPath;
+
+          cols = part.getCols();
+          bucketCols = part.getBucketCols();
+          inputFormatClass = part.getInputFormatClass();
+          isArchived = ArchiveUtils.isArchived(part);
+          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
+              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
+          isListBucketed = part.isStoredAsSubDirectories();
+          listBucketColNames = part.getSkewedColNames();
+        } else {
+          // input and output are the same
+          oldTblPartLoc = table.getPath();
+          newTblPartLoc = table.getPath();
+          cols = table.getCols();
+          bucketCols = table.getBucketCols();
+          inputFormatClass = table.getInputFormatClass();
+          lbCtx = constructListBucketingCtx(table.getSkewedColNames(), table.getSkewedColValues(),
+              table.getSkewedColValueLocationMaps(), table.isStoredAsSubDirectories(), conf);
+          isListBucketed = table.isStoredAsSubDirectories();
+          listBucketColNames = table.getSkewedColNames();
+        }
+
+        // throw a SemanticException for non-RCFile tables/partitions.
+        if (!inputFormatClass.equals(RCFileInputFormat.class)) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_NOT_RC.getMsg());
+        }
+
+        // throw a SemanticException if the table/partition is archived
+        if (isArchived) {
+          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_ARCHIVED.getMsg());
+        }
+
+        Set<Integer> columnIndexes = new HashSet<Integer>();
+        for (String columnName : columnNames) {
+          boolean found = false;
+          for (int columnIndex = 0; columnIndex < cols.size(); columnIndex++) {
+            if (columnName.equalsIgnoreCase(cols.get(columnIndex).getName())) {
+              columnIndexes.add(columnIndex);
+              found = true;
+              break;
+            }
+          }
+          // Throw an exception if the user is trying to truncate a column which doesn't exist
+          if (!found) {
+            throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(columnName));
+          }
+          // Throw an exception if the table/partition is bucketed on one of the columns
+          for (String bucketCol : bucketCols) {
+            if (bucketCol.equalsIgnoreCase(columnName)) {
+              throw new SemanticException(ErrorMsg.TRUNCATE_BUCKETED_COLUMN.getMsg(columnName));
+            }
+          }
+          if (isListBucketed) {
+            for (String listBucketCol : listBucketColNames) {
+              if (listBucketCol.equalsIgnoreCase(columnName)) {
+                throw new SemanticException(
+                    ErrorMsg.TRUNCATE_LIST_BUCKETED_COLUMN.getMsg(columnName));
+              }
+            }
+          }
+        }
+
+        truncateTblDesc.setColumnIndexes(new ArrayList<Integer>(columnIndexes));
+
+        truncateTblDesc.setInputDir(oldTblPartLoc.toString());
+        addInputsOutputsAlterTable(tableName, partSpec);
+
+        truncateTblDesc.setLbCtx(lbCtx);
+
+        ddlWork.setNeedLock(true);
+        TableDesc tblDesc = Utilities.getTableDesc(table);
+        // Write the output to temporary directory and move it to the final location at the end
+        // so the operation is atomic.
+        String queryTmpdir = ctx.getExternalTmpFileURI(newTblPartLoc.toUri());
+        truncateTblDesc.setOutputDir(queryTmpdir);
+        LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,
+            partSpec == null ? new HashMap<String, String>() : partSpec);
+        ltd.setLbCtx(lbCtx);
+        Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
+            conf);
+        truncateTask.addDependentTask(moveTsk);
+
+        // Recalculate the HDFS stats if auto gather stats is set
+        if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
+          StatsWork statDesc;
+          if (oldTblPartLoc.equals(newTblPartLoc)) {
+            // If we're merging to the same location, we can avoid some metastore calls
+            tableSpec tablepart = new tableSpec(this.db, conf, root);
+            statDesc = new StatsWork(tablepart);
+          } else {
+            statDesc = new StatsWork(ltd);
+          }
+          statDesc.setNoStatsAggregator(true);
+          statDesc.setStatsReliable(conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE));
+          Task<? extends Serializable> statTask = TaskFactory.get(statDesc, conf);
+          moveTsk.addDependentTask(statTask);
+        }
+      } catch (HiveException e) {
+        throw new SemanticException(e);
+      }
+    }
+
+    rootTasks.add(truncateTask);
   }
 
   private boolean isFullSpec(Table table, Map<String, String> partSpec) {

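For reference, the continuity rule that isPartitionValueContinuous enforces can be illustrated in isolation. The following is a minimal standalone sketch, with plain strings standing in for the FieldSchema partition keys; the class and helper names are illustrative only, not part of the commit:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PartitionSpecContinuityDemo {
  // Same rule as isPartitionValueContinuous above: once a partition key is
  // missing from the spec, no later (sub)partition key may be present.
  static boolean isContinuous(List<String> partitionKeys, Map<String, String> partSpec) {
    boolean partitionMissing = false;
    for (String key : partitionKeys) {
      if (!partSpec.containsKey(key)) {
        partitionMissing = true;
      } else if (partitionMissing) {
        return false; // a subpartition value follows a gap
      }
    }
    return true;
  }

  static Map<String, String> spec(String... kv) {
    Map<String, String> m = new LinkedHashMap<String, String>();
    for (int i = 0; i < kv.length; i += 2) {
      m.put(kv[i], kv[i + 1]);
    }
    return m;
  }

  public static void main(String[] args) {
    List<String> keys = Arrays.asList("ds", "hr");
    System.out.println(isContinuous(keys, spec("ds", "2013-04-26")));            // true
    System.out.println(isContinuous(keys, spec("ds", "2013-04-26", "hr", "1"))); // true
    System.out.println(isContinuous(keys, spec("hr", "1")));                     // false
  }
}
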
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Fri Apr 26 19:14:49 2013
@@ -258,6 +258,7 @@ KW_PARTIALSCAN: 'PARTIALSCAN';
 KW_USER: 'USER';
 KW_ROLE: 'ROLE';
 KW_INNER: 'INNER';
+KW_EXCHANGE: 'EXCHANGE';
 
 // Operators
 // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Fri Apr 26 19:14:49 2013
@@ -295,6 +295,7 @@ TOK_WINDOWSPEC;
 TOK_WINDOWVALUES;
 TOK_WINDOWRANGE;
 TOK_IGNOREPROTECTION;
+TOK_EXCHANGEPARTITION;
 }
 
 
@@ -756,7 +757,7 @@ createTableStatement
 truncateTableStatement
 @init { msgs.push("truncate table statement"); }
 @after { msgs.pop(); }
-    : KW_TRUNCATE KW_TABLE tablePartitionPrefix -> ^(TOK_TRUNCATETABLE tablePartitionPrefix);
+    : KW_TRUNCATE KW_TABLE tablePartitionPrefix (KW_COLUMNS LPAREN columnNameList RPAREN)? -> ^(TOK_TRUNCATETABLE tablePartitionPrefix columnNameList?);
 
 createIndexStatement
 @init { msgs.push("create index statement");}
@@ -867,6 +868,7 @@ alterTableStatementSuffix
     | alterStatementSuffixProperties
     | alterTblPartitionStatement
     | alterStatementSuffixSkewedby
+    | alterStatementSuffixExchangePartition
     ;
 
 alterViewStatementSuffix
@@ -1103,6 +1105,13 @@ alterStatementSuffixSkewedby
 	->^(TOK_ALTERTABLE_SKEWED $name storedAsDirs)
 	;
 
+alterStatementSuffixExchangePartition
+@init {msgs.push("alter exchange partition");}
+@after{msgs.pop();}
+    : name=tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
+    -> ^(TOK_EXCHANGEPARTITION $name partitionSpec $exchangename)
+    ;
+
 alterStatementSuffixProtectMode
 @init { msgs.push("alter partition protect mode statement"); }
 @after { msgs.pop(); }

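Taken together, the lexer and parser changes admit two new statement shapes: an ALTER TABLE ... EXCHANGE PARTITION suffix and an optional COLUMNS list on TRUNCATE TABLE. A minimal sketch of the accepted surface syntax; the table, partition, and column names are hypothetical:

public class NewDdlSyntaxExamples {
  public static void main(String[] args) {
    // Exchanges the named partition between t1 and t2; per the semantic
    // analysis above, both tables must have identical column and partition
    // schemas, and the partition must not already exist in the destination.
    String exchange =
        "ALTER TABLE t1 EXCHANGE PARTITION (ds='2013-04-26') WITH TABLE t2";
    // Truncates only the listed columns; the analyzer restricts this to
    // unarchived RCFile tables/partitions that are not bucketed or
    // list-bucketed on those columns.
    String truncateColumns =
        "TRUNCATE TABLE t1 PARTITION (ds='2013-04-26') COLUMNS (key, value)";
    System.out.println(exchange);
    System.out.println(truncateColumns);
  }
}
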
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Fri Apr 26 19:14:49 2013
@@ -2428,6 +2428,7 @@ public class SemanticAnalyzer extends Ba
       posn++;
     }
 
+    boolean subQuery = qb.getParseInfo().getIsSubQ();
     boolean isInTransform = (selExprList.getChild(posn).getChild(0).getType() ==
         HiveParser.TOK_TRANSFORM);
     if (isInTransform) {
@@ -2463,6 +2464,10 @@ public class SemanticAnalyzer extends Ba
         unparseTranslator.addIdentifierTranslation((ASTNode) udtfExpr
             .getChild(0));
       }
+      if (isUDTF && (selectStar = udtfExprType == HiveParser.TOK_FUNCTIONSTAR)) {
+        genColListRegex(".*", null, (ASTNode) udtfExpr.getChild(0),
+            col_list, inputRR, pos, out_rwsch, qb.getAliases(), subQuery);
+      }
     }
 
     if (isUDTF) {
@@ -2567,7 +2572,6 @@ public class SemanticAnalyzer extends Ba
 
       }
 
-      boolean subQuery = qb.getParseInfo().getIsSubQ();
       if (expr.getType() == HiveParser.TOK_ALLCOLREF) {
         pos = genColListRegex(".*", expr.getChildCount() == 0 ? null
             : getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(),
@@ -5982,6 +5986,7 @@ public class SemanticAnalyzer extends Ba
             reduceKeys.size(), numReds), new RowSchema(outputRS
             .getColumnInfos()), child), outputRS);
     rsOp.setColumnExprMap(colExprMap);
+    rsOp.setInputAlias(srcName);
     return rsOp;
   }
 
@@ -8075,9 +8080,6 @@ public class SemanticAnalyzer extends Ba
     RowResolver lvForwardRR = new RowResolver();
     RowResolver source = opParseCtx.get(op).getRowResolver();
     for (ColumnInfo col : source.getColumnInfos()) {
-      if (col.getIsVirtualCol() && col.isHiddenVirtualCol()) {
-        continue;
-      }
       String[] tabCol = source.reverseLookup(col.getInternalName());
       lvForwardRR.put(tabCol[0], tabCol[1], col);
     }
@@ -8161,7 +8163,7 @@ public class SemanticAnalyzer extends Ba
       String internalName = getColumnInternalName(outputInternalColNames.size());
       outputInternalColNames.add(internalName);
       ColumnInfo newCol = new ColumnInfo(internalName, c.getType(), c
-          .getTabAlias(), c.getIsVirtualCol());
+          .getTabAlias(), c.getIsVirtualCol(), c.isHiddenVirtualCol());
       String[] tableCol = source.reverseLookup(c.getInternalName());
       String tableAlias = tableCol[0];
       String colAlias = tableCol[1];
@@ -8371,7 +8373,7 @@ public class SemanticAnalyzer extends Ba
 
     // For each task, set the key descriptor for the reducer
     for (Task<? extends Serializable> rootTask : rootTasks) {
-      setKeyDescTaskTree(rootTask);
+      GenMapRedUtils.setKeyAndValueDescForTaskTree(rootTask);
     }
 
     // If a task contains an operator which instructs bucketizedhiveinputformat
@@ -8597,36 +8599,6 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
-  // loop over all the tasks recursviely
-  private void setKeyDescTaskTree(Task<? extends Serializable> task) {
-
-    if (task instanceof ExecDriver) {
-      MapredWork work = (MapredWork) task.getWork();
-      work.deriveExplainAttributes();
-      HashMap<String, Operator<? extends OperatorDesc>> opMap = work
-          .getAliasToWork();
-      if (!opMap.isEmpty()) {
-        for (Operator<? extends OperatorDesc> op : opMap.values()) {
-          GenMapRedUtils.setKeyAndValueDesc(work, op);
-        }
-      }
-    } else if (task instanceof ConditionalTask) {
-      List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task)
-          .getListTasks();
-      for (Task<? extends Serializable> tsk : listTasks) {
-        setKeyDescTaskTree(tsk);
-      }
-    }
-
-    if (task.getChildTasks() == null) {
-      return;
-    }
-
-    for (Task<? extends Serializable> childTask : task.getChildTasks()) {
-      setKeyDescTaskTree(childTask);
-    }
-  }
-
   @SuppressWarnings("nls")
   public Phase1Ctx initPhase1Ctx() {
 
@@ -10661,6 +10633,7 @@ public class SemanticAnalyzer extends Ba
       {
         RowResolver ptfMapRR = tabDef.getRawInputShape().getRr();
 
+        ptfDesc.setMapSide(true);
         input = putOpInsertMap(OperatorFactory.getAndMakeChild(ptfDesc,
             new RowSchema(ptfMapRR.getColumnInfos()),
             input), ptfMapRR);

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Fri Apr 26 19:14:49 2013
@@ -202,6 +202,7 @@ public final class SemanticAnalyzerFacto
       case HiveParser.TOK_ALTERDATABASE_PROPERTIES:
       case HiveParser.TOK_ALTERTABLE_SKEWED:
       case HiveParser.TOK_TRUNCATETABLE:
+      case HiveParser.TOK_EXCHANGEPARTITION:
         return new DDLSemanticAnalyzer(conf);
       case HiveParser.TOK_ALTERTABLE_PARTITION:
         HiveOperation commandType = null;

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java Fri Apr 26 19:14:49 2013
@@ -71,7 +71,7 @@ public class ConditionalResolverCommonJo
     }
 
     public HashMap<String, Long> getAliasToKnownSize() {
-      return aliasToKnownSize;
+      return aliasToKnownSize == null ? new HashMap<String, Long>() : aliasToKnownSize;
     }
 
     public void setAliasToKnownSize(HashMap<String, Long> aliasToKnownSize) {

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java Fri Apr 26 19:14:49 2013
@@ -64,6 +64,7 @@ public class DDLWork implements Serializ
   private AlterDatabaseDesc alterDbDesc;
   private AlterTableAlterPartDesc alterTableAlterPartDesc;
   private TruncateTableDesc truncateTblDesc;
+  private AlterTableExchangePartition alterTableExchangePartition;
 
   private RoleDDLDesc roleDDLDesc;
   private GrantDesc grantDesc;
@@ -449,6 +450,12 @@ public class DDLWork implements Serializ
     this.alterTableAlterPartDesc = alterPartDesc;
   }
 
+  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      AlterTableExchangePartition alterTableExchangePartition) {
+    this(inputs, outputs);
+    this.alterTableExchangePartition = alterTableExchangePartition;
+  }
+
     /**
    * @return Create Database descriptor
    */
@@ -1025,4 +1032,20 @@ public class DDLWork implements Serializ
   public void setTruncateTblDesc(TruncateTableDesc truncateTblDesc) {
     this.truncateTblDesc = truncateTblDesc;
   }
+
+  /**
+   * @return information about the table partition to be exchanged
+   */
+  public AlterTableExchangePartition getAlterTableExchangePartition() {
+    return this.alterTableExchangePartition;
+  }
+
+  /**
+   * @param alterTableExchangePartition
+   *          set the value of the table partition to be exchanged
+   */
+  public void setAlterTableExchangePartition(
+      AlterTableExchangePartition alterTableExchangePartition) {
+    this.alterTableExchangePartition = alterTableExchangePartition;
+  }
 }

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java Fri Apr 26 19:14:49 2013
@@ -50,6 +50,16 @@ public class FileSinkDesc extends Abstra
   private String staticSpec; // static partition spec ends with a '/'
   private boolean gatherStats;
 
+  // Consider a query like:
+  // insert overwrite table T3 select ... from T1 join T2 on T1.key = T2.key;
+  // where T1, T2 and T3 are sorted and bucketed by key into the same number of buckets,
+  // we don't need a reducer to enforce bucketing and sorting for T3.
+  // The field below captures the fact that the reducer introduced to enforce the
+  // sorting/bucketing of T3 has been removed.
+  // In this case a sort-merge join is needed, so the sort-merge join between T1 and T2
+  // cannot be performed as a map-only job.
+  private transient boolean removedReduceSinkBucketSort;
+
   // This file descriptor is linked to other file descriptors.
   // One use case is that, a union->select (star)->file sink, is broken down.
   // For eg: consider a query like:
@@ -364,4 +374,11 @@ public class FileSinkDesc extends Abstra
     this.statsCollectRawDataSize = statsCollectRawDataSize;
   }
 
+  public boolean isRemovedReduceSinkBucketSort() {
+    return removedReduceSinkBucketSort;
+  }
+
+  public void setRemovedReduceSinkBucketSort(boolean removedReduceSinkBucketSort) {
+    this.removedReduceSinkBucketSort = removedReduceSinkBucketSort;
+  }
 }

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java Fri Apr 26 19:14:49 2013
@@ -35,13 +35,13 @@ public class GroupByDesc extends Abstrac
    * PARTIAL1: partial aggregation - first phase: iterate, terminatePartial
    * PARTIAL2: partial aggregation - second phase: merge, terminatePartial
    * PARTIALS: For non-distinct the same as PARTIAL2, for distinct the same as
-   *           PARTIAL1
+   * PARTIAL1
    * FINAL: partial aggregation - final phase: merge, terminate
    * HASH: For non-distinct the same as PARTIAL1 but use hash-table-based aggregation
    * MERGEPARTIAL: FINAL for non-distinct aggregations, COMPLETE for distinct
    * aggregations.
    */
-  private static final long serialVersionUID = 1L;
+  private static long serialVersionUID = 1L;
 
   /**
    * Mode.
@@ -66,6 +66,7 @@ public class GroupByDesc extends Abstrac
   private float groupByMemoryUsage;
   private float memoryThreshold;
   transient private boolean isDistinct;
+  private boolean dontResetAggrsDistinct;
 
   public GroupByDesc() {
   }
@@ -83,8 +84,8 @@ public class GroupByDesc extends Abstrac
       final int groupingSetsPosition,
       final boolean isDistinct) {
     this(mode, outputColumnNames, keys, aggregators, groupKeyNotReductionKey,
-      false, groupByMemoryUsage, memoryThreshold, listGroupingSets,
-      groupingSetsPresent, groupingSetsPosition, isDistinct);
+        false, groupByMemoryUsage, memoryThreshold, listGroupingSets,
+        groupingSetsPresent, groupingSetsPosition, isDistinct);
   }
 
   public GroupByDesc(
@@ -212,11 +213,11 @@ public class GroupByDesc extends Abstrac
    */
   public boolean isDistinctLike() {
     ArrayList<AggregationDesc> aggregators = getAggregators();
-    for(AggregationDesc ad: aggregators){
-      if(!ad.getDistinct()) {
+    for (AggregationDesc ad : aggregators) {
+      if (!ad.getDistinct()) {
         GenericUDAFEvaluator udafEval = ad.getGenericUDAFEvaluator();
         UDFType annot = udafEval.getClass().getAnnotation(UDFType.class);
-        if(annot == null || !annot.distinctLike()) {
+        if (annot == null || !annot.distinctLike()) {
           return false;
         }
       }
@@ -257,4 +258,16 @@ public class GroupByDesc extends Abstrac
   public boolean isDistinct() {
     return isDistinct;
   }
+
+  public void setDistinct(boolean isDistinct) {
+    this.isDistinct = isDistinct;
+  }
+
+  public boolean isDontResetAggrsDistinct() {
+    return dontResetAggrsDistinct;
+  }
+
+  public void setDontResetAggrsDistinct(boolean dontResetAggrsDistinct) {
+    this.dontResetAggrsDistinct = dontResetAggrsDistinct;
+  }
 }

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java Fri Apr 26 19:14:49 2013
@@ -23,6 +23,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.metastore.api.SkewedValueList;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
@@ -39,7 +40,7 @@ public class ListBucketingCtx implements
   private static final long serialVersionUID = 1L;
   private List<String> skewedColNames;
   private List<List<String>> skewedColValues;
-  private Map<List<String>, String> lbLocationMap;
+  private Map<SkewedValueList, String> lbLocationMap;
   private List<SkewedColumnPositionPair> rowSkewedIndex;
   private boolean isStoredAsSubDirectories;
   private String defaultKey;
@@ -82,14 +83,14 @@ public class ListBucketingCtx implements
   /**
    * @return the lbLocationMap
    */
-  public Map<List<String>, String> getLbLocationMap() {
+  public Map<SkewedValueList, String> getLbLocationMap() {
     return lbLocationMap;
   }
 
   /**
    * @param lbLocationMap the lbLocationMap to set
    */
-  public void setLbLocationMap(Map<List<String>, String> lbLocationMap) {
+  public void setLbLocationMap(Map<SkewedValueList, String> lbLocationMap) {
     this.lbLocationMap = lbLocationMap;
   }
 

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java Fri Apr 26 19:14:49 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.plan;
 
 import java.io.ByteArrayOutputStream;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -29,13 +30,16 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
 import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;
 import org.apache.hadoop.hive.ql.parse.OpParseContext;
 import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 import org.apache.hadoop.hive.ql.parse.SplitSample;
+import org.apache.hadoop.mapred.JobConf;
 
 /**
  * MapredWork.
@@ -239,6 +243,12 @@ public class MapredWork extends Abstract
     return keyDesc;
   }
 
+  /**
+   * If the plan has a reducer (and correspondingly a reduce-sink), store the TableDesc
+   * pointing to the keySerializeInfo of the ReduceSink.
+   *
+   * @param keyDesc
+   */
   public void setKeyDesc(final TableDesc keyDesc) {
     this.keyDesc = keyDesc;
   }
@@ -557,4 +567,19 @@ public class MapredWork extends Abstract
   public void setFinalMapRed(boolean finalMapRed) {
     this.finalMapRed = finalMapRed;
   }
+
+  public void configureJobConf(JobConf jobConf) {
+    for (PartitionDesc partition : aliasToPartnInfo.values()) {
+      PlanUtils.configureJobConf(partition.getTableDesc(), jobConf);
+    }
+    Collection<Operator<?>> mappers = aliasToWork.values();
+    for (FileSinkOperator fs : OperatorUtils.findOperators(mappers, FileSinkOperator.class)) {
+      PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
+    }
+    if (reducer != null) {
+      for (FileSinkOperator fs : OperatorUtils.findOperators(reducer, FileSinkOperator.class)) {
+        PlanUtils.configureJobConf(fs.getConf().getTableInfo(), jobConf);
+      }
+    }
+  }
 }

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java Fri Apr 26 19:14:49 2013
@@ -46,6 +46,10 @@ public class PTFDesc extends AbstractOpe
 
   PartitionedTableFunctionDef funcDef;
   LeadLagInfo llInfo;
+  /*
+   * Is this PTFDesc for a map-side PTF operation?
+   */
+  boolean isMapSide = false;
 
   static{
     PTFUtils.makeTransient(PTFDesc.class, "llInfo");
@@ -75,6 +79,14 @@ public class PTFDesc extends AbstractOpe
     return funcDef != null && (funcDef instanceof WindowTableFunctionDef);
   }
 
+  public boolean isMapSide() {
+    return isMapSide;
+  }
+
+  public void setMapSide(boolean isMapSide) {
+    this.isMapSide = isMapSide;
+  }
+
   public abstract static class PTFInputDef {
     String expressionTreeString;
     ShapeDetails outputShape;
@@ -255,10 +267,7 @@ public class PTFDesc extends AbstractOpe
     transient TypeCheckCtx typeCheckCtx;
 
     static{
-      PTFUtils.makeTransient(ShapeDetails.class, "serde");
-      PTFUtils.makeTransient(ShapeDetails.class, "OI");
-      PTFUtils.makeTransient(ShapeDetails.class, "rr");
-      PTFUtils.makeTransient(ShapeDetails.class, "typeCheckCtx");
+      PTFUtils.makeTransient(ShapeDetails.class, "OI", "serde", "rr", "typeCheckCtx");
     }
 
     public String getSerdeClassName() {
@@ -588,8 +597,7 @@ public class PTFDesc extends AbstractOpe
     transient ObjectInspector OI;
 
     static{
-      PTFUtils.makeTransient(PTFExpressionDef.class, "exprEvaluator");
-      PTFUtils.makeTransient(PTFExpressionDef.class, "OI");
+      PTFUtils.makeTransient(PTFExpressionDef.class, "exprEvaluator", "OI");
     }
 
     public PTFExpressionDef() {}

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java Fri Apr 26 19:14:49 2013
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.serde2.laz
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -302,8 +303,8 @@ public final class PlanUtils {
     return new TableDesc(MetadataTypedColumnsetSerDe.class,
         TextInputFormat.class, IgnoreKeyTextOutputFormat.class, Utilities
         .makeProperties(
-        org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
-        separatorCode));
+            org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
+            separatorCode));
   }
 
   /**
@@ -729,6 +730,19 @@ public final class PlanUtils {
     }
   }
 
+  public static void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
+    String handlerClass = tableDesc.getProperties().getProperty(
+        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE);
+    try {
+      HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(jobConf, handlerClass);
+      if (storageHandler != null) {
+        storageHandler.configureJobConf(tableDesc, jobConf);
+      }
+    } catch (HiveException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   public static String stripQuotes(String val) {
     if ((val.charAt(0) == '\'' && val.charAt(val.length() - 1) == '\'')
         || (val.charAt(0) == '\"' && val.charAt(val.length() - 1) == '\"')) {

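The configureJobConf hook added here (driven from MapredWork.configureJobConf for every partition and FileSink table in the plan) lets a table's storage handler adjust the JobConf before job submission. A minimal sketch of a handler using the hook; it assumes DefaultStorageHandler as a convenience base class, and the property name is entirely hypothetical:

import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.mapred.JobConf;

public class ExampleStorageHandler extends DefaultStorageHandler {
  @Override
  public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
    // Copy a (hypothetical) table property into the job configuration so map
    // and reduce tasks can see it; real handlers use this hook for things
    // like credentials or connection settings.
    String endpoint = tableDesc.getProperties().getProperty("example.endpoint");
    if (endpoint != null) {
      jobConf.set("example.endpoint", endpoint);
    }
  }
}
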
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/plan/TruncateTableDesc.java Fri Apr 26 19:14:49 2013
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.plan;
 
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -30,6 +31,10 @@ public class TruncateTableDesc extends D
 
   private String tableName;
   private Map<String, String> partSpec;
+  private List<Integer> columnIndexes;
+  private String inputDir;
+  private String outputDir;
+  private ListBucketingCtx lbCtx;
 
   public TruncateTableDesc() {
   }
@@ -56,4 +61,37 @@ public class TruncateTableDesc extends D
   public void setPartSpec(Map<String, String> partSpec) {
     this.partSpec = partSpec;
   }
+
+  @Explain(displayName = "Column Indexes")
+  public List<Integer> getColumnIndexes() {
+    return columnIndexes;
+  }
+
+  public void setColumnIndexes(List<Integer> columnIndexes) {
+    this.columnIndexes = columnIndexes;
+  }
+
+  public String getInputDir() {
+    return inputDir;
+  }
+
+  public void setInputDir(String inputDir) {
+    this.inputDir = inputDir;
+  }
+
+  public String getOutputDir() {
+    return outputDir;
+  }
+
+  public void setOutputDir(String outputDir) {
+    this.outputDir = outputDir;
+  }
+
+  public ListBucketingCtx getLbCtx() {
+    return lbCtx;
+  }
+
+  public void setLbCtx(ListBucketingCtx lbCtx) {
+    this.lbCtx = lbCtx;
+  }
 }

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java Fri Apr 26 19:14:49 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.ppd;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hive.ql.exec.Fi
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
 import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -501,12 +503,19 @@ public final class OpProcFactory {
         Object... nodeOutputs) throws SemanticException {
       LOG.info("Processing for " + nd.getName() + "("
           + ((Operator) nd).getIdentifier() + ")");
+      ReduceSinkOperator rs = (ReduceSinkOperator) nd;
       OpWalkerInfo owi = (OpWalkerInfo) procCtx;
-      Set<String> aliases = owi.getRowResolver(nd).getTableNames();
+
+      Set<String> aliases;
       boolean ignoreAliases = false;
-      if (aliases.size() == 1 && aliases.contains("")) {
-        // Reduce sink of group by operator
-        ignoreAliases = true;
+      if (rs.getInputAlias() != null) {
+        aliases = new HashSet<String>(Arrays.asList(rs.getInputAlias()));
+      } else {
+        aliases = owi.getRowResolver(nd).getTableNames();
+        if (aliases.size() == 1 && aliases.contains("")) {
+          // Reduce sink of group by operator
+          ignoreAliases = true;
+        }
       }
       boolean hasUnpushedPredicates = mergeWithChildrenPred(nd, owi, null, aliases, ignoreAliases);
       if (HiveConf.getBoolVar(owi.getParseContext().getConf(),

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Fri Apr 26 19:14:49 2013
@@ -191,19 +191,6 @@ public class SessionState {
     ls = new LineageState();
     overriddenConfigurations = new HashMap<String, String>();
     overriddenConfigurations.putAll(HiveConf.getConfSystemProperties());
-
-    // Register the Hive builtins jar and all of its functions
-    try {
-      Class<?> pluginClass = Utilities.getBuiltinUtilsClass();
-      URL jarLocation = pluginClass.getProtectionDomain().getCodeSource()
-        .getLocation();
-      add_builtin_resource(
-        ResourceType.JAR, jarLocation.toString());
-      FunctionRegistry.registerFunctionsFromPluginJar(
-        jarLocation, pluginClass.getClassLoader());
-    } catch (Exception ex) {
-      throw new RuntimeException("Failed to load Hive builtin functions", ex);
-    }
   }
 
   public void setCmd(String cmdString) {

Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java Fri Apr 26 19:14:49 2013
@@ -24,49 +24,56 @@ import org.apache.hadoop.hive.ql.exec.PT
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.io.IntWritable;
 
 public abstract class GenericUDFLeadLag extends GenericUDF
 {
 	transient ExprNodeEvaluator exprEvaluator;
 	transient PTFPartitionIterator<Object> pItr;
-	ObjectInspector firstArgOI;
-
-	private PrimitiveObjectInspector amtOI;
+	transient ObjectInspector firstArgOI;
+	transient ObjectInspector defaultArgOI;
+	transient Converter defaultValueConverter;
+	int amt;
 
 	static{
-		PTFUtils.makeTransient(GenericUDFLeadLag.class, "exprEvaluator");
-		PTFUtils.makeTransient(GenericUDFLeadLag.class, "pItr");
+		PTFUtils.makeTransient(GenericUDFLeadLag.class, "exprEvaluator", "pItr",
+        "firstArgOI", "defaultArgOI", "defaultValueConverter");
 	}
 
 	@Override
 	public Object evaluate(DeferredObject[] arguments) throws HiveException
 	{
-		DeferredObject amt = arguments[1];
-		int intAmt = 0;
-		try
-		{
-			intAmt = PrimitiveObjectInspectorUtils.getInt(amt.get(), amtOI);
-		}
-		catch (NullPointerException e)
-		{
-			intAmt = Integer.MAX_VALUE;
-		}
-		catch (NumberFormatException e)
-		{
-			intAmt = Integer.MAX_VALUE;
-		}
+    Object defaultVal = null;
+    if (arguments.length == 3) {
+      defaultVal = ObjectInspectorUtils.copyToStandardObject(
+          defaultValueConverter.convert(arguments[2].get()),
+          defaultArgOI);
+    }
 
 		int idx = pItr.getIndex() - 1;
+		int start = 0;
+		int end = pItr.getPartition().size();
 		try
 		{
-			Object row = getRow(intAmt);
-			Object ret = exprEvaluator.evaluate(row);
-			ret = ObjectInspectorUtils.copyToStandardObject(ret, firstArgOI, ObjectInspectorCopyOption.WRITABLE);
+		  Object ret = null;
+		  int newIdx = getIndex(amt);
+
+		  if(newIdx >= end || newIdx < start) {
+        ret = defaultVal;
+		  }
+		  else {
+        Object row = getRow(amt);
+        ret = exprEvaluator.evaluate(row);
+        ret = ObjectInspectorUtils.copyToStandardObject(ret,
+            firstArgOI, ObjectInspectorCopyOption.WRITABLE);
+		  }
 			return ret;
 		}
 		finally
@@ -83,25 +90,41 @@ public abstract class GenericUDFLeadLag 
 	public ObjectInspector initialize(ObjectInspector[] arguments)
 			throws UDFArgumentException
 	{
-		// index has to be a primitive
-		if (arguments[1] instanceof PrimitiveObjectInspector)
-		{
-			amtOI = (PrimitiveObjectInspector) arguments[1];
-		}
-		else
-		{
-			throw new UDFArgumentTypeException(1,
-					"Primitive Type is expected but "
-							+ arguments[1].getTypeName() + "\" is found");
-		}
-
-		firstArgOI = arguments[0];
-		return ObjectInspectorUtils.getStandardObjectInspector(firstArgOI,
-				ObjectInspectorCopyOption.WRITABLE);
+    if (!(arguments.length >= 1 && arguments.length <= 3)) {
+      throw new UDFArgumentTypeException(arguments.length - 1,
+          "Incorrect invocation of " + _getFnName() + ": _FUNC_(expr, amt, default)");
+    }
+
+    amt = 1;
+
+    if (arguments.length > 1) {
+      ObjectInspector amtOI = arguments[1];
+      if ( !ObjectInspectorUtils.isConstantObjectInspector(amtOI) ||
+          (amtOI.getCategory() != ObjectInspector.Category.PRIMITIVE) ||
+          ((PrimitiveObjectInspector)amtOI).getPrimitiveCategory() !=
+          PrimitiveObjectInspector.PrimitiveCategory.INT )
+      {
+        throw new UDFArgumentTypeException(0,
+            _getFnName() + " amount must be an integer value; "
+            + amtOI.getTypeName() + " was passed as parameter 1.");
+      }
+      Object o = ((ConstantObjectInspector)amtOI).
+          getWritableConstantValue();
+      amt = ((IntWritable)o).get();
+    }
+
+    if (arguments.length == 3) {
+      defaultArgOI = arguments[2];
+      defaultValueConverter = ObjectInspectorConverters.getConverter(arguments[2], arguments[0]);
+    }
+
+    firstArgOI = arguments[0];
+    return ObjectInspectorUtils.getStandardObjectInspector(firstArgOI,
+        ObjectInspectorCopyOption.WRITABLE);
 	}
 
-
-
 	public ExprNodeEvaluator getExprEvaluator()
 	{
 		return exprEvaluator;
@@ -122,7 +145,39 @@ public abstract class GenericUDFLeadLag 
 		this.pItr = pItr;
 	}
 
-	@Override
+	public ObjectInspector getFirstArgOI() {
+    return firstArgOI;
+  }
+
+  public void setFirstArgOI(ObjectInspector firstArgOI) {
+    this.firstArgOI = firstArgOI;
+  }
+
+  public ObjectInspector getDefaultArgOI() {
+    return defaultArgOI;
+  }
+
+  public void setDefaultArgOI(ObjectInspector defaultArgOI) {
+    this.defaultArgOI = defaultArgOI;
+  }
+
+  public Converter getDefaultValueConverter() {
+    return defaultValueConverter;
+  }
+
+  public void setDefaultValueConverter(Converter defaultValueConverter) {
+    this.defaultValueConverter = defaultValueConverter;
+  }
+
+  public int getAmt() {
+    return amt;
+  }
+
+  public void setAmt(int amt) {
+    this.amt = amt;
+  }
+
+  @Override
 	public String getDisplayString(String[] children)
 	{
 		assert (children.length == 2);
@@ -140,6 +195,8 @@ public abstract class GenericUDFLeadLag 
 
 	protected abstract Object getRow(int amt);
 
+	protected abstract int getIndex(int amt);
+
 	public static class GenericUDFLead extends GenericUDFLeadLag
 	{
 
@@ -150,6 +207,11 @@ public abstract class GenericUDFLeadLag 
 		}
 
 		@Override
+		protected int getIndex(int amt) {
+		  return pItr.getIndex() - 1 + amt;
+		}
+
+		@Override
 		protected Object getRow(int amt)
 		{
 			return pItr.lead(amt - 1);
@@ -166,6 +228,11 @@ public abstract class GenericUDFLeadLag 
 		}
 
 		@Override
+    protected int getIndex(int amt) {
+      return pItr.getIndex() - 1 - amt;
+    }
+
+		@Override
 		protected Object getRow(int amt)
 		{
 			return pItr.lag(amt + 1);

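The reworked lead/lag evaluation computes the target row index up front and returns the converted default value whenever that index falls outside the partition, instead of relying on exception handling. A standalone sketch of the boundary rule, with a plain List standing in for the PTFPartition (a simplification; the names here are illustrative):

import java.util.Arrays;
import java.util.List;

public class LeadLagBoundaryDemo {
  // lead(amt) looks amt rows ahead of idx; out-of-range lookups yield the
  // caller-supplied default instead of an error.
  static <T> T lead(List<T> partition, int idx, int amt, T defaultVal) {
    int newIdx = idx + amt;
    return (newIdx >= partition.size() || newIdx < 0) ? defaultVal : partition.get(newIdx);
  }

  public static void main(String[] args) {
    List<String> rows = Arrays.asList("a", "b", "c");
    System.out.println(lead(rows, 1, 1, "dflt")); // c
    System.out.println(lead(rows, 2, 1, "dflt")); // dflt (past the end)
  }
}
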
Modified: hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java (original)
+++ hive/branches/vectorization/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java Fri Apr 26 19:14:49 2013
@@ -67,12 +67,9 @@ public abstract class TableFunctionEvalu
   transient protected PTFPartition outputPartition;
 
   static{
-    PTFUtils.makeTransient(TableFunctionEvaluator.class, "OI");
-    PTFUtils.makeTransient(TableFunctionEvaluator.class, "rawInputOI");
-    PTFUtils.makeTransient(TableFunctionEvaluator.class, "outputPartition");
+    PTFUtils.makeTransient(TableFunctionEvaluator.class, "outputOI", "rawInputOI");
   }
 
-
   public StructObjectInspector getOutputOI()
   {
     return OI;

Modified: hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (original)
+++ hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java Fri Apr 26 19:14:49 2013
@@ -44,6 +44,7 @@ import org.apache.hadoop.hive.ql.plan.Ex
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ScriptDesc;
@@ -132,6 +133,11 @@ public class TestExecDriver extends Test
     mr = PlanUtils.getMapRedWork();
   }
 
+  public static void addMapWork(MapredWork mr, Table tbl, String alias, Operator<?> work) {
+    mr.addMapWork(tbl.getDataLocation().toString(), alias, work, new PartitionDesc(
+        Utilities.getTableDesc(tbl), null));
+  }
+
   private static void fileDiff(String datafile, String testdir) throws Exception {
     String testFileDir = conf.get("test.data.files");
     System.out.println(testFileDir);
@@ -190,7 +196,7 @@ public class TestExecDriver extends Test
     Operator<FilterDesc> op1 = OperatorFactory.get(getTestFilterDesc("key"),
         op2);
 
-    Utilities.addMapWork(mr, src, "a", op1);
+    addMapWork(mr, src, "a", op1);
   }
 
   @SuppressWarnings("unchecked")
@@ -209,7 +215,7 @@ public class TestExecDriver extends Test
     Operator<FilterDesc> op1 = OperatorFactory.get(getTestFilterDesc("key"),
         op2);
 
-    Utilities.addMapWork(mr, src, "a", op1);
+    addMapWork(mr, src, "a", op1);
   }
 
   @SuppressWarnings("unchecked")
@@ -226,7 +232,7 @@ public class TestExecDriver extends Test
         Utilities.makeList(getStringColumn("value")), outputColumns, true,
         -1, 1, -1));
 
-    Utilities.addMapWork(mr, src, "a", op1);
+    addMapWork(mr, src, "a", op1);
     mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
     mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 
@@ -254,7 +260,7 @@ public class TestExecDriver extends Test
         .makeList(getStringColumn("key"), getStringColumn("value")),
         outputColumns, false, -1, 1, -1));
 
-    Utilities.addMapWork(mr, src, "a", op1);
+    addMapWork(mr, src, "a", op1);
     mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
     mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 
@@ -287,7 +293,7 @@ public class TestExecDriver extends Test
         Utilities.makeList(getStringColumn("value")), outputColumns, true,
         Byte.valueOf((byte) 0), 1, -1));
 
-    Utilities.addMapWork(mr, src, "a", op1);
+    addMapWork(mr, src, "a", op1);
     mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
     mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 
@@ -296,7 +302,7 @@ public class TestExecDriver extends Test
         Utilities.makeList(getStringColumn("key")), outputColumns, true,
         Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1));
 
-    Utilities.addMapWork(mr, src2, "b", op2);
+    addMapWork(mr, src2, "b", op2);
     mr.getTagToValueDesc().add(op2.getConf().getValueSerializeInfo());
 
     // reduce side work
@@ -338,7 +344,7 @@ public class TestExecDriver extends Test
         .makeList(getStringColumn("key"), getStringColumn("value")),
         outputColumns), op0);
 
-    Utilities.addMapWork(mr, src, "a", op4);
+    addMapWork(mr, src, "a", op4);
     mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
     mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 
@@ -375,7 +381,7 @@ public class TestExecDriver extends Test
         .makeList(getStringColumn("key"), getStringColumn("value")),
         outputColumns), op0);
 
-    Utilities.addMapWork(mr, src, "a", op4);
+    addMapWork(mr, src, "a", op4);
     mr.setKeyDesc(op0.getConf().getKeySerializeInfo());
     mr.getTagToValueDesc().add(op0.getConf().getValueSerializeInfo());
 
@@ -414,7 +420,7 @@ public class TestExecDriver extends Test
         .makeList(getStringColumn("key"), getStringColumn("value")),
         outputColumns), op0);
 
-    Utilities.addMapWork(mr, src, "a", op4);
+    addMapWork(mr, src, "a", op4);
     mr.setKeyDesc(op1.getConf().getKeySerializeInfo());
     mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 

Modified: hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (original)
+++ hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java Fri Apr 26 19:14:49 2013
@@ -320,7 +320,7 @@ public class TestOperators extends TestC
       aliases.add("b");
       LinkedHashMap<String, ArrayList<String>> pathToAliases =
         new LinkedHashMap<String, ArrayList<String>>();
-      pathToAliases.put("/testDir", aliases);
+      pathToAliases.put("hdfs:///testDir", aliases);
 
       // initialize pathToTableInfo
       // Default: treat the table as a single column "col"
@@ -328,7 +328,7 @@ public class TestOperators extends TestC
       PartitionDesc pd = new PartitionDesc(td, null);
       LinkedHashMap<String, org.apache.hadoop.hive.ql.plan.PartitionDesc> pathToPartitionInfo =
         new LinkedHashMap<String, org.apache.hadoop.hive.ql.plan.PartitionDesc>();
-      pathToPartitionInfo.put("/testDir", pd);
+      pathToPartitionInfo.put("hdfs:///testDir", pd);
 
       // initialize aliasToWork
       CollectDesc cd = new CollectDesc(Integer.valueOf(1));

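In TestOperators, the keys of `pathToAliases` and `pathToPartitionInfo` become fully scheme-qualified (`hdfs:///testDir` instead of `/testDir`), presumably because the map-side lookup now runs against a fully qualified input path. Since these are plain string-keyed maps, the scheme has to match exactly; a self-contained illustration (not Hive code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashMap;

    public class PathKeySketch {
      public static void main(String[] args) {
        // The plan's maps are keyed by the literal path string.
        LinkedHashMap<String, ArrayList<String>> pathToAliases =
            new LinkedHashMap<String, ArrayList<String>>();
        pathToAliases.put("hdfs:///testDir",
            new ArrayList<String>(Arrays.asList("b")));
        // An unqualified lookup no longer finds the entry.
        System.out.println(pathToAliases.get("/testDir"));        // null
        System.out.println(pathToAliases.get("hdfs:///testDir")); // [b]
      }
    }
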
Modified: hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java (original)
+++ hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java Fri Apr 26 19:14:49 2013
@@ -94,7 +94,7 @@ public class TestFileDump {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (MyRecord.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         100000, CompressionKind.ZLIB, 10000, 10000);
     Random r1 = new Random(1);
     String[] words = new String[]{"It", "was", "the", "best", "of", "times,",

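The `OrcFile.createWriter` calls in TestFileDump (and throughout TestOrcFile below) gain a `Configuration` argument in the third position, which is what lets the writer pick up the memory-manager machinery exercised at the end of TestOrcFile. Judging from the updated call sites, the factory now has the shape sketched below; the parameter names are guesses inferred from the arguments passed, not taken from the source:

    // Factory shape as inferred from the updated call sites (names guessed):
    public static Writer createWriter(FileSystem fs, Path path,
        Configuration conf, ObjectInspector inspector, long stripeSize,
        CompressionKind compress, int bufferSize, int rowIndexStride)
        throws IOException;
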
Modified: hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Fri Apr 26 19:14:49 2013
@@ -189,7 +189,7 @@ public class TestOrcFile {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (BigRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         100000, CompressionKind.ZLIB, 10000, 10000);
     writer.addRow(new BigRow(false, (byte) 1, (short) 1024, 65536,
         Long.MAX_VALUE, (float) 1.0, -15.0, bytes(0,1,2,3,4), "hi",
@@ -421,7 +421,7 @@ public class TestOrcFile {
           (InnerStruct.class,
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         1000, CompressionKind.NONE, 100, 1000);
     Random r1 = new Random(1);
     Random r2 = new Random(2);
@@ -504,7 +504,7 @@ public class TestOrcFile {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (BigRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         1000, CompressionKind.NONE, 100, 10000);
     writer.close();
     Reader reader = OrcFile.createReader(fs, testFilePath);
@@ -524,7 +524,7 @@ public class TestOrcFile {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (BigRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         1000, CompressionKind.NONE, 100, 10000);
     writer.addUserMetadata("my.meta", byteBuf(1, 2, 3, 4, 5, 6, 7, -1, -2, 127, -128));
     writer.addUserMetadata("clobber", byteBuf(1,2,3));
@@ -590,7 +590,7 @@ public class TestOrcFile {
       inspector = OrcStruct.createObjectInspector(0, types);
     }
     HiveDecimal maxValue = new HiveDecimal("100000000000000000000");
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         1000, CompressionKind.NONE, 100, 10000);
     OrcStruct row = new OrcStruct(3);
     OrcUnion union = new OrcUnion();
@@ -767,7 +767,7 @@ public class TestOrcFile {
           (InnerStruct.class,
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         1000, CompressionKind.SNAPPY, 100, 10000);
     Random rand = new Random(12);
     for(int i=0; i < 10000; ++i) {
@@ -802,7 +802,7 @@ public class TestOrcFile {
           (InnerStruct.class,
               ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         5000, CompressionKind.SNAPPY, 1000, 0);
     Random rand = new Random(24);
     for(int i=0; i < 10000; ++i) {
@@ -843,7 +843,7 @@ public class TestOrcFile {
       inspector = ObjectInspectorFactory.getReflectionObjectInspector
           (BigRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
     }
-    Writer writer = OrcFile.createWriter(fs, testFilePath, inspector,
+    Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         200000, CompressionKind.ZLIB, 65536, 1000);
     Random rand = new Random(42);
     final int COUNT=32768;
@@ -936,4 +936,68 @@ public class TestOrcFile {
         (float) doubleValues[i], doubleValues[i], byteValues[i],stringValues[i],
         new MiddleStruct(inner, inner2), list(), map(inner,inner2));
   }
+
+  private static class MyMemoryManager extends MemoryManager {
+    final long totalSpace;
+    double rate;
+    Path path = null;
+    long lastAllocation = 0;
+
+    MyMemoryManager(Configuration conf, long totalSpace, double rate) {
+      super(conf);
+      this.totalSpace = totalSpace;
+      this.rate = rate;
+    }
+
+    @Override
+    void addWriter(Path path, long requestedAllocation,
+                   MemoryManager.Callback callback) {
+      this.path = path;
+      this.lastAllocation = requestedAllocation;
+    }
+
+    @Override
+    synchronized void removeWriter(Path path) {
+      this.path = null;
+      this.lastAllocation = 0;
+    }
+
+    @Override
+    long getTotalMemoryPool() {
+      return totalSpace;
+    }
+
+    @Override
+    double getAllocationScale() {
+      return rate;
+    }
+  }
+
+  @Test
+  public void testMemoryManagement() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory.getReflectionObjectInspector
+          (InnerStruct.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+    MyMemoryManager memory = new MyMemoryManager(conf, 10000, 0.1);
+    Writer writer = new WriterImpl(fs, testFilePath, inspector,
+        50000, CompressionKind.NONE, 100, 0, memory);
+    assertEquals(testFilePath, memory.path);
+    for(int i=0; i < 2500; ++i) {
+      writer.addRow(new InnerStruct(i*300, Integer.toHexString(10*i)));
+    }
+    writer.close();
+    assertEquals(null, memory.path);
+    Reader reader = OrcFile.createReader(fs, testFilePath);
+    int i = 0;
+    for(StripeInformation stripe: reader.getStripes()) {
+      i += 1;
+      assertTrue("stripe " + i + " is too long at " + stripe.getDataLength(),
+          stripe.getDataLength() < 10000);
+    }
+    assertEquals(3, i);
+    assertEquals(2500, reader.getNumberOfRows());
+  }
 }

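The new `testMemoryManagement` case stubs the memory manager so the writer sees a 10,000-byte pool and an allocation scale of 0.1: with a requested stripe size of 50,000 bytes, the effective budget comes to roughly 5,000 bytes, which is why 2,500 small rows are expected to flush into 3 stripes, each under 10,000 bytes of data. A self-contained back-of-envelope sketch of that arithmetic (the scaling behavior is an assumption about the writer; the real accounting is more involved):

    public class StripeBudgetSketch {
      public static void main(String[] args) {
        long requestedStripeSize = 50000;  // passed to WriterImpl in the test
        double allocationScale = 0.1;      // MyMemoryManager's rate
        long pool = 10000;                 // MyMemoryManager's totalSpace
        // Assumption: the writer scales its stripe budget by the manager's
        // allocation scale, so it flushes well before the nominal stripe size.
        long budget = (long) (requestedStripeSize * allocationScale);
        System.out.println("effective stripe budget: " + budget); // 5000
        System.out.println("fits in pool: " + (budget < pool));   // true
      }
    }
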
Modified: hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java (original)
+++ hive/branches/vectorization/ql/src/test/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java Fri Apr 26 19:14:49 2013
@@ -169,7 +169,7 @@ public class TestAuthorizationPreEventLi
   }
 
   public void testListener() throws Exception {
-    String dbName = "tmpdb";
+    String dbName = "hive3705";
     String tblName = "tmptbl";
     String renamed = "tmptbl2";
     int listSize = 0;
@@ -199,7 +199,7 @@ public class TestAuthorizationPreEventLi
 
     driver.run("alter table tmptbl add partition (b='2011')");
     listSize++;
-    Partition part = msc.getPartition("tmpdb", "tmptbl", "b=2011");
+    Partition part = msc.getPartition("hive3705", "tmptbl", "b=2011");
 
     Partition ptnFromEvent = (
         (org.apache.hadoop.hive.ql.metadata.Partition)

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'nosuchschema://nosuchauthority/ql/test/data/exports/exim_department';
 drop table exim_department;

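The remaining exim_*.q changes in this part are all the same one-line substitution: the hard-coded `dfs -mkdir` becomes `dfs ${system:test.dfs.mkdir}`, a test-harness variable, presumably so the harness can supply `-mkdir -p` on Hadoop 2, where a bare `-mkdir` no longer creates missing parent directories. With that value set, the rewritten line would expand as follows (the exact property value is an assumption):

    -- Assuming the harness sets test.dfs.mkdir to "-mkdir -p":
    dfs -mkdir -p ../build/ql/test/data/exports/exim_department/temp;
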
Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q Fri Apr 26 19:14:49 2013
@@ -14,7 +14,7 @@ load data local inpath "../data/files/te
 	into table exim_employee partition (emp_country="us", emp_state="tn");	
 load data local inpath "../data/files/test.dat" 
 	into table exim_employee partition (emp_country="us", emp_state="ka");		
-dfs -mkdir ../build/ql/test/data/exports/exim_employee/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_employee;
 export table exim_employee to 'ql/test/data/exports/exim_employee';
 drop table exim_employee;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q Fri Apr 26 19:14:49 2013
@@ -6,7 +6,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;	
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;		
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;		
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q Fri Apr 26 19:14:49 2013
@@ -6,7 +6,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department partition (dep_org="hr");		
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q Fri Apr 26 19:14:49 2013
@@ -6,7 +6,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department partition (dep_org="hr");		
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q Fri Apr 26 19:14:49 2013
@@ -14,7 +14,7 @@ load data local inpath "../data/files/te
 	into table exim_employee partition (emp_country="us", emp_state="tn");	
 load data local inpath "../data/files/test.dat" 
 	into table exim_employee partition (emp_country="us", emp_state="ka");		
-dfs -mkdir ../build/ql/test/data/exports/exim_employee/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_employee;
 export table exim_employee to 'ql/test/data/exports/exim_employee';
 drop table exim_employee;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q Fri Apr 26 19:14:49 2013
@@ -14,7 +14,7 @@ load data local inpath "../data/files/te
 	into table exim_employee partition (emp_country="us", emp_state="tn");	
 load data local inpath "../data/files/test.dat" 
 	into table exim_employee partition (emp_country="us", emp_state="ka");		
-dfs -mkdir ../build/ql/test/data/exports/exim_employee/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_employee;
 export table exim_employee to 'ql/test/data/exports/exim_employee';
 drop table exim_employee;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;		
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;

Modified: hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q
URL: http://svn.apache.org/viewvc/hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q?rev=1476348&r1=1476347&r2=1476348&view=diff
==============================================================================
--- hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q (original)
+++ hive/branches/vectorization/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q Fri Apr 26 19:14:49 2013
@@ -5,7 +5,7 @@ create table exim_department ( dep_id in
 	stored as textfile	
 	tblproperties("creator"="krishna");
 load data local inpath "../data/files/test.dat" into table exim_department;		
-dfs -mkdir ../build/ql/test/data/exports/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
 dfs -rmr ../build/ql/test/data/exports/exim_department;
 export table exim_department to 'ql/test/data/exports/exim_department';
 drop table exim_department;
@@ -13,7 +13,7 @@ drop table exim_department;
 create database importer;
 use importer;
 
-dfs -mkdir ../build/ql/test/data/tablestore/exim_department/temp;
+dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_department/temp;
 dfs -rmr ../build/ql/test/data/tablestore/exim_department;
 
 create table exim_department ( dep_id int comment "department id")