You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by nz...@apache.org on 2011/04/28 19:52:11 UTC

svn commit: r1097563 - in /hive/trunk: common/src/java/org/apache/hadoop/hive/conf/ conf/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/ ql/src/java/org/apache/hadoop/hive/ql/parse/ ql/src/test/querie...

Author: nzhang
Date: Thu Apr 28 17:52:10 2011
New Revision: 1097563

URL: http://svn.apache.org/viewvc?rev=1097563&view=rev
Log:
HIVE-2125. alter table concatenate fails and deletes data

Added:
    hive/trunk/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
    hive/trunk/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
    hive/trunk/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out
    hive/trunk/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out
Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/conf/hive-default.xml
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1097563&r1=1097562&r2=1097563&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Thu Apr 28 17:52:10 2011
@@ -432,8 +432,9 @@ public class HiveConf extends Configurat
     // Temporary variable for testing. This is added just to turn off this feature in case of a bug in
     // deployment. It has intentionally not been documented in hive-default.xml; this should be removed
     // once the feature is stable.
-    HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false), 
-    HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false), 
+    HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false),
+    HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false),
+    HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true),
     ;
 
     public final String varname;

Modified: hive/trunk/conf/hive-default.xml
URL: http://svn.apache.org/viewvc/hive/trunk/conf/hive-default.xml?rev=1097563&r1=1097562&r2=1097563&view=diff
==============================================================================
--- hive/trunk/conf/hive-default.xml (original)
+++ hive/trunk/conf/hive-default.xml Thu Apr 28 17:52:10 2011
@@ -1057,4 +1057,14 @@
   This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time.</description>
 </property>
 
+<property>
+  <name>hive.exec.concatenate.check.index</name>
+  <value>true</value>
+  <description>If this is set to true, Hive will throw an error when doing
+   'alter table tbl_name [partSpec] concatenate' on a table/partition
+    that has indexes on it. The reason a user would want to set this to true
+    is that it can help the user avoid handling all of the index drop, recreation,
+    and rebuild work. This is very helpful for tables with thousands of partitions.</description>
+</property>
+
 </configuration>

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1097563&r1=1097562&r2=1097563&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Thu Apr 28 17:52:10 2011
@@ -403,6 +403,7 @@ public class DDLTask extends Task<DDLWor
     BlockMergeTask taskExec = new BlockMergeTask();
     taskExec.initialize(db.getConf(), null, driverCxt);
     taskExec.setWork(mergeWork);
+    taskExec.setQueryPlan(this.getQueryPlan());
     int ret = taskExec.execute(driverCxt);
 
     return ret;

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java?rev=1097563&r1=1097562&r2=1097563&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java Thu Apr 28 17:52:10 2011
@@ -486,4 +486,12 @@ public abstract class Task<T extends Ser
   public void setRetryCmdWhenFail(boolean retryCmdWhenFail) {
     this.retryCmdWhenFail = retryCmdWhenFail;
   }
+  
+  public QueryPlan getQueryPlan() {
+    return queryPlan;
+  }
+  
+  public void setQueryPlan(QueryPlan queryPlan) {
+    this.queryPlan = queryPlan;
+  }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java?rev=1097563&r1=1097562&r2=1097563&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java Thu Apr 28 17:52:10 2011
@@ -111,6 +111,9 @@ public class BlockMergeTask extends Task
     if(work.getNumMapTasks() != null) {
       job.setNumMapTasks(work.getNumMapTasks());      
     }
+    
+    // zero reducers
+    job.setNumReduceTasks(0);
 
     if (work.getMinSplitSize() != null) {
       HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work
@@ -146,11 +149,18 @@ public class BlockMergeTask extends Task
     RunningJob rj = null;
     boolean noName = StringUtils.isEmpty(HiveConf.getVar(job,
         HiveConf.ConfVars.HADOOPJOBNAME));
-
+    
+    String jobName = null;
+    if (noName && this.getQueryPlan() != null) {
+      int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
+      jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(),
+          maxlen - 6);
+    }
+    
     if (noName) {
       // This is for a special case to ensure unit tests pass
-      HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME, "JOB"
-          + Utilities.randGen.nextInt());
+      HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME,
+          jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
     }
 
     try {

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1097563&r1=1097562&r2=1097563&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Thu Apr 28 17:52:10 2011
@@ -25,6 +25,8 @@ import static org.apache.hadoop.hive.ql.
 import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_SHOWDATABASES;
 
 import java.io.Serializable;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -83,7 +85,9 @@ import org.apache.hadoop.hive.ql.plan.Dr
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.GrantDesc;
 import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL;
+import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.LockTableDesc;
+import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.MsckDesc;
 import org.apache.hadoop.hive.ql.plan.PrincipalDesc;
 import org.apache.hadoop.hive.ql.plan.PrivilegeDesc;
@@ -1116,19 +1120,24 @@ public class DDLSemanticAnalyzer extends
         tableName, partSpec);
 
     List<String> inputDir = new ArrayList<String>();
-    String outputDir = null;
+    String tblPartLoc = null;
+    Table tblObj = null;
 
     try {
-      Table tblObj = db.getTable(tableName);
+      tblObj = db.getTable(tableName);
 
       List<String> bucketCols = null;
       Class<? extends InputFormat> inputFormatClass = null;
       boolean isArchived = false;
-      List<Index> indexes = db.getIndexes(tblObj.getDbName(), tableName,
-          Short.MAX_VALUE);
-      if (indexes != null && indexes.size() > 0) {
-        throw new SemanticException("can not do merge because source table "
-            + tableName + " is indexed.");
+      boolean checkIndex = HiveConf.getBoolVar(conf,
+          HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX);
+      if (checkIndex) {
+        List<Index> indexes = db.getIndexes(tblObj.getDbName(), tableName,
+            Short.MAX_VALUE);
+        if (indexes != null && indexes.size() > 0) {
+          throw new SemanticException("can not do merge because source table "
+              + tableName + " is indexed.");
+        }
       }
 
       if (tblObj.isPartitioned()) {
@@ -1144,12 +1153,12 @@ public class DDLSemanticAnalyzer extends
           bucketCols = part.getBucketCols();
           inputFormatClass = part.getInputFormatClass();
           isArchived = Utilities.isArchived(part);
-          outputDir = part.getDataLocation().toString();
+          tblPartLoc = part.getDataLocation().toString();
         }
       } else {
         inputFormatClass = tblObj.getInputFormatClass();
         bucketCols = tblObj.getBucketCols();
-        outputDir = tblObj.getDataLocation().toString();
+        tblPartLoc = tblObj.getDataLocation().toString();
       }
 
       // throw a HiveException for non-rcfile.
@@ -1169,29 +1178,34 @@ public class DDLSemanticAnalyzer extends
         throw new SemanticException(
             "Merge can not perform on archived partitions.");
       }
-    } catch (HiveException e) {
-      throw new SemanticException(e);
-    }
 
-    // input and output are the same
-    inputDir.add(outputDir);
+      // input and output are the same
+      inputDir.add(tblPartLoc);
 
-    mergeDesc.setInputDir(inputDir);
-    mergeDesc.setOutputDir(outputDir);
+      mergeDesc.setInputDir(inputDir);
 
-    addInputsOutputsAlterTable(tableName, partSpec);
+      addInputsOutputsAlterTable(tableName, partSpec);
+      DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
+      ddlWork.setNeedLock(true);
+      Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
+      TableDesc tblDesc = Utilities.getTableDesc(tblObj);
+      String queryTmpdir = ctx.getExternalTmpFileURI(new URI(tblPartLoc));
+      mergeDesc.setOutputDir(queryTmpdir);
+      LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,
+          partSpec == null ? new HashMap<String, String>() : partSpec);
+      Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
+          conf);
+      mergeTask.addDependentTask(moveTsk);
+      tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST);
+      StatsWork statDesc = new StatsWork(tablepart);
+      statDesc.setNoStatsAggregator(true);
+      Task<? extends Serializable> statTask = TaskFactory.get(statDesc, conf);
+      moveTsk.addDependentTask(statTask);
 
-    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
-    ddlWork.setNeedLock(true);
-    Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
-
-    tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST);
-    StatsWork statDesc = new StatsWork(tablepart);
-    statDesc.setNoStatsAggregator(true);
-    Task<? extends Serializable> statTask = TaskFactory.get(statDesc, conf);
-    mergeTask.addDependentTask(statTask);
-
-    rootTasks.add(mergeTask);
+      rootTasks.add(mergeTask);
+    } catch (Exception e) {
+      throw new SemanticException(e);
+    }
   }
 
   private void analyzeAlterTableClusterSort(ASTNode ast)

Added: hive/trunk/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q?rev=1097563&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q (added)
+++ hive/trunk/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q Thu Apr 28 17:52:10 2011
@@ -0,0 +1,16 @@
+set hive.exec.concatenate.check.index=true;
+create table src_rc_concatenate_test(key int, value string) stored as rcfile;
+
+load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
+load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
+load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
+
+show table extended like `src_rc_concatenate_test`;
+
+select count(1) from src_rc_concatenate_test;
+select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
+
+create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); 
+show indexes on src_rc_concatenate_test;
+
+alter table src_rc_concatenate_test concatenate;

Added: hive/trunk/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q?rev=1097563&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q Thu Apr 28 17:52:10 2011
@@ -0,0 +1,48 @@
+set hive.exec.concatenate.check.index =false;
+create table src_rc_concatenate_test(key int, value string) stored as rcfile;
+
+load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
+load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
+load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
+
+show table extended like `src_rc_concatenate_test`;
+
+select count(1) from src_rc_concatenate_test;
+select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
+
+create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); 
+show indexes on src_rc_concatenate_test;
+
+alter table src_rc_concatenate_test concatenate;
+
+show table extended like `src_rc_concatenate_test`;
+
+select count(1) from src_rc_concatenate_test;
+select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test;
+
+drop index src_rc_concatenate_test_index on src_rc_concatenate_test;
+
+create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile;
+
+alter table src_rc_concatenate_test_part add partition (ds='2011');
+
+load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011');
+load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011');
+load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011');
+
+show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
+
+select count(1) from src_rc_concatenate_test_part;
+select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part;
+
+create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2");
+show indexes on src_rc_concatenate_test_part;
+
+alter table src_rc_concatenate_test_part partition (ds='2011') concatenate;
+
+show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
+
+select count(1) from src_rc_concatenate_test_part;
+select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part;
+
+drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part;

Added: hive/trunk/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out?rev=1097563&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out (added)
+++ hive/trunk/ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out Thu Apr 28 17:52:10 2011
@@ -0,0 +1,70 @@
+PREHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: show table extended like `src_rc_concatenate_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_concatenate_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_concatenate_test
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_concatenate_test
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:636
+maxFileSize:222
+minFileSize:206
+lastAccessTime:0
+lastUpdateTime:1303856313000
+
+PREHOOK: query: select count(1) from src_rc_concatenate_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-18-34_169_7540361530581615138/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_concatenate_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-18-34_169_7540361530581615138/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-18-42_067_46716291029153270/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-18-42_067_46716291029153270/-mr-10000
+214	-7678496319
+PREHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+PREHOOK: type: CREATEINDEX
+POSTHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+POSTHOOK: type: CREATEINDEX
+PREHOOK: query: show indexes on src_rc_concatenate_test
+PREHOOK: type: SHOWINDEXES
+POSTHOOK: query: show indexes on src_rc_concatenate_test
+POSTHOOK: type: SHOWINDEXES
+src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
+FAILED: Error in semantic analysis: org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table src_rc_concatenate_test is indexed.

Added: hive/trunk/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out?rev=1097563&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/alter_concatenate_indexed_table.q.out Thu Apr 28 17:52:10 2011
@@ -0,0 +1,243 @@
+PREHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src_rc_concatenate_test(key int, value string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: show table extended like `src_rc_concatenate_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_concatenate_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_concatenate_test
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_concatenate_test
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:3
+totalFileSize:636
+maxFileSize:222
+minFileSize:206
+lastAccessTime:0
+lastUpdateTime:1303856093000
+
+PREHOOK: query: select count(1) from src_rc_concatenate_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-14-54_116_5056538740056177435/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_concatenate_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-14-54_116_5056538740056177435/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-01_971_2647514620376325946/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-01_971_2647514620376325946/-mr-10000
+214	-7678496319
+PREHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+PREHOOK: type: CREATEINDEX
+POSTHOOK: query: create index src_rc_concatenate_test_index on table src_rc_concatenate_test(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+POSTHOOK: type: CREATEINDEX
+PREHOOK: query: show indexes on src_rc_concatenate_test
+PREHOOK: type: SHOWINDEXES
+POSTHOOK: query: show indexes on src_rc_concatenate_test
+POSTHOOK: type: SHOWINDEXES
+src_rc_concatenate_test_index	src_rc_concatenate_test	key                 	default__src_rc_concatenate_test_src_rc_concatenate_test_index__	compact             	
+PREHOOK: query: alter table src_rc_concatenate_test concatenate
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: default@src_rc_concatenate_test
+POSTHOOK: query: alter table src_rc_concatenate_test concatenate
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: default@src_rc_concatenate_test
+PREHOOK: query: show table extended like `src_rc_concatenate_test`
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_concatenate_test`
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_concatenate_test
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_concatenate_test
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:false
+partitionColumns:
+totalNumberFiles:1
+totalFileSize:334
+maxFileSize:334
+minFileSize:334
+lastAccessTime:0
+lastUpdateTime:1303856111000
+
+PREHOOK: query: select count(1) from src_rc_concatenate_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-12_232_1765943327509437357/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_concatenate_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-12_232_1765943327509437357/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-18_738_6289595467011155110/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-18_738_6289595467011155110/-mr-10000
+214	-7678496319
+PREHOOK: query: drop index src_rc_concatenate_test_index on src_rc_concatenate_test
+PREHOOK: type: DROPINDEX
+POSTHOOK: query: drop index src_rc_concatenate_test_index on src_rc_concatenate_test
+POSTHOOK: type: DROPINDEX
+PREHOOK: query: create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table src_rc_concatenate_test_part(key int, value string) partitioned by (ds string) stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@src_rc_concatenate_test_part
+PREHOOK: query: alter table src_rc_concatenate_test_part add partition (ds='2011')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Input: default@src_rc_concatenate_test_part
+POSTHOOK: query: alter table src_rc_concatenate_test_part add partition (ds='2011')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Input: default@src_rc_concatenate_test_part
+POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: query: load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_concatenate_test_part
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_concatenate_test_part/ds=2011
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:3
+totalFileSize:636
+maxFileSize:222
+minFileSize:206
+lastAccessTime:0
+lastUpdateTime:1303856128000
+
+PREHOOK: query: select count(1) from src_rc_concatenate_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-28_709_6508379632697292274/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_concatenate_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-28_709_6508379632697292274/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-35_496_4356530739014843487/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-35_496_4356530739014843487/-mr-10000
+214	-7678496319
+PREHOOK: query: create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+PREHOOK: type: CREATEINDEX
+POSTHOOK: query: create index src_rc_concatenate_test_part_index on table src_rc_concatenate_test_part(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2")
+POSTHOOK: type: CREATEINDEX
+PREHOOK: query: show indexes on src_rc_concatenate_test_part
+PREHOOK: type: SHOWINDEXES
+POSTHOOK: query: show indexes on src_rc_concatenate_test_part
+POSTHOOK: type: SHOWINDEXES
+src_rc_concatenate_test_part_index	src_rc_concatenate_test_part	key                 	default__src_rc_concatenate_test_part_src_rc_concatenate_test_part_index__	compact             	
+PREHOOK: query: alter table src_rc_concatenate_test_part partition (ds='2011') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@src_rc_concatenate_test_part
+PREHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: query: alter table src_rc_concatenate_test_part partition (ds='2011') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@src_rc_concatenate_test_part
+POSTHOOK: Output: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
+PREHOOK: type: SHOW_TABLESTATUS
+POSTHOOK: query: show table extended like `src_rc_concatenate_test_part` partition (ds='2011')
+POSTHOOK: type: SHOW_TABLESTATUS
+tableName:src_rc_concatenate_test_part
+owner:heyongqiang
+location:pfile:/Users/heyongqiang/Documents/workspace/Hive-3/build/ql/test/data/warehouse/src_rc_concatenate_test_part/ds=2011
+inputformat:org.apache.hadoop.hive.ql.io.RCFileInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+columns:struct columns { i32 key, string value}
+partitioned:true
+partitionColumns:struct partition_columns { string ds}
+totalNumberFiles:1
+totalFileSize:334
+maxFileSize:334
+minFileSize:334
+lastAccessTime:0
+lastUpdateTime:1303856144000
+
+PREHOOK: query: select count(1) from src_rc_concatenate_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-45_308_2380163877365308340/-mr-10000
+POSTHOOK: query: select count(1) from src_rc_concatenate_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-45_308_2380163877365308340/-mr-10000
+15
+PREHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-51_822_7148108227690658912/-mr-10000
+POSTHOOK: query: select sum(hash(key)), sum(hash(value)) from src_rc_concatenate_test_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_rc_concatenate_test_part@ds=2011
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2011-04-26_15-15-51_822_7148108227690658912/-mr-10000
+214	-7678496319
+PREHOOK: query: drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part
+PREHOOK: type: DROPINDEX
+POSTHOOK: query: drop index src_rc_concatenate_test_part_index on src_rc_concatenate_test_part
+POSTHOOK: type: DROPINDEX