Posted to commits@hive.apache.org by he...@apache.org on 2010/02/26 04:19:19 UTC

svn commit: r916550 - in /hadoop/hive/trunk: ./ common/src/java/org/apache/hadoop/hive/conf/ conf/ ql/src/java/org/apache/hadoop/hive/ql/parse/ ql/src/test/queries/clientpositive/ ql/src/test/results/clientpositive/

Author: heyongqiang
Date: Fri Feb 26 03:19:18 2010
New Revision: 916550

URL: http://svn.apache.org/viewvc?rev=916550&view=rev
Log:
HIVE-1193. ensure sorting properties for a table. (Namit via He Yongqiang).

Added:
    hadoop/hive/trunk/ql/src/test/queries/clientpositive/bucket4.q
    hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket4.q.out
Modified:
    hadoop/hive/trunk/CHANGES.txt
    hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hadoop/hive/trunk/conf/hive-default.xml
    hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket1.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket2.q.out
    hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket3.q.out

Modified: hadoop/hive/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/CHANGES.txt?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/CHANGES.txt (original)
+++ hadoop/hive/trunk/CHANGES.txt Fri Feb 26 03:19:18 2010
@@ -36,6 +36,9 @@
     HIVE-1032. Better Error Messages for Execution Errors.
     (Paul Yang via zshao)
 
+    HIVE-1193. ensure sorting properties for a table.
+    (Namit via He Yongqiang)
+
   IMPROVEMENTS
     HIVE-983. Function from_unixtime takes long.
     (Ning Zhang via zshao)

Modified: hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hadoop/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Fri Feb 26 03:19:18 2010
@@ -192,6 +192,7 @@
     HIVEINPUTFORMAT("hive.input.format", ""),
 
     HIVEENFORCEBUCKETING("hive.enforce.bucketing", false),
+    HIVEENFORCESORTING("hive.enforce.sorting", false),
     HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner"),
 
     // Optimizer

Modified: hadoop/hive/trunk/conf/hive-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/conf/hive-default.xml?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/conf/hive-default.xml (original)
+++ hadoop/hive/trunk/conf/hive-default.xml Fri Feb 26 03:19:18 2010
@@ -479,4 +479,10 @@
   <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced. </description>
 </property>
 
+<property>
+  <name>hive.enforce.sorting</name>
+  <value>false</value>
+  <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced. </description>
+</property>
+
 </configuration>
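
The new hive.enforce.sorting flag is consulted the same way hive.enforce.bucketing already is: SemanticAnalyzer (diff below) checks it together with the destination table's SORTED BY columns before planning the extra reduce sink. A minimal sketch of that gate, assuming only the HiveConf and Table accessors that appear in the diff; it is illustrative, not part of the patch:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Table;

public class EnforcementGateSketch {
  // Returns true when an extra ReduceSinkOperator should be planned for the
  // insert, mirroring the checks at the top of genBucketingSortingDest.
  static boolean needsEnforcementReduceSink(HiveConf conf, Table destTab) {
    boolean enforceBucketing = destTab.getNumBuckets() > 0
        && conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING);
    boolean enforceSorting = destTab.getSortCols() != null
        && destTab.getSortCols().size() > 0
        && conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING);
    return enforceBucketing || enforceSorting;
  }
}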

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Fri Feb 26 03:19:18 2010
@@ -2964,12 +2964,149 @@
   private int getReducersBucketing(int totalFiles, int maxReducers) {
     int numFiles = totalFiles/maxReducers;
     while (true) {
-      if (totalFiles%numFiles == 0)
+      if (totalFiles%numFiles == 0) {
         return totalFiles/numFiles;
+      }
       numFiles++;
     }
   }
 
+  private static class SortBucketRSCtx {
+    ArrayList<ExprNodeDesc> partnCols;
+    boolean multiFileSpray;
+    int     numFiles;
+    int     totalFiles;
+
+    public SortBucketRSCtx() {
+      partnCols = null;
+      multiFileSpray = false;
+      numFiles = 1;
+      totalFiles = 1;
+    }
+
+    /**
+     * @return the partnCols
+     */
+    public ArrayList<ExprNodeDesc> getPartnCols() {
+      return partnCols;
+    }
+
+    /**
+     * @param partnCols the partnCols to set
+     */
+    public void setPartnCols(ArrayList<ExprNodeDesc> partnCols) {
+      this.partnCols = partnCols;
+    }
+
+    /**
+     * @return the multiFileSpray
+     */
+    public boolean isMultiFileSpray() {
+      return multiFileSpray;
+    }
+
+    /**
+     * @param multiFileSpray the multiFileSpray to set
+     */
+    public void setMultiFileSpray(boolean multiFileSpray) {
+      this.multiFileSpray = multiFileSpray;
+    }
+
+    /**
+     * @return the numFiles
+     */
+    public int getNumFiles() {
+      return numFiles;
+    }
+
+    /**
+     * @param numFiles the numFiles to set
+     */
+    public void setNumFiles(int numFiles) {
+      this.numFiles = numFiles;
+    }
+
+    /**
+     * @return the totalFiles
+     */
+    public int getTotalFiles() {
+      return totalFiles;
+    }
+
+    /**
+     * @param totalFiles the totalFiles to set
+     */
+    public void setTotalFiles(int totalFiles) {
+      this.totalFiles = totalFiles;
+    }
+  }
+
+  @SuppressWarnings("nls")
+  private Operator genBucketingSortingDest(String dest, Operator input, QB qb, TableDesc table_desc,
+                                           Table dest_tab, SortBucketRSCtx ctx)
+      throws SemanticException {
+
+    // If the table is bucketed, and bucketing is enforced, do the following:
+    // If the number of buckets is smaller than the number of maximum reducers,
+    // create those many reducers.
+    // If not, create a multiFileSink instead of FileSink - the multiFileSink will
+    // spray the data into multiple buckets. That way, we can support a very large
+    // number of buckets without needing a very large number of reducers.
+    boolean enforceBucketing = false;
+    boolean enforceSorting   = false;
+    ArrayList<ExprNodeDesc> partnCols = new ArrayList<ExprNodeDesc>();
+    ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
+    ArrayList<ExprNodeDesc> sortCols  = new ArrayList<ExprNodeDesc>();
+    boolean multiFileSpray = false;
+    int     numFiles = 1;
+    int     totalFiles = 1;
+
+    if ((dest_tab.getNumBuckets() > 0) &&
+        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
+      enforceBucketing = true;
+      partnCols = getParitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, true);
+      partnColsNoConvert = getParitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input, false);
+    }
+
+    if ((dest_tab.getSortCols() != null) &&
+        (dest_tab.getSortCols().size() > 0) &&
+        (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING))) {
+      enforceSorting = true;
+      sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
+      if (!enforceBucketing) {
+        partnCols = sortCols;
+        partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
+      }
+    }
+
+    if (enforceBucketing || enforceSorting) {
+      int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
+      int numBuckets  = dest_tab.getNumBuckets();
+      if (numBuckets > maxReducers) {
+        multiFileSpray = true;
+        totalFiles = numBuckets;
+        if (totalFiles % maxReducers == 0) {
+          numFiles = totalFiles / maxReducers;
+        }
+        else {
+          // find the number of reducers such that it is a divisor of totalFiles
+          maxReducers = getReducersBucketing(totalFiles, maxReducers);
+          numFiles = totalFiles/maxReducers;
+        }
+      }
+      else {
+        maxReducers = numBuckets;
+      }
+
+      input = genReduceSinkPlanForSortingBucketing(dest_tab, input, sortCols, partnCols, maxReducers);
+      ctx.setMultiFileSpray(multiFileSpray);
+      ctx.setNumFiles(numFiles);
+      ctx.setPartnCols(partnColsNoConvert);
+      ctx.setTotalFiles(totalFiles);
+    }
+    return input;
+  }
+
   @SuppressWarnings("nls")
   private Operator genFileSinkPlan(String dest, QB qb, Operator input)
       throws SemanticException {
@@ -2984,47 +3121,13 @@
     TableDesc table_desc = null;
     int currentTableId = 0;
     boolean isLocal = false;
-    boolean multiFileSpray = false;
-    int numFiles = 1;
-    int totalFiles = 1;
-    ArrayList<ExprNodeDesc> partnCols = null;
+    SortBucketRSCtx rsCtx = new SortBucketRSCtx();
 
     switch (dest_type.intValue()) {
     case QBMetaData.DEST_TABLE: {
 
       dest_tab = qbm.getDestTableForAlias(dest);
 
-      // If the table is bucketed, and bucketing is enforced, do the following:
-      // If the number of buckets is smaller than the number of maximum reducers,
-      // create those many reducers.
-      // If not, create a multiFileSink instead of FileSink - the multiFileSink will
-      // spray the data into multiple buckets. That way, we can support a very large
-      // number of buckets without needing a very large number of reducers.
-
-      if ((dest_tab.getNumBuckets() > 0) &&
-          (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
-        int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
-        int numBuckets  = dest_tab.getNumBuckets();
-        if (numBuckets > maxReducers) {
-          multiFileSpray = true;
-          totalFiles = numBuckets;
-          if (totalFiles % maxReducers == 0) {
-            numFiles = totalFiles / maxReducers;
-          }
-          else {
-            // find the number of reducers such that it is a divisor of totalFiles
-            maxReducers = getReducersBucketing(totalFiles, maxReducers);
-            numFiles = totalFiles/maxReducers;
-          }
-        }
-        else {
-          maxReducers = numBuckets;
-        }
-
-        partnCols = getParitionColsFromBucketCols(dest_tab, input);
-        input = genReduceSinkPlanForBucketing(dest_tab, input, partnCols, maxReducers);
-      }
-
       // check for partition
       List<FieldSchema> parts = dest_tab.getPartitionKeys();
       if (parts != null && parts.size() > 0) {
@@ -3034,6 +3137,9 @@
       queryTmpdir = ctx.getExternalTmpFileURI(dest_path.toUri());
       table_desc = Utilities.getTableDesc(dest_tab);
 
+      // Add sorting/bucketing if needed
+      input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx);
+
       idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName());
       currentTableId = destTableId;
       destTableId++;
@@ -3053,34 +3159,13 @@
       Partition dest_part = qbm.getDestPartitionForAlias(dest);
       dest_tab = dest_part.getTable();
 
-      if ((dest_tab.getNumBuckets() > 0) &&
-          (conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETING))) {
-        int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);
-        int numBuckets  = dest_tab.getNumBuckets();
-        if (numBuckets > maxReducers) {
-          multiFileSpray = true;
-          totalFiles = numBuckets;
-          if (totalFiles % maxReducers == 0) {
-            numFiles = totalFiles / maxReducers;
-          }
-          else {
-            // find the number of reducers such that it is a divisor of totalFiles
-            maxReducers = getReducersBucketing(totalFiles, maxReducers);
-            numFiles = totalFiles/maxReducers;
-          }
-        }
-        else {
-          maxReducers = numBuckets;
-        }
-
-        partnCols = getParitionColsFromBucketCols(dest_tab, input);
-        input = genReduceSinkPlanForBucketing(dest_tab, input, partnCols, maxReducers);
-      }
-
       dest_path = dest_part.getPath()[0];
       queryTmpdir = ctx.getExternalTmpFileURI(dest_path.toUri());
       table_desc = Utilities.getTableDesc(dest_tab);
 
+      // Add sorting/bucketing if needed
+      input = genBucketingSortingDest(dest, input, qb, table_desc, dest_tab, rsCtx);
+
       idToTableNameMap.put(String.valueOf(destTableId), dest_tab.getTableName());
       currentTableId = destTableId;
       destTableId++;
@@ -3232,7 +3317,7 @@
     Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(
         new FileSinkDesc(queryTmpdir, table_desc, conf
         .getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId,
-        multiFileSpray, numFiles, totalFiles, partnCols),
+        rsCtx.isMultiFileSpray(), rsCtx.getNumFiles(), rsCtx.getTotalFiles(), rsCtx.getPartnCols()),
         fsRS, input), inputRR);
 
     LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: "
@@ -3463,38 +3548,109 @@
     return genLimitPlan(dest, qb, curr, limit);
   }
 
-  private ArrayList<ExprNodeDesc> getParitionColsFromBucketCols(Table tab, Operator input) {
+  private ArrayList<ExprNodeDesc> getParitionColsFromBucketCols(String dest, QB qb, Table tab,
+                                                                TableDesc table_desc, Operator input, boolean convert)
+    throws SemanticException {
     RowResolver inputRR = opParseCtx.get(input).getRR();
     List<String> tabBucketCols = tab.getBucketCols();
     List<FieldSchema> tabCols  = tab.getCols();
 
     // Partition by the bucketing column
-    ArrayList<ExprNodeDesc> partitionCols = new ArrayList<ExprNodeDesc>();
+    List<Integer> posns = new ArrayList<Integer>();
+
     for (String bucketCol : tabBucketCols) {
       int pos = 0;
       for (FieldSchema tabCol : tabCols) {
         if (bucketCol.equals(tabCol.getName())) {
-          ColumnInfo colInfo = inputRR.getColumnInfos().get(pos);
-          partitionCols.add(new ExprNodeColumnDesc(colInfo.getType(), colInfo
-                                                   .getInternalName(), colInfo.getTabAlias(), colInfo
-                                                   .getIsPartitionCol()));
+          posns.add(pos);
+          break;
+        }
+        pos++;
+      }
+    }
+
+    return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
+  }
+
+  private ArrayList<ExprNodeDesc> genConvertCol(String dest, QB qb, Table tab, TableDesc table_desc, Operator input,
+                                                List<Integer> posns, boolean convert) throws SemanticException {
+    StructObjectInspector oi = null;
+    try {
+      Deserializer deserializer = table_desc.getDeserializerClass()
+          .newInstance();
+      deserializer.initialize(conf, table_desc.getProperties());
+      oi = (StructObjectInspector) deserializer.getObjectInspector();
+    } catch (Exception e) {
+      throw new SemanticException(e);
+    }
 
+    List<? extends StructField> tableFields = oi.getAllStructFieldRefs();
+    ArrayList<ColumnInfo> rowFields = opParseCtx.get(input).getRR()
+        .getColumnInfos();
+
+    // Check column type
+    int columnNumber = posns.size();
+    ArrayList<ExprNodeDesc> expressions = new ArrayList<ExprNodeDesc>(columnNumber);
+    for (Integer posn: posns) {
+      ObjectInspector tableFieldOI = tableFields.get(posn).getFieldObjectInspector();
+      TypeInfo tableFieldTypeInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(tableFieldOI);
+      TypeInfo rowFieldTypeInfo = rowFields.get(posn).getType();
+      ExprNodeDesc column = new ExprNodeColumnDesc(rowFieldTypeInfo, rowFields.get(posn).getInternalName(),
+                                                   rowFields.get(posn).getTabAlias(), rowFields.get(posn).getIsPartitionCol());
+
+      if (convert && !tableFieldTypeInfo.equals(rowFieldTypeInfo)) {
+        // need to do some conversions here
+        if (tableFieldTypeInfo.getCategory() != Category.PRIMITIVE) {
+          // cannot convert to complex types
+          column = null;
+        } else {
+          column = TypeCheckProcFactory.DefaultExprProcessor
+            .getFuncExprNodeDesc(tableFieldTypeInfo.getTypeName(), column);
+        }
+        if (column == null) {
+          String reason = "Cannot convert column " + posn + " from "
+            + rowFieldTypeInfo + " to " + tableFieldTypeInfo + ".";
+          throw new SemanticException(ErrorMsg.TARGET_TABLE_COLUMN_MISMATCH
+                                      .getMsg(qb.getParseInfo().getDestForClause(dest), reason));
+        }
+      }
+      expressions.add(column);
+    }
+
+    return expressions;
+  }
+
+  private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc, Operator input, boolean convert)
+    throws SemanticException {
+    RowResolver inputRR = opParseCtx.get(input).getRR();
+    List<Order> tabSortCols = tab.getSortCols();
+    List<FieldSchema> tabCols  = tab.getCols();
+
+    // Partition by the bucketing column
+    List<Integer> posns = new ArrayList<Integer>();
+    for (Order sortCol : tabSortCols) {
+      int pos = 0;
+      for (FieldSchema tabCol : tabCols) {
+        if (sortCol.getCol().equals(tabCol.getName())) {
+          ColumnInfo colInfo = inputRR.getColumnInfos().get(pos);
+          posns.add(pos);
           break;
         }
         pos++;
       }
     }
-    return partitionCols;
+
+    return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
   }
 
   @SuppressWarnings("nls")
-  private Operator genReduceSinkPlanForBucketing(Table tab, Operator input, ArrayList<ExprNodeDesc> partitionCols,
-      int numReducers)
+  private Operator genReduceSinkPlanForSortingBucketing(Table tab, Operator input,
+                                                        ArrayList<ExprNodeDesc> sortCols,
+                                                        ArrayList<ExprNodeDesc> partitionCols,
+                                                        int numReducers)
     throws SemanticException {
     RowResolver inputRR = opParseCtx.get(input).getRR();
 
-    ArrayList<ExprNodeDesc> sortCols = new ArrayList<ExprNodeDesc>();
-
     // For the generation of the values expression just get the inputs
     // signature and generate field expressions for those
     Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
@@ -3511,9 +3667,15 @@
     for (int i = 0; i < valueCols.size(); i++) {
       outputColumns.add(getColumnInternalName(i));
     }
+
+    StringBuilder order = new StringBuilder();
+    for (int i = 0; i < sortCols.size(); i++) {
+      order.append("+");
+    }
+
     Operator interim = putOpInsertMap(OperatorFactory.getAndMakeChild(PlanUtils
         .getReduceSinkDesc(sortCols, valueCols, outputColumns, false, -1,
-                           partitionCols, new String(), numReducers),
+                           partitionCols, order.toString(), numReducers),
         new RowSchema(inputRR.getColumnInfos()), input), inputRR);
     interim.setColumnExprMap(colExprMap);
 

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/bucket4.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/bucket4.q?rev=916550&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/bucket4.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/bucket4.q Fri Feb 26 03:19:18 2010
@@ -0,0 +1,20 @@
+set hive.enforce.bucketing = true;
+set hive.enforce.sorting = true;
+set hive.exec.reducers.max = 1;
+
+drop table bucket4_1;
+CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+
+explain extended
+insert overwrite table bucket4_1
+select * from src;
+
+insert overwrite table bucket4_1
+select * from src;
+
+explain
+select * from bucket4_1 tablesample (bucket 1 out of 2) s;
+
+select * from bucket4_1 tablesample (bucket 1 out of 2) s;
+
+drop table bucket4_1;
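
The UDFToInteger(_col0) expressions in the updated .q.out files below come from genConvertCol: when enforcement is on, each bucketing or sort expression is converted to the destination column's type, so src's string key is cast before it is used as a partition/sort key, and conversion to a complex type is rejected. A rough standalone sketch of that decision; the Category enum and the returned cast string are stand-ins for illustration, not Hive's ObjectInspector API:

public class ConvertColSketch {
  // Hypothetical stand-in for the categories of Hive's TypeInfo.
  enum Category { PRIMITIVE, LIST, MAP, STRUCT }

  // Decide what happens to one bucketing/sort expression, mirroring the three
  // outcomes in genConvertCol: keep it, wrap it in a conversion, or reject it.
  static String convertColumn(String columnExpr, String rowType, String tableType,
                              Category tableCategory) {
    if (rowType.equals(tableType)) {
      return columnExpr;  // types already match, nothing to do
    }
    if (tableCategory != Category.PRIMITIVE) {
      // The patch throws SemanticException(TARGET_TABLE_COLUMN_MISMATCH) here.
      throw new IllegalArgumentException(
          "Cannot convert column from " + rowType + " to " + tableType);
    }
    // The patch asks the function registry for the conversion UDF of the target
    // type name; for int that resolves to UDFToInteger, as the plans below show.
    return "cast(" + columnExpr + " as " + tableType + ")";
  }

  public static void main(String[] args) {
    // src.key is a string while bucket4_1.key is an int, so a cast is injected.
    System.out.println(convertColumn("_col0", "string", "int", Category.PRIMITIVE));
  }
}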

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket1.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket1.q.out?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket1.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket1.q.out Fri Feb 26 03:19:18 2010
@@ -39,8 +39,8 @@
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns:
-                      expr: _col0
-                      type: string
+                      expr: UDFToInteger(_col0)
+                      type: int
                 tag: -1
                 value expressions:
                       expr: _col0
@@ -67,7 +67,7 @@
               serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1266535134
+              transient_lastDdlTime 1267129667
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -83,7 +83,7 @@
                 serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1266535134
+                transient_lastDdlTime 1267129667
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: src
             name: src
@@ -99,7 +99,7 @@
             File Output Operator
               compressed: false
               GlobalTableId: 1
-              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-18-54_872_8904445139398831015/10000
+              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_12-27-48_091_9013372046421498540/10000
               NumFilesPerFileSink: 1
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
@@ -116,7 +116,7 @@
                     serialization.ddl struct bucket1_1 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    transient_lastDdlTime 1266535134
+                    transient_lastDdlTime 1267129668
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: bucket1_1
               TotalFiles: 1
@@ -126,7 +126,7 @@
     Move Operator
       tables:
           replace: true
-          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-18-54_872_8904445139398831015/10000
+          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_12-27-48_091_9013372046421498540/10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -142,10 +142,10 @@
                 serialization.ddl struct bucket1_1 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1266535134
+                transient_lastDdlTime 1267129668
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucket1_1
-          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-18-54_872_8904445139398831015/10001
+          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_12-27-48_091_9013372046421498540/10001
 
 
 PREHOOK: query: insert overwrite table bucket1_1
@@ -161,11 +161,11 @@
 PREHOOK: query: select * from bucket1_1 order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket1_1
-PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-18-59_053_2682513110201565086/10000
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_12-27-52_282_2132999711625321779/10000
 POSTHOOK: query: select * from bucket1_1 order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucket1_1
-POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-18-59_053_2682513110201565086/10000
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_12-27-52_282_2132999711625321779/10000
 0	val_0
 0	val_0
 0	val_0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket2.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket2.q.out?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket2.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket2.q.out Fri Feb 26 03:19:18 2010
@@ -39,8 +39,8 @@
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns:
-                      expr: _col0
-                      type: string
+                      expr: UDFToInteger(_col0)
+                      type: int
                 tag: -1
                 value expressions:
                       expr: _col0
@@ -67,7 +67,7 @@
               serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1266535991
+              transient_lastDdlTime 1267133685
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -83,7 +83,7 @@
                 serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1266535991
+                transient_lastDdlTime 1267133685
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: src
             name: src
@@ -99,7 +99,7 @@
             File Output Operator
               compressed: false
               GlobalTableId: 1
-              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-33-12_469_4081195671679736172/10000
+              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-34-46_333_5166554257056855352/10000
               NumFilesPerFileSink: 2
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
@@ -116,7 +116,7 @@
                     serialization.ddl struct bucket2_1 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    transient_lastDdlTime 1266535992
+                    transient_lastDdlTime 1267133686
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: bucket2_1
               TotalFiles: 2
@@ -126,7 +126,7 @@
     Move Operator
       tables:
           replace: true
-          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-33-12_469_4081195671679736172/10000
+          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-34-46_333_5166554257056855352/10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -142,10 +142,10 @@
                 serialization.ddl struct bucket2_1 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1266535992
+                transient_lastDdlTime 1267133686
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucket2_1
-          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-33-12_469_4081195671679736172/10001
+          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-34-46_333_5166554257056855352/10001
 
 
 PREHOOK: query: insert overwrite table bucket2_1
@@ -221,11 +221,11 @@
 PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket2_1
-PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-33-17_502_8138633127834770024/10000
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-34-51_044_2605954984360069823/10000
 POSTHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucket2_1
-POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-33-17_502_8138633127834770024/10000
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-34-51_044_2605954984360069823/10000
 0	val_0
 0	val_0
 0	val_0

Modified: hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket3.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket3.q.out?rev=916550&r1=916549&r2=916550&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket3.q.out (original)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket3.q.out Fri Feb 26 03:19:18 2010
@@ -39,8 +39,8 @@
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns:
-                      expr: _col0
-                      type: string
+                      expr: UDFToInteger(_col0)
+                      type: int
                 tag: -1
                 value expressions:
                       expr: _col0
@@ -67,7 +67,7 @@
               serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              transient_lastDdlTime 1266536485
+              transient_lastDdlTime 1267133766
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -83,7 +83,7 @@
                 serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1266536485
+                transient_lastDdlTime 1267133766
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: src
             name: src
@@ -99,7 +99,7 @@
             File Output Operator
               compressed: false
               GlobalTableId: 1
-              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-41-26_502_8846263313357969996/10000
+              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-36-07_876_1324315962912856358/10000
               NumFilesPerFileSink: 2
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
@@ -117,7 +117,7 @@
                     serialization.ddl struct bucket3_1 { i32 key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    transient_lastDdlTime 1266536486
+                    transient_lastDdlTime 1267133767
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: bucket3_1
               TotalFiles: 2
@@ -129,7 +129,7 @@
           partition:
             ds 1
           replace: true
-          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-41-26_502_8846263313357969996/10000
+          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-36-07_876_1324315962912856358/10000
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -146,10 +146,10 @@
                 serialization.ddl struct bucket3_1 { i32 key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                transient_lastDdlTime 1266536486
+                transient_lastDdlTime 1267133767
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: bucket3_1
-          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-41-26_502_8846263313357969996/10001
+          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-36-07_876_1324315962912856358/10001
 
 
 PREHOOK: query: insert overwrite table bucket3_1 partition (ds='1')
@@ -243,11 +243,11 @@
 PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket3_1@ds=1
-PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-41-35_350_1043920384757897875/10000
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-36-16_445_4212015408358894345/10000
 POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucket3_1@ds=1
-POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-18_15-41-35_350_1043920384757897875/10000
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-36-16_445_4212015408358894345/10000
 0	val_0	1
 0	val_0	1
 0	val_0	1

Added: hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket4.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket4.q.out?rev=916550&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket4.q.out (added)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/bucket4.q.out Fri Feb 26 03:19:18 2010
@@ -0,0 +1,472 @@
+PREHOOK: query: drop table bucket4_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table bucket4_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@bucket4_1
+PREHOOK: query: explain extended
+insert overwrite table bucket4_1
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket4_1
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB bucket4_1)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        src 
+          TableScan
+            alias: src
+            Select Operator
+              expressions:
+                    expr: key
+                    type: string
+                    expr: value
+                    type: string
+              outputColumnNames: _col0, _col1
+              Reduce Output Operator
+                key expressions:
+                      expr: UDFToInteger(_col0)
+                      type: int
+                sort order: +
+                Map-reduce partition columns:
+                      expr: UDFToInteger(_col0)
+                      type: int
+                tag: -1
+                value expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+      Needs Tagging: false
+      Path -> Alias:
+        file:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/src [src]
+      Path -> Partition:
+        file:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/src 
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.types string:string
+              file.inputformat org.apache.hadoop.mapred.TextInputFormat
+              file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              location file:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/src
+              name src
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              transient_lastDdlTime 1267133823
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.types string:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location file:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/src
+                name src
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1267133823
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: src
+            name: src
+      Reduce Operator Tree:
+        Extract
+          Select Operator
+            expressions:
+                  expr: UDFToInteger(_col0)
+                  type: int
+                  expr: _col1
+                  type: string
+            outputColumnNames: _col0, _col1
+            File Output Operator
+              compressed: false
+              GlobalTableId: 1
+              directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-37-04_798_2890848395538612897/10000
+              NumFilesPerFileSink: 2
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    SORTBUCKETCOLSPREFIX TRUE
+                    bucket_count 2
+                    bucket_field_name key
+                    columns key,value
+                    columns.types int:string
+                    file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                    file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    location file:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/bucket4_1
+                    name bucket4_1
+                    serialization.ddl struct bucket4_1 { i32 key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    transient_lastDdlTime 1267133824
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: bucket4_1
+              TotalFiles: 2
+              MultiFileSpray: true
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          source: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-37-04_798_2890848395538612897/10000
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.types int:string
+                file.inputformat org.apache.hadoop.mapred.TextInputFormat
+                file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                location file:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/bucket4_1
+                name bucket4_1
+                serialization.ddl struct bucket4_1 { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                transient_lastDdlTime 1267133824
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: bucket4_1
+          tmp directory: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-37-04_798_2890848395538612897/10001
+
+
+PREHOOK: query: insert overwrite table bucket4_1
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket4_1
+POSTHOOK: query: insert overwrite table bucket4_1
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket4_1
+PREHOOK: query: explain
+select * from bucket4_1 tablesample (bucket 1 out of 2) s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket4_1 tablesample (bucket 1 out of 2) s
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_TABREF bucket4_1 (TOK_TABLESAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        s 
+          TableScan
+            alias: s
+            Filter Operator
+              predicate:
+                  expr: (((hash(key) & 2147483647) % 2) = 0)
+                  type: boolean
+              Filter Operator
+                predicate:
+                    expr: (((hash(key) & 2147483647) % 2) = 0)
+                    type: boolean
+                Select Operator
+                  expressions:
+                        expr: key
+                        type: int
+                        expr: value
+                        type: string
+                  outputColumnNames: _col0, _col1
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket4_1
+PREHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-37-09_385_8660032017511325986/10000
+POSTHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket4_1
+POSTHOOK: Output: file:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-02-25_13-37-09_385_8660032017511325986/10000
+0	val_0
+0	val_0
+0	val_0
+2	val_2
+4	val_4
+8	val_8
+10	val_10
+12	val_12
+12	val_12
+18	val_18
+18	val_18
+20	val_20
+24	val_24
+24	val_24
+26	val_26
+26	val_26
+28	val_28
+30	val_30
+34	val_34
+42	val_42
+42	val_42
+44	val_44
+54	val_54
+58	val_58
+58	val_58
+64	val_64
+66	val_66
+70	val_70
+70	val_70
+70	val_70
+72	val_72
+72	val_72
+74	val_74
+76	val_76
+76	val_76
+78	val_78
+80	val_80
+82	val_82
+84	val_84
+84	val_84
+86	val_86
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+96	val_96
+98	val_98
+98	val_98
+100	val_100
+100	val_100
+104	val_104
+104	val_104
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+120	val_120
+120	val_120
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+134	val_134
+134	val_134
+136	val_136
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+146	val_146
+146	val_146
+150	val_150
+152	val_152
+152	val_152
+156	val_156
+158	val_158
+160	val_160
+162	val_162
+164	val_164
+164	val_164
+166	val_166
+168	val_168
+170	val_170
+172	val_172
+172	val_172
+174	val_174
+174	val_174
+176	val_176
+176	val_176
+178	val_178
+180	val_180
+186	val_186
+190	val_190
+192	val_192
+194	val_194
+196	val_196
+200	val_200
+200	val_200
+202	val_202
+208	val_208
+208	val_208
+208	val_208
+214	val_214
+216	val_216
+216	val_216
+218	val_218
+222	val_222
+224	val_224
+224	val_224
+226	val_226
+228	val_228
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+230	val_230
+238	val_238
+238	val_238
+242	val_242
+242	val_242
+244	val_244
+248	val_248
+252	val_252
+256	val_256
+256	val_256
+258	val_258
+260	val_260
+262	val_262
+266	val_266
+272	val_272
+272	val_272
+274	val_274
+278	val_278
+278	val_278
+280	val_280
+280	val_280
+282	val_282
+282	val_282
+284	val_284
+286	val_286
+288	val_288
+288	val_288
+292	val_292
+296	val_296
+298	val_298
+298	val_298
+298	val_298
+302	val_302
+306	val_306
+308	val_308
+310	val_310
+316	val_316
+316	val_316
+316	val_316
+318	val_318
+318	val_318
+318	val_318
+322	val_322
+322	val_322
+332	val_332
+336	val_336
+338	val_338
+342	val_342
+342	val_342
+344	val_344
+344	val_344
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+348	val_348
+356	val_356
+360	val_360
+362	val_362
+364	val_364
+366	val_366
+368	val_368
+374	val_374
+378	val_378
+382	val_382
+382	val_382
+384	val_384
+384	val_384
+384	val_384
+386	val_386
+392	val_392
+394	val_394
+396	val_396
+396	val_396
+396	val_396
+400	val_400
+402	val_402
+404	val_404
+404	val_404
+406	val_406
+406	val_406
+406	val_406
+406	val_406
+414	val_414
+414	val_414
+418	val_418
+424	val_424
+424	val_424
+430	val_430
+430	val_430
+430	val_430
+432	val_432
+436	val_436
+438	val_438
+438	val_438
+438	val_438
+444	val_444
+446	val_446
+448	val_448
+452	val_452
+454	val_454
+454	val_454
+454	val_454
+458	val_458
+458	val_458
+460	val_460
+462	val_462
+462	val_462
+466	val_466
+466	val_466
+466	val_466
+468	val_468
+468	val_468
+468	val_468
+468	val_468
+470	val_470
+472	val_472
+478	val_478
+478	val_478
+480	val_480
+480	val_480
+480	val_480
+482	val_482
+484	val_484
+490	val_490
+492	val_492
+492	val_492
+494	val_494
+496	val_496
+498	val_498
+498	val_498
+498	val_498
+PREHOOK: query: drop table bucket4_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table bucket4_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Output: default@bucket4_1