Posted to commits@hive.apache.org by jc...@apache.org on 2016/03/08 19:09:44 UTC

[1/2] hive git commit: HIVE-13210: Revert changes in HIVE-12994 related to metastore (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master 7c4599baa -> cc8893e48
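
Context for the hunks below: this revert narrows the metastore Order thrift
object (and the optimizer's SortCol, see further down) back to a column name
plus a sort direction, dropping the per-column null ordering that HIVE-12994
had added. A minimal sketch of the restored two-argument form, assuming the
Hive metastore API jar is on the classpath; the Thrift-generated toString
prints the same shape that appears in the q.out hunks below:

    import org.apache.hadoop.hive.metastore.api.Order;

    public class OrderSketch {
      public static void main(String[] args) {
        // Column "b", ascending (order == 1); null placement is no longer stored.
        Order sortCol = new Order("b", 1);
        System.out.println(sortCol);  // Order(col:b, order:1)
      }
    }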


http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
index 9159120..391cfda 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingOpProcFactory.java
@@ -166,7 +166,6 @@ public class BucketingSortingOpProcFactory {
         }
 
         String sortOrder = rsDesc.getOrder();
-        String nullSortOrder = rsDesc.getNullOrder();
         List<ExprNodeDesc> keyCols = rsDesc.getKeyCols();
         List<ExprNodeDesc> valCols = ExprNodeDescUtils.backtrack(joinValues, jop, parent);
 
@@ -187,8 +186,7 @@ public class BucketingSortingOpProcFactory {
               newSortCols[keyIndex].addAlias(vname, vindex);
             } else {
               newBucketCols[keyIndex] = new BucketCol(vname, vindex);
-              newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex),
-                      nullSortOrder.charAt(keyIndex));
+              newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex));
             }
           }
         }
@@ -313,8 +311,7 @@ public class BucketingSortingOpProcFactory {
         int sortIndex = indexOfColName(sortCols, columnExpr.getColumn());
         if (sortIndex != -1) {
           if (newSortCols[sortIndex] == null) {
-            newSortCols[sortIndex] = new SortCol(sortCols.get(sortIndex).getSortOrder(),
-                    sortCols.get(sortIndex).getNullSortOrder());
+            newSortCols[sortIndex] = new SortCol(sortCols.get(sortIndex).getSortOrder());
           }
           newSortCols[sortIndex].addAlias(
               colInfos.get(colInfosIndex).getInternalName(), colInfosIndex);
@@ -439,7 +436,7 @@ public class BucketingSortingOpProcFactory {
   private static List<SortCol> getNewSortCols(List<SortCol> sortCols, List<ColumnInfo> colInfos) {
     List<SortCol> newSortCols = new ArrayList<SortCol>(sortCols.size());
     for (int i = 0; i < sortCols.size(); i++) {
-      SortCol sortCol = new SortCol(sortCols.get(i).getSortOrder(), sortCols.get(i).getNullSortOrder());
+      SortCol sortCol = new SortCol(sortCols.get(i).getSortOrder());
       for (Integer index : sortCols.get(i).getIndexes()) {
         // The only time this condition should be false is in the case of dynamic partitioning
         if (index < colInfos.size()) {
@@ -540,7 +537,6 @@ public class BucketingSortingOpProcFactory {
 
   static List<SortCol> extractSortCols(ReduceSinkOperator rop, List<ExprNodeDesc> outputValues) {
     String sortOrder = rop.getConf().getOrder();
-    String nullSortOrder = rop.getConf().getNullOrder();
     List<SortCol> sortCols = new ArrayList<SortCol>();
     ArrayList<ExprNodeDesc> keyCols = rop.getConf().getKeyCols();
     for (int i = 0; i < keyCols.size(); i++) {
@@ -553,7 +549,7 @@ public class BucketingSortingOpProcFactory {
         break;
       }
       sortCols.add(new SortCol(((ExprNodeColumnDesc) keyCol).getColumn(), index,
-              sortOrder.charAt(i), nullSortOrder.charAt(i)));
+              sortOrder.charAt(i)));
     }
     // If the sorted columns can't all be found in the values then the data is only sorted on
     // the columns seen up until now
@@ -654,7 +650,6 @@ public class BucketingSortingOpProcFactory {
 
       GroupByDesc groupByDesc = gop.getConf();
       String sortOrder = rop.getConf().getOrder();
-      String nullSortOrder = rop.getConf().getNullOrder();
       List<BucketCol> bucketCols = new ArrayList<BucketCol>();
       List<SortCol> sortCols = new ArrayList<SortCol>();
       assert rop.getConf().getKeyCols().size() <= rop.getSchema().getSignature().size();
@@ -665,7 +660,7 @@ public class BucketingSortingOpProcFactory {
         }
         String colName = rop.getSchema().getSignature().get(i).getInternalName();
         bucketCols.add(new BucketCol(colName, i));
-        sortCols.add(new SortCol(colName, i, sortOrder.charAt(i), nullSortOrder.charAt(i)));
+        sortCols.add(new SortCol(colName, i, sortOrder.charAt(i)));
       }
       bctx.setBucketedCols(rop, bucketCols);
       bctx.setSortedCols(rop, sortCols);
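
Net effect of the hunks above: SortCol goes back to carrying only a column
alias, a key position, and a direction character ('+' ascending, '-'
descending), with no null-ordering character. A minimal sketch of the
restored call shape, assuming SortCol is importable as the nested class
shown (it lives in BucketingSortingCtx):

    import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;

    public class SortColSketch {
      public static void main(String[] args) {
        // Restored three-argument form: alias, key position, direction.
        SortCol sc = new SortCol("key", 0, '+');
        System.out.println(sc.getSortOrder());  // +
      }
    }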

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index f10a40a..b36a9a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -653,7 +653,7 @@ public abstract class BaseSemanticAnalyzer {
     return colList;
   }
 
-  protected List<Order> getColumnNamesOrder(ASTNode ast) {
+  protected List<Order> getColumnNamesOrder(ASTNode ast) throws SemanticException {
     List<Order> colList = new ArrayList<Order>();
     int numCh = ast.getChildCount();
     for (int i = 0; i < numCh; i++) {
@@ -662,19 +662,19 @@ public abstract class BaseSemanticAnalyzer {
         child = (ASTNode) child.getChild(0);
         if (child.getToken().getType() == HiveParser.TOK_NULLS_FIRST) {
           colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
-              HIVE_COLUMN_ORDER_ASC, HIVE_COLUMN_NULLS_FIRST));
+              HIVE_COLUMN_ORDER_ASC));
         } else {
-          colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
-              HIVE_COLUMN_ORDER_ASC, HIVE_COLUMN_NULLS_LAST));
+          throw new SemanticException("create/alter table: "
+                  + "not supported NULLS LAST for ORDER BY in ASC order");
         }
       } else {
         child = (ASTNode) child.getChild(0);
         if (child.getToken().getType() == HiveParser.TOK_NULLS_LAST) {
           colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
-              HIVE_COLUMN_ORDER_DESC, HIVE_COLUMN_NULLS_LAST));
+              HIVE_COLUMN_ORDER_DESC));
         } else {
-          colList.add(new Order(unescapeIdentifier(child.getChild(0).getText()).toLowerCase(),
-              HIVE_COLUMN_ORDER_DESC, HIVE_COLUMN_NULLS_FIRST));
+          throw new SemanticException("create/alter table: "
+                  + "not supported NULLS FIRST for ORDER BY in DESC order");
         }
       }
     }
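
The BaseSemanticAnalyzer hunk above restores the pre-HIVE-12994 rule: a
SORTED BY column spec accepts only the default null placement for each
direction, NULLS FIRST with ASC and NULLS LAST with DESC; the other two
combinations now fail at analysis time with the SemanticExceptions shown.
A distilled, hypothetical sketch of that check (plain Java, not the actual
ASTNode walk):

    static void validateNullOrder(boolean ascending, boolean nullsFirst) throws Exception {
      if (ascending && !nullsFirst) {
        throw new Exception("create/alter table: not supported NULLS LAST for ORDER BY in ASC order");
      }
      if (!ascending && nullsFirst) {
        throw new Exception("create/alter table: not supported NULLS FIRST for ORDER BY in DESC order");
      }
      // The remaining pairs, ASC + NULLS FIRST and DESC + NULLS LAST, are accepted.
    }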

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 3e91e10..9ab091d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -117,7 +117,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.optimizer.ColumnPruner;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
 import org.apache.hadoop.hive.ql.optimizer.Transform;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
@@ -6358,7 +6357,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         (dest_tab.getSortCols().size() > 0)) {
       sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
       sortOrders = getSortOrders(dest, qb, dest_tab, input);
-      nullSortOrders = getNullSortOrders(dest, qb, dest_tab, input);
       if (!enforceBucketing && !dest_tab.isIndexTable()) {
         throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
       } else {
@@ -6393,12 +6391,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       }
 
       StringBuilder order = new StringBuilder();
+      StringBuilder nullOrder = new StringBuilder();
       for (int sortOrder : sortOrders) {
         order.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? '+' : '-');
-      }
-      StringBuilder nullOrder = new StringBuilder();
-      for (int pos : nullSortOrders) {
-        nullOrder.append(pos == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST ? 'a' : 'z');
+        nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z');
       }
       input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(),
               maxReducers, (AcidUtils.isAcidTable(dest_tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID));
@@ -7362,23 +7358,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     return orders;
   }
 
-  private ArrayList<Integer> getNullSortOrders(String dest, QB qb, Table tab, Operator input)
-      throws SemanticException {
-    List<Order> tabSortCols = tab.getSortCols();
-    List<FieldSchema> tabCols = tab.getCols();
-
-    ArrayList<Integer> orders = new ArrayList<Integer>();
-    for (Order sortCol : tabSortCols) {
-      for (FieldSchema tabCol : tabCols) {
-        if (sortCol.getCol().equals(tabCol.getName())) {
-          orders.add(sortCol.getNullOrder());
-          break;
-        }
-      }
-    }
-    return orders;
-  }
-
   private Operator genReduceSinkPlan(String dest, QB qb, Operator<?> input,
       int numReducers, boolean hasOrderBy) throws SemanticException {
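
With getNullSortOrders removed, the SemanticAnalyzer hunk above derives the
null-ordering string handed to genReduceSinkPlan from the sort direction
alone. A standalone sketch of that derivation (the HIVE_COLUMN_ORDER_ASC
value is copied from BaseSemanticAnalyzer; the wrapper class and method are
hypothetical):

    public class NullOrderSketch {
      static final int HIVE_COLUMN_ORDER_ASC = 1;  // as defined in BaseSemanticAnalyzer

      // '+'/'-' encodes the direction; 'a'/'z' the implied null placement
      // (nulls sort first under ascending, last under descending).
      static String[] buildOrderStrings(int[] sortOrders) {
        StringBuilder order = new StringBuilder();
        StringBuilder nullOrder = new StringBuilder();
        for (int sortOrder : sortOrders) {
          order.append(sortOrder == HIVE_COLUMN_ORDER_ASC ? '+' : '-');
          nullOrder.append(sortOrder == HIVE_COLUMN_ORDER_ASC ? 'a' : 'z');
        }
        return new String[] { order.toString(), nullOrder.toString() };
      }

      public static void main(String[] args) {
        String[] s = buildOrderStrings(new int[] { 1, 0 });
        System.out.println(s[0] + " " + s[1]);  // prints: +- az
      }
    }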
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 7f6430f..cf7eb70 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -224,7 +224,7 @@ public class TestWorker extends CompactorTest {
   @Test
   public void sortedTable() throws Exception {
     List<Order> sortCols = new ArrayList<Order>(1);
-    sortCols.add(new Order("b", 1, 0));
+    sortCols.add(new Order("b", 1));
 
     Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols, false);
 
@@ -249,7 +249,7 @@ public class TestWorker extends CompactorTest {
   @Test
   public void sortedPartition() throws Exception {
     List<Order> sortCols = new ArrayList<Order>(1);
-    sortCols.add(new Order("b", 1, 0));
+    sortCols.add(new Order("b", 1));
 
     Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols, false);
     Partition p = newPartition(t, "today", sortCols);

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
index 9eea7f7..3b71598 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
@@ -232,7 +232,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	8                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -281,7 +281,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	8                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test changing the bucket columns
@@ -326,7 +326,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	8                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -375,7 +375,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	8                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test changing the number of buckets
@@ -420,7 +420,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -469,7 +469,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test changing the sort columns
@@ -514,7 +514,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:value, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -563,7 +563,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:value, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test changing the sort order
@@ -608,7 +608,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src
@@ -657,7 +657,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test a sorted partition gets converted to unsorted

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
index af2f47a..cab3de4 100644
--- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
+++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
@@ -408,7 +408,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	12                  	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test changing sort order
@@ -455,7 +455,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	12                  	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:value, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:value, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test removing test order

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
index 46d5b34..184d2e4 100644
--- a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
@@ -110,7 +110,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[b]                 	 
-Sort Columns:       	[Order(col:b, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:b, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Turn off clustering for a partition
@@ -200,7 +200,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[a, b]              	 
-Sort Columns:       	[Order(col:a, order:0, nullOrder:1), Order(col:b, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:a, order:0), Order(col:b, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: drop table alter_table_partition_clusterby_sortby

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
index 8a88bc4..6e1ec59 100644
--- a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
@@ -34,7 +34,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[a]                 	 
-Sort Columns:       	[Order(col:a, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:a, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: alter table alter_table_not_sorted not sorted

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/authorization_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_index.q.out b/ql/src/test/results/clientpositive/authorization_index.q.out
index a6ec998..adc02ad 100644
--- a/ql/src/test/results/clientpositive/authorization_index.q.out
+++ b/ql/src/test/results/clientpositive/authorization_index.q.out
@@ -41,7 +41,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
-Sort Columns:       	[Order(col:a, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:a, order:1)]	 
 PREHOOK: query: alter index t1_index on t1 rebuild
 PREHOOK: type: ALTERINDEX_REBUILD
 PREHOOK: Input: default@t1

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket5.q.out b/ql/src/test/results/clientpositive/bucket5.q.out
index aa47ba8..fa926aa 100644
--- a/ql/src/test/results/clientpositive/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/bucket5.q.out
@@ -535,7 +535,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: SELECT * FROM bucketed_table TABLESAMPLE (BUCKET 1 OUT OF 2) s LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
index c5d695e..d03bfe4 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
@@ -1155,7 +1155,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1196,7 +1196,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select count(*) from over1k_part_orc
@@ -2129,7 +2129,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2170,7 +2170,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc
@@ -2339,7 +2339,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2380,7 +2380,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 9a222b1..857d609 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -1060,7 +1060,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1101,7 +1101,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select count(*) from over1k_part
@@ -2027,7 +2027,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2068,7 +2068,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select * from over1k_part_buck_sort2
@@ -2170,7 +2170,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2211,7 +2211,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select * from over1k_part_buck_sort2

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/index_skewtable.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_skewtable.q.out b/ql/src/test/results/clientpositive/index_skewtable.q.out
index 45e6834..972789d 100644
--- a/ql/src/test/results/clientpositive/index_skewtable.q.out
+++ b/ql/src/test/results/clientpositive/index_skewtable.q.out
@@ -57,7 +57,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 PREHOOK: query: ALTER INDEX kv_index ON kv REBUILD
 PREHOOK: type: ALTERINDEX_REBUILD
 PREHOOK: Input: default@kv

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
index 709ca10..182dd3a 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out
@@ -62,7 +62,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by where a key isn't selected, should not be bucketed or sorted
@@ -168,7 +168,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys
@@ -221,7 +221,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value]        	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join with two keys and only one selected, should not be bucketed or sorted
@@ -327,7 +327,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key
@@ -380,7 +380,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test distribute by, should only be bucketed by key
@@ -486,7 +486,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test sort by desc, should be sorted by key
@@ -539,7 +539,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test cluster by, should be bucketed and sorted by key
@@ -592,7 +592,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other
@@ -645,7 +645,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join in simple subquery, should be bucketed and sorted on key
@@ -698,7 +698,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join in simple subquery renaming key column, should be bucketed and sorted on key
@@ -751,7 +751,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in simple subquery, should be bucketed and sorted on key
@@ -804,7 +804,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in simple subquery renaming key column, should be bucketed and sorted on key
@@ -857,7 +857,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery with where outside, should still be bucketed and sorted on key
@@ -910,7 +910,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery with expression on value, should still be bucketed and sorted on key
@@ -963,7 +963,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery with lateral view outside, should still be bucketed and sorted
@@ -1016,7 +1016,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
@@ -1071,7 +1071,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery with select on outside reordering the columns, should be bucketed and
@@ -1126,7 +1126,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key
@@ -1179,7 +1179,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery followed by sort by, should only be sorted by the sort key
@@ -1232,7 +1232,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery followed by transform script, should not be bucketed or sorted
@@ -1338,6 +1338,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value]        	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
index 6db9428..33d795b 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_bucketed_table.q.out
@@ -64,7 +64,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	3                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
index e5626a4..2f7e538 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out
@@ -123,6 +123,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index 5f983c7..7e3b48f 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -302,7 +302,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12')
@@ -342,7 +342,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: CREATE TABLE srcpart_merge_dp LIKE srcpart

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
index 90a71ae..ebfce60 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out
@@ -144,7 +144,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value]        	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test rollup, should be bucketed and sorted on key, value, grouping_key
@@ -203,7 +203,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value, grouping_key]	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test cube, should not be bucketed or sorted because its missing the grouping ID
@@ -328,7 +328,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value]        	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test cube, should be bucketed and sorted on key, value, grouping_key
@@ -387,7 +387,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value, grouping_key]	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test grouping sets, should not be bucketed or sorted because its missing the grouping ID
@@ -512,7 +512,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value]        	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test grouping sets, should be bucketed and sorted on key, value, grouping_key
@@ -571,6 +571,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key, value, grouping_key]	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0), Order(col:grouping_key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
index 5ebcf41..4ca6517 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
@@ -361,7 +361,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test SMB join doesn't affect inference, should not be bucketed or sorted
@@ -640,6 +640,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
index 97cb92d..53407c5 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_merge.q.out
@@ -117,6 +117,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
index e59026e..d4c22f4 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out
@@ -172,7 +172,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2')
@@ -211,7 +211,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- The first partition should be bucketed and sorted, the second should not
@@ -272,7 +272,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2')
@@ -372,7 +372,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (part = '2')
@@ -411,6 +411,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
index 91466f1..1e4db29 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_reducers_power_two.q.out
@@ -64,7 +64,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join, should be bucketed and sorted by join key
@@ -117,7 +117,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys
@@ -170,7 +170,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[key, value]        	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0), Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1), Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key
@@ -223,7 +223,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key
@@ -276,7 +276,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	16                  	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the
@@ -331,6 +331,6 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out
index 9b7e7b7..87855fa 100644
--- a/ql/src/test/results/clientpositive/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/orc_analyze.q.out
@@ -917,7 +917,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -960,7 +960,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
@@ -1015,7 +1015,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1058,7 +1058,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
@@ -1113,7 +1113,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1156,7 +1156,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: drop table orc_create_people
@@ -1262,7 +1262,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1305,7 +1305,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: drop table orc_create_people

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
index 1a93b7a..bc159eb 100644
--- a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
+++ b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
@@ -154,7 +154,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- rename partition
@@ -205,7 +205,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:0, nullOrder:1)]	 
+Sort Columns:       	[Order(col:key, order:0)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- insert partition

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/show_create_table_alter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_create_table_alter.q.out b/ql/src/test/results/clientpositive/show_create_table_alter.q.out
index 78a34de..32819ea 100644
--- a/ql/src/test/results/clientpositive/show_create_table_alter.q.out
+++ b/ql/src/test/results/clientpositive/show_create_table_alter.q.out
@@ -24,7 +24,7 @@ CREATE EXTERNAL TABLE `tmp_showcrt1`(
 CLUSTERED BY ( 
   key) 
 SORTED BY ( 
-  value DESC NULLS LAST) 
+  value DESC) 
 INTO 5 BUCKETS
 ROW FORMAT SERDE 
   'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
@@ -59,7 +59,7 @@ COMMENT 'temporary table'
 CLUSTERED BY ( 
   key) 
 SORTED BY ( 
-  value DESC NULLS LAST) 
+  value DESC) 
 INTO 5 BUCKETS
 ROW FORMAT SERDE 
   'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
@@ -98,7 +98,7 @@ COMMENT 'changed comment'
 CLUSTERED BY ( 
   key) 
 SORTED BY ( 
-  value DESC NULLS LAST) 
+  value DESC) 
 INTO 5 BUCKETS
 ROW FORMAT SERDE 
   'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
@@ -136,7 +136,7 @@ COMMENT 'changed comment'
 CLUSTERED BY ( 
   key) 
 SORTED BY ( 
-  value DESC NULLS LAST) 
+  value DESC) 
 INTO 5 BUCKETS
 ROW FORMAT SERDE 
   'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
@@ -174,7 +174,7 @@ COMMENT 'changed comment'
 CLUSTERED BY ( 
   key) 
 SORTED BY ( 
-  value DESC NULLS LAST) 
+  value DESC) 
 INTO 5 BUCKETS
 ROW FORMAT SERDE 
   'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' 
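
SHOW CREATE TABLE reconstructs the SORTED BY clause from the metastore sort columns, so once nullOrder is gone there is nothing left to render as NULLS FIRST/LAST, and "value DESC NULLS LAST" collapses to "value DESC" as seen above. A sketch of the idea only, not the actual DDLTask code (the helper and class names are hypothetical):

import java.util.List;
import org.apache.hadoop.hive.metastore.api.Order;

public class SortedBySketch {
  // With only col and order available, ASC/DESC is all that can be emitted.
  static String sortedBy(List<Order> sortCols) {
    StringBuilder sb = new StringBuilder();
    for (Order o : sortCols) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(o.getCol()).append(o.getOrder() == 1 ? " ASC" : " DESC");
    }
    return sb.toString();
  }
}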

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/spark/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket5.q.out b/ql/src/test/results/clientpositive/spark/bucket5.q.out
index af6d399..5baf054 100644
--- a/ql/src/test/results/clientpositive/spark/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket5.q.out
@@ -391,7 +391,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
-Sort Columns:       	[Order(col:key, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:key, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: SELECT * FROM bucketed_table TABLESAMPLE (BUCKET 1 OUT OF 2) s LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
index 6db9428..33d795b 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out
@@ -64,7 +64,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	3                   	 
 Bucket Columns:     	[value]             	 
-Sort Columns:       	[Order(col:value, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:value, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index f7a94ac..a90e3f6 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -1215,7 +1215,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1256,7 +1256,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select count(*) from over1k_part_orc
@@ -2255,7 +2255,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2296,7 +2296,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc
@@ -2471,7 +2471,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2512,7 +2512,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: explain select * from over1k_part_buck_sort2_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index 01dc4ae..5292106 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -1128,7 +1128,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -1169,7 +1169,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select count(*) from over1k_part
@@ -2152,7 +2152,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2193,7 +2193,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select * from over1k_part_buck_sort2
@@ -2295,7 +2295,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__")
@@ -2336,7 +2336,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	1                   	 
 Bucket Columns:     	[si]                	 
-Sort Columns:       	[Order(col:f, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:f, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: select * from over1k_part_buck_sort2

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
index 9b7e7b7..87855fa 100644
--- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
@@ -917,7 +917,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -960,7 +960,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
@@ -1015,7 +1015,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1058,7 +1058,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
@@ -1113,7 +1113,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1156,7 +1156,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: drop table orc_create_people
@@ -1262,7 +1262,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: desc formatted orc_create_people partition(state="Or")
@@ -1305,7 +1305,7 @@ OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
 Compressed:         	No                  	 
 Num Buckets:        	4                   	 
 Bucket Columns:     	[first_name]        	 
-Sort Columns:       	[Order(col:last_name, order:1, nullOrder:0)]	 
+Sort Columns:       	[Order(col:last_name, order:1)]	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
 PREHOOK: query: drop table orc_create_people

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/serde/if/serde.thrift
----------------------------------------------------------------------
diff --git a/serde/if/serde.thrift b/serde/if/serde.thrift
index 0b3804d..6caad36 100644
--- a/serde/if/serde.thrift
+++ b/serde/if/serde.thrift
@@ -30,7 +30,7 @@ const string SERIALIZATION_NULL_FORMAT = "serialization.null.format"
 const string SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf"
 const string SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest"
 const string SERIALIZATION_SORT_ORDER = "serialization.sort.order"
-const string SERIALIZATION_NULL_POSITION = "serialization.null.position";
+const string SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null";
 const string SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"
 const string SERIALIZATION_ENCODING = "serialization.encoding"
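
The Java counterpart of this constant, generated from the same thrift file, is serdeConstants.SERIALIZATION_NULL_SORT_ORDER. A minimal usage sketch follows; the 'a'/'z' values (nulls first / nulls last) follow the convention BinarySortableSerDe uses for this key, which is an assumption here, as is the demo class name:

import java.util.Properties;
import org.apache.hadoop.hive.serde.serdeConstants;

public class NullSortOrderPropDemo {
  public static void main(String[] args) {
    Properties tblProps = new Properties();
    // 'a' = nulls sort first, 'z' = nulls sort last (assumed convention).
    tblProps.setProperty(serdeConstants.SERIALIZATION_NULL_SORT_ORDER, "a");
    System.out.println(tblProps.getProperty(
        serdeConstants.SERIALIZATION_NULL_SORT_ORDER));
  }
}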
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
----------------------------------------------------------------------
diff --git a/serde/src/gen/thrift/gen-cpp/serde_constants.cpp b/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
index 75701e2..3a675bf 100644
--- a/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
+++ b/serde/src/gen/thrift/gen-cpp/serde_constants.cpp
@@ -27,7 +27,7 @@ serdeConstants::serdeConstants() {
 
   SERIALIZATION_SORT_ORDER = "serialization.sort.order";
 
-  SERIALIZATION_NULL_POSITION = "serialization.null.position";
+  SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null";
 
   SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object";
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/serde/src/gen/thrift/gen-cpp/serde_constants.h
----------------------------------------------------------------------
diff --git a/serde/src/gen/thrift/gen-cpp/serde_constants.h b/serde/src/gen/thrift/gen-cpp/serde_constants.h
index 6d85928..a5f33fb 100644
--- a/serde/src/gen/thrift/gen-cpp/serde_constants.h
+++ b/serde/src/gen/thrift/gen-cpp/serde_constants.h
@@ -23,7 +23,7 @@ class serdeConstants {
   std::string SERIALIZATION_ESCAPE_CRLF;
   std::string SERIALIZATION_LAST_COLUMN_TAKES_REST;
   std::string SERIALIZATION_SORT_ORDER;
-  std::string SERIALIZATION_NULL_POSITION;
+  std::string SERIALIZATION_NULL_SORT_ORDER;
   std::string SERIALIZATION_USE_JSON_OBJECTS;
   std::string SERIALIZATION_ENCODING;
   std::string FIELD_DELIM;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
----------------------------------------------------------------------
diff --git a/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php b/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
index 0bc6dd7..18c3991 100644
--- a/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
+++ b/serde/src/gen/thrift/gen-php/org/apache/hadoop/hive/serde/Types.php
@@ -26,7 +26,7 @@ final class Constant extends \Thrift\Type\TConstant {
   static protected $SERIALIZATION_ESCAPE_CRLF;
   static protected $SERIALIZATION_LAST_COLUMN_TAKES_REST;
   static protected $SERIALIZATION_SORT_ORDER;
-  static protected $SERIALIZATION_NULL_POSITION;
+  static protected $SERIALIZATION_NULL_SORT_ORDER;
   static protected $SERIALIZATION_USE_JSON_OBJECTS;
   static protected $SERIALIZATION_ENCODING;
   static protected $FIELD_DELIM;
@@ -98,8 +98,8 @@ final class Constant extends \Thrift\Type\TConstant {
     return "serialization.sort.order";
   }
 
-  static protected function init_SERIALIZATION_NULL_POSITION() {
-    return "serialization.null.position";
+  static protected function init_SERIALIZATION_NULL_SORT_ORDER() {
+    return "serialization.sort.order.null";
   }
 
   static protected function init_SERIALIZATION_USE_JSON_OBJECTS() {

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
----------------------------------------------------------------------
diff --git a/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py b/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
index 7939791..fafdc24 100644
--- a/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
+++ b/serde/src/gen/thrift/gen-py/org_apache_hadoop_hive_serde/constants.py
@@ -17,7 +17,7 @@ SERIALIZATION_NULL_FORMAT = "serialization.null.format"
 SERIALIZATION_ESCAPE_CRLF = "serialization.escape.crlf"
 SERIALIZATION_LAST_COLUMN_TAKES_REST = "serialization.last.column.takes.rest"
 SERIALIZATION_SORT_ORDER = "serialization.sort.order"
-SERIALIZATION_NULL_POSITION = "serialization.null.position"
+SERIALIZATION_NULL_SORT_ORDER = "serialization.sort.order.null"
 SERIALIZATION_USE_JSON_OBJECTS = "serialization.use.json.object"
 SERIALIZATION_ENCODING = "serialization.encoding"
 FIELD_DELIM = "field.delim"

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/serde/src/gen/thrift/gen-rb/serde_constants.rb
----------------------------------------------------------------------
diff --git a/serde/src/gen/thrift/gen-rb/serde_constants.rb b/serde/src/gen/thrift/gen-rb/serde_constants.rb
index d09e3c2..0ce9f27 100644
--- a/serde/src/gen/thrift/gen-rb/serde_constants.rb
+++ b/serde/src/gen/thrift/gen-rb/serde_constants.rb
@@ -23,7 +23,7 @@ SERIALIZATION_LAST_COLUMN_TAKES_REST = %q"serialization.last.column.takes.rest"
 
 SERIALIZATION_SORT_ORDER = %q"serialization.sort.order"
 
-SERIALIZATION_NULL_POSITION = %q"serialization.null.position"
+SERIALIZATION_NULL_SORT_ORDER = %q"serialization.sort.order.null"
 
 SERIALIZATION_USE_JSON_OBJECTS = %q"serialization.use.json.object"
 


[2/2] hive git commit: HIVE-13210: Revert changes in HIVE-12994 related to metastore (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
HIVE-13210: Revert changes in HIVE-12994 related to metastore (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cc8893e4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cc8893e4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cc8893e4

Branch: refs/heads/master
Commit: cc8893e48e5552b5e6fc47d3f689f482e17d686e
Parents: 7c4599b
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Mar 4 23:55:37 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Tue Mar 8 19:09:00 2016 +0100

----------------------------------------------------------------------
 metastore/if/hive_metastore.thrift              |   3 +-
 .../upgrade/derby/034-HIVE-12994.derby.sql      |   2 -
 .../upgrade/derby/hive-schema-2.1.0.derby.sql   |   2 +-
 .../derby/upgrade-2.0.0-to-2.1.0.derby.sql      |   1 -
 .../upgrade/mssql/019-HIVE-12994.mssql.sql      |   2 -
 .../upgrade/mssql/hive-schema-2.1.0.mssql.sql   |   1 -
 .../mssql/upgrade-2.0.0-to-2.1.0.mssql.sql      |   1 -
 .../upgrade/mysql/034-HIVE-12994.mysql.sql      |   2 -
 .../upgrade/mysql/hive-schema-2.1.0.mysql.sql   |   1 -
 .../mysql/upgrade-2.0.0-to-2.1.0.mysql.sql      |   1 -
 .../upgrade/oracle/034-HIVE-12994.oracle.sql    |   2 -
 .../upgrade/oracle/hive-schema-2.1.0.oracle.sql |   1 -
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |   1 -
 .../postgres/033-HIVE-12994.postgres.sql        |   2 -
 .../postgres/hive-schema-2.1.0.postgres.sql     |   1 -
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |   1 -
 .../metastore/hbase/HbaseMetastoreProto.java    | 175 +++++--------------
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  20 ---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  10 +-
 .../apache/hadoop/hive/metastore/api/Order.java | 106 +----------
 .../src/gen/thrift/gen-php/metastore/Types.php  |  23 ---
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  15 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   4 +-
 .../hive/metastore/MetaStoreDirectSql.java      |  10 +-
 .../hadoop/hive/metastore/ObjectStore.java      |  19 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |  41 ++---
 .../hadoop/hive/metastore/model/MOrder.java     |  18 +-
 .../metastore/hbase/hbase_metastore_proto.proto |   1 -
 .../hive/metastore/hbase/TestHBaseStore.java    |   7 +-
 .../hbase/TestHBaseStoreBitVector.java          |   2 +-
 .../hbase/TestSharedStorageDescriptor.java      |   7 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   5 -
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   5 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   2 +-
 .../hive/ql/optimizer/AbstractSMBJoinProc.java  |   3 +-
 .../BucketingSortingReduceSinkOptimizer.java    |  44 +----
 .../optimizer/SortedDynPartitionOptimizer.java  |  29 +--
 .../ql/optimizer/calcite/RelOptHiveTable.java   |  12 +-
 .../optimizer/physical/BucketingSortingCtx.java |  15 +-
 .../physical/BucketingSortingOpProcFactory.java |  15 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |  14 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  25 +--
 .../hive/ql/txn/compactor/TestWorker.java       |   4 +-
 ...lter_numbuckets_partitioned_table2_h23.q.out |  20 +--
 ...alter_numbuckets_partitioned_table_h23.q.out |   4 +-
 .../alter_partition_clusterby_sortby.q.out      |   4 +-
 .../clientpositive/alter_table_not_sorted.q.out |   2 +-
 .../clientpositive/authorization_index.q.out    |   2 +-
 .../test/results/clientpositive/bucket5.q.out   |   2 +-
 .../dynpart_sort_opt_vectorization.q.out        |  12 +-
 .../dynpart_sort_optimization.q.out             |  12 +-
 .../clientpositive/index_skewtable.q.out        |   2 +-
 .../clientpositive/infer_bucket_sort.q.out      |  42 ++---
 .../infer_bucket_sort_bucketed_table.q.out      |   2 +-
 .../infer_bucket_sort_convert_join.q.out        |   2 +-
 .../infer_bucket_sort_dyn_part.q.out            |   4 +-
 .../infer_bucket_sort_grouping_operators.q.out  |  12 +-
 .../infer_bucket_sort_map_operators.q.out       |   4 +-
 .../infer_bucket_sort_merge.q.out               |   2 +-
 .../infer_bucket_sort_multi_insert.q.out        |  10 +-
 .../infer_bucket_sort_reducers_power_two.q.out  |  12 +-
 .../results/clientpositive/orc_analyze.q.out    |  16 +-
 .../partition_coltype_literals.q.out            |   4 +-
 .../show_create_table_alter.q.out               |  10 +-
 .../results/clientpositive/spark/bucket5.q.out  |   2 +-
 .../infer_bucket_sort_bucketed_table.q.out      |   2 +-
 .../tez/dynpart_sort_opt_vectorization.q.out    |  12 +-
 .../tez/dynpart_sort_optimization.q.out         |  12 +-
 .../clientpositive/tez/orc_analyze.q.out        |  16 +-
 serde/if/serde.thrift                           |   2 +-
 .../src/gen/thrift/gen-cpp/serde_constants.cpp  |   2 +-
 serde/src/gen/thrift/gen-cpp/serde_constants.h  |   2 +-
 .../org/apache/hadoop/hive/serde/Types.php      |   6 +-
 .../org_apache_hadoop_hive_serde/constants.py   |   2 +-
 serde/src/gen/thrift/gen-rb/serde_constants.rb  |   2 +-
 75 files changed, 255 insertions(+), 640 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index a4fb612..3635054 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -227,8 +227,7 @@ struct SerDeInfo {
 // sort order of a column (column name along with asc(1)/desc(0))
 struct Order {
   1: string col,      // sort column name
-  2: i32    order,    // asc(1) or desc(0)
-  3: i32    nullOrder // nulls first(0) or nulls last(1)
+  2: i32    order     // asc(1) or desc(0)
 }
 
 // this object holds all the information about skewed table
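
After the revert, a sort specification in the metastore is again just a (column, direction) pair; null placement is no longer persisted. A minimal construction sketch against the generated Java API (org.apache.hadoop.hive.metastore.api.Order, listed in the file summary above; the demo class name is hypothetical):

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class SortColsDemo {
  public static void main(String[] args) {
    StorageDescriptor sd = new StorageDescriptor();
    // Two-field form restored by this revert: column name, asc(1)/desc(0).
    sd.setSortCols(Arrays.asList(new Order("key", 1)));
    System.out.println(sd.getSortCols());
  }
}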

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql b/metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
deleted file mode 100644
index a8b48bf..0000000
--- a/metastore/scripts/upgrade/derby/034-HIVE-12994.derby.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE "APP".SORT_COLS ADD "NULL_ORDER" INTEGER NOT NULL DEFAULT 0;
-UPDATE "APP".SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
index 00c49ae..42f4eb6 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql
@@ -28,7 +28,7 @@ CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCH
 
 CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(1000) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
 
-CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "NULL_ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
 
 CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
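
The reverted SORT_COLS layout just above can be inspected directly against a local Derby metastore. A hedged JDBC sketch (the connection URL is hypothetical, and in practice the metastore API, not raw JDBC, is the supported way to read this):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SortColsQuery {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             DriverManager.getConnection("jdbc:derby:metastore_db");
         Statement stmt = conn.createStatement();
         // NULL_ORDER is gone after the revert; these four columns remain.
         ResultSet rs = stmt.executeQuery(
             "SELECT SD_ID, \"COLUMN_NAME\", \"ORDER\", INTEGER_IDX"
             + " FROM APP.SORT_COLS")) {
      while (rs.next()) {
        System.out.println(rs.getLong(1) + "\t" + rs.getString(2)
            + "\t" + rs.getInt(3) + "\t" + rs.getInt(4));
      }
    }
  }
}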
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
index 13aeabb..a0bac3c 100644
--- a/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql
@@ -1,5 +1,4 @@
 -- Upgrade MetaStore schema from 2.0.0 to 2.1.0
 RUN '033-HIVE-12892.derby.sql';
-RUN '034-HIVE-12994.derby.sql';
 
 UPDATE "APP".VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql b/metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql
deleted file mode 100644
index 51e2c43..0000000
--- a/metastore/scripts/upgrade/mssql/019-HIVE-12994.mssql.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE SORT_COLS ADD "NULL_ORDER" int NOT NULL DEFAULT 0;
-UPDATE SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
index c98c7ef..cf5a662 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
@@ -411,7 +411,6 @@ CREATE TABLE SORT_COLS
     SD_ID bigint NOT NULL,
     "COLUMN_NAME" nvarchar(1000) NULL,
     "ORDER" int NOT NULL,
-    "NULL_ORDER" int NOT NULL,
     INTEGER_IDX int NOT NULL
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
index 3b4a2a3..f25daf2 100644
--- a/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE;
 
 :r 018-HIVE-12892.mssql.sql;
-:r 019-HIVE-12994.mssql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql b/metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql
deleted file mode 100644
index ff4cf37..0000000
--- a/metastore/scripts/upgrade/mysql/034-HIVE-12994.mysql.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE `SORT_COLS` ADD `NULL_ORDER` INTEGER NOT NULL DEFAULT 0;
-UPDATE `SORT_COLS` SET `NULL_ORDER` = 1 WHERE `ORDER` = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
index 1369349..6fd3209 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
@@ -546,7 +546,6 @@ CREATE TABLE IF NOT EXISTS `SORT_COLS` (
   `SD_ID` bigint(20) NOT NULL,
   `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `ORDER` int(11) NOT NULL,
-  `NULL_ORDER` int(11) NOT NULL,
   `INTEGER_IDX` int(11) NOT NULL,
   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
   KEY `SORT_COLS_N49` (`SD_ID`),

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
index 09bb8b8..e790636 100644
--- a/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' ';
 
 SOURCE 033-HIVE-12892.mysql.sql;
-SOURCE 034-HIVE-12994.mysql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' ';

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql b/metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql
deleted file mode 100644
index 0b57ef8..0000000
--- a/metastore/scripts/upgrade/oracle/034-HIVE-12994.oracle.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE SORT_COLS ADD "NULL_ORDER" NUMBER (10) NOT NULL DEFAULT 0;
-UPDATE SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
index 198f0bd..774f6be 100644
--- a/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
@@ -274,7 +274,6 @@ CREATE TABLE SORT_COLS
     SD_ID NUMBER NOT NULL,
     "COLUMN_NAME" VARCHAR2(1000) NULL,
     "ORDER" NUMBER (10) NOT NULL,
-    "NULL_ORDER" NUMBER (10) NOT NULL,
     INTEGER_IDX NUMBER(10) NOT NULL
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
index a729baa..8368d08 100644
--- a/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual;
 
 @033-HIVE-12892.oracle.sql;
-@034-HIVE-12994.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql b/metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql
deleted file mode 100644
index 0894fed..0000000
--- a/metastore/scripts/upgrade/postgres/033-HIVE-12994.postgres.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE SORT_COLS ADD COLUMN "NULL_ORDER" bigint NOT NULL DEFAULT 0;
-UPDATE SORT_COLS SET "NULL_ORDER" = 1 WHERE "ORDER" = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
index 48d16de..7463a37 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
@@ -342,7 +342,6 @@ CREATE TABLE "SORT_COLS" (
     "SD_ID" bigint NOT NULL,
     "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
     "ORDER" bigint NOT NULL,
-    "NULL_ORDER" bigint NOT NULL,
     "INTEGER_IDX" bigint NOT NULL
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
index 30d070d..6172407 100644
--- a/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
@@ -1,7 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0';
 
 \i 032-HIVE-12892.postgres.sql;
-\i 033-HIVE-12994.postgres.sql;
 
 UPDATE "VERSION" SET "SCHEMA_VERSION"='2.1.0', "VERSION_COMMENT"='Hive release version 2.1.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0';

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
index 3057fff..3b2d7b5 100644
--- a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
+++ b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
@@ -22582,16 +22582,6 @@ public final class HbaseMetastoreProto {
        * <code>optional sint32 order = 2 [default = 1];</code>
        */
       int getOrder();
-
-      // optional sint32 nullOrder = 3 [default = 0];
-      /**
-       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-       */
-      boolean hasNullOrder();
-      /**
-       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-       */
-      int getNullOrder();
     }
     /**
      * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order}
@@ -22654,11 +22644,6 @@ public final class HbaseMetastoreProto {
                 order_ = input.readSInt32();
                 break;
               }
-              case 24: {
-                bitField0_ |= 0x00000004;
-                nullOrder_ = input.readSInt32();
-                break;
-              }
             }
           }
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -22758,26 +22743,9 @@ public final class HbaseMetastoreProto {
         return order_;
       }
 
-      // optional sint32 nullOrder = 3 [default = 0];
-      public static final int NULLORDER_FIELD_NUMBER = 3;
-      private int nullOrder_;
-      /**
-       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-       */
-      public boolean hasNullOrder() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-       */
-      public int getNullOrder() {
-        return nullOrder_;
-      }
-
       private void initFields() {
         columnName_ = "";
         order_ = 1;
-        nullOrder_ = 0;
       }
       private byte memoizedIsInitialized = -1;
       public final boolean isInitialized() {
@@ -22801,9 +22769,6 @@ public final class HbaseMetastoreProto {
         if (((bitField0_ & 0x00000002) == 0x00000002)) {
           output.writeSInt32(2, order_);
         }
-        if (((bitField0_ & 0x00000004) == 0x00000004)) {
-          output.writeSInt32(3, nullOrder_);
-        }
         getUnknownFields().writeTo(output);
       }
 
@@ -22821,10 +22786,6 @@ public final class HbaseMetastoreProto {
           size += com.google.protobuf.CodedOutputStream
             .computeSInt32Size(2, order_);
         }
-        if (((bitField0_ & 0x00000004) == 0x00000004)) {
-          size += com.google.protobuf.CodedOutputStream
-            .computeSInt32Size(3, nullOrder_);
-        }
         size += getUnknownFields().getSerializedSize();
         memoizedSerializedSize = size;
         return size;
@@ -22945,8 +22906,6 @@ public final class HbaseMetastoreProto {
           bitField0_ = (bitField0_ & ~0x00000001);
           order_ = 1;
           bitField0_ = (bitField0_ & ~0x00000002);
-          nullOrder_ = 0;
-          bitField0_ = (bitField0_ & ~0x00000004);
           return this;
         }
 
@@ -22983,10 +22942,6 @@ public final class HbaseMetastoreProto {
             to_bitField0_ |= 0x00000002;
           }
           result.order_ = order_;
-          if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-            to_bitField0_ |= 0x00000004;
-          }
-          result.nullOrder_ = nullOrder_;
           result.bitField0_ = to_bitField0_;
           onBuilt();
           return result;
@@ -23011,9 +22966,6 @@ public final class HbaseMetastoreProto {
           if (other.hasOrder()) {
             setOrder(other.getOrder());
           }
-          if (other.hasNullOrder()) {
-            setNullOrder(other.getNullOrder());
-          }
           this.mergeUnknownFields(other.getUnknownFields());
           return this;
         }
@@ -23152,39 +23104,6 @@ public final class HbaseMetastoreProto {
           return this;
         }
 
-        // optional sint32 nullOrder = 3 [default = 0];
-        private int nullOrder_ ;
-        /**
-         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-         */
-        public boolean hasNullOrder() {
-          return ((bitField0_ & 0x00000004) == 0x00000004);
-        }
-        /**
-         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-         */
-        public int getNullOrder() {
-          return nullOrder_;
-        }
-        /**
-         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-         */
-        public Builder setNullOrder(int value) {
-          bitField0_ |= 0x00000004;
-          nullOrder_ = value;
-          onChanged();
-          return this;
-        }
-        /**
-         * <code>optional sint32 nullOrder = 3 [default = 0];</code>
-         */
-        public Builder clearNullOrder() {
-          bitField0_ = (bitField0_ & ~0x00000004);
-          nullOrder_ = 0;
-          onChanged();
-          return this;
-        }
-
         // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.StorageDescriptor.Order)
       }
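
The regenerated protobuf builder above no longer carries nullOrder; only column_name (required) and order (optional, default 1) survive. A minimal builder sketch, assuming the regenerated class is on the classpath (the demo class name is hypothetical):

import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.StorageDescriptor;

public class ProtoOrderDemo {
  public static void main(String[] args) {
    StorageDescriptor.Order o = StorageDescriptor.Order.newBuilder()
        .setColumnName("key")  // required field
        .setOrder(0)           // optional; overrides the default of 1 (asc)
        .build();
    System.out.println(o.getColumnName() + " " + o.getOrder());
  }
}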
 
@@ -34828,7 +34747,7 @@ public final class HbaseMetastoreProto {
       "ant_info\030\001 \003(\01325.org.apache.hadoop.hive." +
       "metastore.hbase.RoleGrantInfo\"\030\n\010RoleLis",
       "t\022\014\n\004role\030\001 \003(\t\"/\n\004Role\022\023\n\013create_time\030\001" +
-      " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\302\010\n\021StorageDesc" +
+      " \001(\003\022\022\n\nowner_name\030\002 \001(\t\"\254\010\n\021StorageDesc" +
       "riptor\022A\n\004cols\030\001 \003(\01323.org.apache.hadoop" +
       ".hive.metastore.hbase.FieldSchema\022\024\n\014inp" +
       "ut_format\030\002 \001(\t\022\025\n\routput_format\030\003 \001(\t\022\025" +
@@ -34841,51 +34760,51 @@ public final class HbaseMetastoreProto {
       "skewed_info\030\t \001(\0132D.org.apache.hadoop.hi" +
       "ve.metastore.hbase.StorageDescriptor.Ske" +
       "wedInfo\022!\n\031stored_as_sub_directories\030\n \001" +
-      "(\010\032D\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" +
-      "r\030\002 \001(\021:\0011\022\024\n\tnullOrder\030\003 \001(\021:\0010\032|\n\tSerD" +
-      "eInfo\022\014\n\004name\030\001 \001(\t\022\031\n\021serialization_lib" +
-      "\030\002 \001(\t\022F\n\nparameters\030\003 \001(\01322.org.apache." +
-      "hadoop.hive.metastore.hbase.Parameters\032\214" +
-      "\003\n\nSkewedInfo\022\030\n\020skewed_col_names\030\001 \003(\t\022",
-      "r\n\021skewed_col_values\030\002 \003(\0132W.org.apache." +
-      "hadoop.hive.metastore.hbase.StorageDescr" +
-      "iptor.SkewedInfo.SkewedColValueList\022\206\001\n\036" +
-      "skewed_col_value_location_maps\030\003 \003(\0132^.o" +
-      "rg.apache.hadoop.hive.metastore.hbase.St" +
-      "orageDescriptor.SkewedInfo.SkewedColValu" +
-      "eLocationMap\032.\n\022SkewedColValueList\022\030\n\020sk" +
-      "ewed_col_value\030\001 \003(\t\0327\n\031SkewedColValueLo" +
-      "cationMap\022\013\n\003key\030\001 \003(\t\022\r\n\005value\030\002 \002(\t\"\220\004" +
-      "\n\005Table\022\r\n\005owner\030\001 \001(\t\022\023\n\013create_time\030\002 ",
-      "\001(\003\022\030\n\020last_access_time\030\003 \001(\003\022\021\n\tretenti" +
-      "on\030\004 \001(\003\022\020\n\010location\030\005 \001(\t\022I\n\rsd_paramet" +
-      "ers\030\006 \001(\01322.org.apache.hadoop.hive.metas" +
-      "tore.hbase.Parameters\022\017\n\007sd_hash\030\007 \002(\014\022K" +
-      "\n\016partition_keys\030\010 \003(\01323.org.apache.hado" +
-      "op.hive.metastore.hbase.FieldSchema\022F\n\np" +
-      "arameters\030\t \001(\01322.org.apache.hadoop.hive" +
-      ".metastore.hbase.Parameters\022\032\n\022view_orig" +
-      "inal_text\030\n \001(\t\022\032\n\022view_expanded_text\030\013 " +
-      "\001(\t\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivileges\030\r ",
-      "\001(\0132=.org.apache.hadoop.hive.metastore.h" +
-      "base.PrincipalPrivilegeSet\022\024\n\014is_tempora" +
-      "ry\030\016 \001(\010\"\353\004\n\026PartitionKeyComparator\022\r\n\005n" +
-      "ames\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G." +
-      "org.apache.hadoop.hive.metastore.hbase.P" +
-      "artitionKeyComparator.Operator\022S\n\005range\030" +
-      "\004 \003(\0132D.org.apache.hadoop.hive.metastore" +
-      ".hbase.PartitionKeyComparator.Range\032(\n\004M" +
-      "ark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001" +
-      "\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.or",
-      "g.apache.hadoop.hive.metastore.hbase.Par" +
-      "titionKeyComparator.Mark\022P\n\003end\030\003 \001(\0132C." +
-      "org.apache.hadoop.hive.metastore.hbase.P" +
-      "artitionKeyComparator.Mark\032\241\001\n\010Operator\022" +
-      "Z\n\004type\030\001 \002(\0162L.org.apache.hadoop.hive.m" +
-      "etastore.hbase.PartitionKeyComparator.Op" +
-      "erator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n" +
-      "\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*#\n\rPrinci" +
-      "palType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001"
+      "(\010\032.\n\005Order\022\023\n\013column_name\030\001 \002(\t\022\020\n\005orde" +
+      "r\030\002 \001(\021:\0011\032|\n\tSerDeInfo\022\014\n\004name\030\001 \001(\t\022\031\n" +
+      "\021serialization_lib\030\002 \001(\t\022F\n\nparameters\030\003" +
+      " \001(\01322.org.apache.hadoop.hive.metastore." +
+      "hbase.Parameters\032\214\003\n\nSkewedInfo\022\030\n\020skewe" +
+      "d_col_names\030\001 \003(\t\022r\n\021skewed_col_values\030\002",
+      " \003(\0132W.org.apache.hadoop.hive.metastore." +
+      "hbase.StorageDescriptor.SkewedInfo.Skewe" +
+      "dColValueList\022\206\001\n\036skewed_col_value_locat" +
+      "ion_maps\030\003 \003(\0132^.org.apache.hadoop.hive." +
+      "metastore.hbase.StorageDescriptor.Skewed" +
+      "Info.SkewedColValueLocationMap\032.\n\022Skewed" +
+      "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" +
+      "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" +
+      "\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t" +
+      "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim",
+      "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" +
+      " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" +
+      ".hadoop.hive.metastore.hbase.Parameters\022" +
+      "\017\n\007sd_hash\030\007 \002(\014\022K\n\016partition_keys\030\010 \003(\013" +
+      "23.org.apache.hadoop.hive.metastore.hbas" +
+      "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." +
+      "apache.hadoop.hive.metastore.hbase.Param" +
+      "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" +
+      "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" +
+      "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo",
+      "p.hive.metastore.hbase.PrincipalPrivileg" +
+      "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\353\004\n\026Partition" +
+      "KeyComparator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 " +
+      "\002(\t\022S\n\002op\030\003 \003(\0132G.org.apache.hadoop.hive" +
+      ".metastore.hbase.PartitionKeyComparator." +
+      "Operator\022S\n\005range\030\004 \003(\0132D.org.apache.had" +
+      "oop.hive.metastore.hbase.PartitionKeyCom" +
+      "parator.Range\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\t" +
+      "inclusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R" +
+      "\n\005start\030\002 \001(\0132C.org.apache.hadoop.hive.m",
+      "etastore.hbase.PartitionKeyComparator.Ma" +
+      "rk\022P\n\003end\030\003 \001(\0132C.org.apache.hadoop.hive" +
+      ".metastore.hbase.PartitionKeyComparator." +
+      "Mark\032\241\001\n\010Operator\022Z\n\004type\030\001 \002(\0162L.org.ap" +
+      "ache.hadoop.hive.metastore.hbase.Partiti" +
+      "onKeyComparator.Operator.Type\022\013\n\003key\030\002 \002" +
+      "(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOT" +
+      "EQUALS\020\001*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004R" +
+      "OLE\020\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -35071,7 +34990,7 @@ public final class HbaseMetastoreProto {
           internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_Order_descriptor,
-              new java.lang.String[] { "ColumnName", "Order", "NullOrder", });
+              new java.lang.String[] { "ColumnName", "Order", });
           internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_descriptor =
             internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_descriptor.getNestedTypes().get(1);
           internal_static_org_apache_hadoop_hive_metastore_hbase_StorageDescriptor_SerDeInfo_fieldAccessorTable = new
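
For orientation, the long byte string above is only protobuf's regenerated
descriptor data; the substantive change is that the nested
StorageDescriptor.Order message loses its third field. A minimal sketch of
the regenerated Java API, assuming standard protobuf codegen (builder and
accessor names are inferred, not taken from this patch):

    import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto;

    // After the revert the nested Order carries only column_name and order;
    // order defaults to 1 per the .proto change later in this commit.
    HbaseMetastoreProto.StorageDescriptor.Order order =
        HbaseMetastoreProto.StorageDescriptor.Order.newBuilder()
            .setColumnName("viewtime")   // required string column_name = 1
            .setOrder(1)                 // optional sint32 order = 2 [default = 1]
            .build();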

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 6534d68..2695ffa 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -3101,10 +3101,6 @@ void Order::__set_order(const int32_t val) {
   this->order = val;
 }
 
-void Order::__set_nullOrder(const int32_t val) {
-  this->nullOrder = val;
-}
-
 uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -3142,14 +3138,6 @@ uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_I32) {
-          xfer += iprot->readI32(this->nullOrder);
-          this->__isset.nullOrder = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -3175,10 +3163,6 @@ uint32_t Order::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeI32(this->order);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("nullOrder", ::apache::thrift::protocol::T_I32, 3);
-  xfer += oprot->writeI32(this->nullOrder);
-  xfer += oprot->writeFieldEnd();
-
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -3188,20 +3172,17 @@ void swap(Order &a, Order &b) {
   using ::std::swap;
   swap(a.col, b.col);
   swap(a.order, b.order);
-  swap(a.nullOrder, b.nullOrder);
   swap(a.__isset, b.__isset);
 }
 
 Order::Order(const Order& other139) {
   col = other139.col;
   order = other139.order;
-  nullOrder = other139.nullOrder;
   __isset = other139.__isset;
 }
 Order& Order::operator=(const Order& other140) {
   col = other140.col;
   order = other140.order;
-  nullOrder = other140.nullOrder;
   __isset = other140.__isset;
   return *this;
 }
@@ -3210,7 +3191,6 @@ void Order::printTo(std::ostream& out) const {
   out << "Order(";
   out << "col=" << to_string(col);
   out << ", " << "order=" << to_string(order);
-  out << ", " << "nullOrder=" << to_string(nullOrder);
   out << ")";
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 3fd2543..97c07a5 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -1575,10 +1575,9 @@ inline std::ostream& operator<<(std::ostream& out, const SerDeInfo& obj)
 }
 
 typedef struct _Order__isset {
-  _Order__isset() : col(false), order(false), nullOrder(false) {}
+  _Order__isset() : col(false), order(false) {}
   bool col :1;
   bool order :1;
-  bool nullOrder :1;
 } _Order__isset;
 
 class Order {
@@ -1586,13 +1585,12 @@ class Order {
 
   Order(const Order&);
   Order& operator=(const Order&);
-  Order() : col(), order(0), nullOrder(0) {
+  Order() : col(), order(0) {
   }
 
   virtual ~Order() throw();
   std::string col;
   int32_t order;
-  int32_t nullOrder;
 
   _Order__isset __isset;
 
@@ -1600,16 +1598,12 @@ class Order {
 
   void __set_order(const int32_t val);
 
-  void __set_nullOrder(const int32_t val);
-
   bool operator == (const Order & rhs) const
   {
     if (!(col == rhs.col))
       return false;
     if (!(order == rhs.order))
       return false;
-    if (!(nullOrder == rhs.nullOrder))
-      return false;
     return true;
   }
   bool operator != (const Order &rhs) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
index fd05de5..cc0e2dd 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
@@ -40,7 +40,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
 
   private static final org.apache.thrift.protocol.TField COL_FIELD_DESC = new org.apache.thrift.protocol.TField("col", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.I32, (short)2);
-  private static final org.apache.thrift.protocol.TField NULL_ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("nullOrder", org.apache.thrift.protocol.TType.I32, (short)3);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -50,13 +49,11 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
 
   private String col; // required
   private int order; // required
-  private int nullOrder; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     COL((short)1, "col"),
-    ORDER((short)2, "order"),
-    NULL_ORDER((short)3, "nullOrder");
+    ORDER((short)2, "order");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -75,8 +72,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
           return COL;
         case 2: // ORDER
           return ORDER;
-        case 3: // NULL_ORDER
-          return NULL_ORDER;
         default:
           return null;
       }
@@ -118,7 +113,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
 
   // isset id assignments
   private static final int __ORDER_ISSET_ID = 0;
-  private static final int __NULLORDER_ISSET_ID = 1;
   private byte __isset_bitfield = 0;
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
@@ -127,8 +121,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.DEFAULT, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
-    tmpMap.put(_Fields.NULL_ORDER, new org.apache.thrift.meta_data.FieldMetaData("nullOrder", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Order.class, metaDataMap);
   }
@@ -138,15 +130,12 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
 
   public Order(
     String col,
-    int order,
-    int nullOrder)
+    int order)
   {
     this();
     this.col = col;
     this.order = order;
     setOrderIsSet(true);
-    this.nullOrder = nullOrder;
-    setNullOrderIsSet(true);
   }
 
   /**
@@ -158,7 +147,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
       this.col = other.col;
     }
     this.order = other.order;
-    this.nullOrder = other.nullOrder;
   }
 
   public Order deepCopy() {
@@ -170,8 +158,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
     this.col = null;
     setOrderIsSet(false);
     this.order = 0;
-    setNullOrderIsSet(false);
-    this.nullOrder = 0;
   }
 
   public String getCol() {
@@ -219,28 +205,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ORDER_ISSET_ID, value);
   }
 
-  public int getNullOrder() {
-    return this.nullOrder;
-  }
-
-  public void setNullOrder(int nullOrder) {
-    this.nullOrder = nullOrder;
-    setNullOrderIsSet(true);
-  }
-
-  public void unsetNullOrder() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NULLORDER_ISSET_ID);
-  }
-
-  /** Returns true if field nullOrder is set (has been assigned a value) and false otherwise */
-  public boolean isSetNullOrder() {
-    return EncodingUtils.testBit(__isset_bitfield, __NULLORDER_ISSET_ID);
-  }
-
-  public void setNullOrderIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NULLORDER_ISSET_ID, value);
-  }
-
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case COL:
@@ -259,14 +223,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
       }
       break;
 
-    case NULL_ORDER:
-      if (value == null) {
-        unsetNullOrder();
-      } else {
-        setNullOrder((Integer)value);
-      }
-      break;
-
     }
   }
 
@@ -278,9 +234,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
     case ORDER:
       return getOrder();
 
-    case NULL_ORDER:
-      return getNullOrder();
-
     }
     throw new IllegalStateException();
   }
@@ -296,8 +249,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
       return isSetCol();
     case ORDER:
       return isSetOrder();
-    case NULL_ORDER:
-      return isSetNullOrder();
     }
     throw new IllegalStateException();
   }
@@ -333,15 +284,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
         return false;
     }
 
-    boolean this_present_nullOrder = true;
-    boolean that_present_nullOrder = true;
-    if (this_present_nullOrder || that_present_nullOrder) {
-      if (!(this_present_nullOrder && that_present_nullOrder))
-        return false;
-      if (this.nullOrder != that.nullOrder)
-        return false;
-    }
-
     return true;
   }
 
@@ -359,11 +301,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
     if (present_order)
       list.add(order);
 
-    boolean present_nullOrder = true;
-    list.add(present_nullOrder);
-    if (present_nullOrder)
-      list.add(nullOrder);
-
     return list.hashCode();
   }
 
@@ -395,16 +332,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetNullOrder()).compareTo(other.isSetNullOrder());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetNullOrder()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nullOrder, other.nullOrder);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     return 0;
   }
 
@@ -436,10 +363,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
     sb.append("order:");
     sb.append(this.order);
     first = false;
-    if (!first) sb.append(", ");
-    sb.append("nullOrder:");
-    sb.append(this.nullOrder);
-    first = false;
     sb.append(")");
     return sb.toString();
   }
@@ -501,14 +424,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 3: // NULL_ORDER
-            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
-              struct.nullOrder = iprot.readI32();
-              struct.setNullOrderIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -530,9 +445,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
       oprot.writeFieldBegin(ORDER_FIELD_DESC);
       oprot.writeI32(struct.order);
       oprot.writeFieldEnd();
-      oprot.writeFieldBegin(NULL_ORDER_FIELD_DESC);
-      oprot.writeI32(struct.nullOrder);
-      oprot.writeFieldEnd();
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -557,25 +469,19 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
       if (struct.isSetOrder()) {
         optionals.set(1);
       }
-      if (struct.isSetNullOrder()) {
-        optionals.set(2);
-      }
-      oprot.writeBitSet(optionals, 3);
+      oprot.writeBitSet(optionals, 2);
       if (struct.isSetCol()) {
         oprot.writeString(struct.col);
       }
       if (struct.isSetOrder()) {
         oprot.writeI32(struct.order);
       }
-      if (struct.isSetNullOrder()) {
-        oprot.writeI32(struct.nullOrder);
-      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, Order struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         struct.col = iprot.readString();
         struct.setColIsSet(true);
@@ -584,10 +490,6 @@ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, jav
         struct.order = iprot.readI32();
         struct.setOrderIsSet(true);
       }
-      if (incoming.get(2)) {
-        struct.nullOrder = iprot.readI32();
-        struct.setNullOrderIsSet(true);
-      }
     }
   }
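
At the Thrift-bean level the revert lands as a two-argument constructor
again. A minimal usage sketch, assuming the hive-metastore jar on the
classpath (column name hypothetical):

    import org.apache.hadoop.hive.metastore.api.Order;

    // Only col and order survive the revert; the nullOrder accessors are gone.
    Order sortKey = new Order("viewtime", 1);   // order 1 == ascending
    assert sortKey.isSetCol() && sortKey.isSetOrder();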
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 4da4707..488a920 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -3076,10 +3076,6 @@ class Order {
    * @var int
    */
   public $order = null;
-  /**
-   * @var int
-   */
-  public $nullOrder = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -3092,10 +3088,6 @@ class Order {
           'var' => 'order',
           'type' => TType::I32,
           ),
-        3 => array(
-          'var' => 'nullOrder',
-          'type' => TType::I32,
-          ),
         );
     }
     if (is_array($vals)) {
@@ -3105,9 +3097,6 @@ class Order {
       if (isset($vals['order'])) {
         $this->order = $vals['order'];
       }
-      if (isset($vals['nullOrder'])) {
-        $this->nullOrder = $vals['nullOrder'];
-      }
     }
   }
 
@@ -3144,13 +3133,6 @@ class Order {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 3:
-          if ($ftype == TType::I32) {
-            $xfer += $input->readI32($this->nullOrder);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -3174,11 +3156,6 @@ class Order {
       $xfer += $output->writeI32($this->order);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->nullOrder !== null) {
-      $xfer += $output->writeFieldBegin('nullOrder', TType::I32, 3);
-      $xfer += $output->writeI32($this->nullOrder);
-      $xfer += $output->writeFieldEnd();
-    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 4b20da9..10eaf4a 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -2237,20 +2237,17 @@ class Order:
   Attributes:
    - col
    - order
-   - nullOrder
   """
 
   thrift_spec = (
     None, # 0
     (1, TType.STRING, 'col', None, None, ), # 1
     (2, TType.I32, 'order', None, None, ), # 2
-    (3, TType.I32, 'nullOrder', None, None, ), # 3
   )
 
-  def __init__(self, col=None, order=None, nullOrder=None,):
+  def __init__(self, col=None, order=None,):
     self.col = col
     self.order = order
-    self.nullOrder = nullOrder
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -2271,11 +2268,6 @@ class Order:
           self.order = iprot.readI32()
         else:
           iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I32:
-          self.nullOrder = iprot.readI32()
-        else:
-          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -2294,10 +2286,6 @@ class Order:
       oprot.writeFieldBegin('order', TType.I32, 2)
       oprot.writeI32(self.order)
       oprot.writeFieldEnd()
-    if self.nullOrder is not None:
-      oprot.writeFieldBegin('nullOrder', TType.I32, 3)
-      oprot.writeI32(self.nullOrder)
-      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -2309,7 +2297,6 @@ class Order:
     value = 17
     value = (value * 31) ^ hash(self.col)
     value = (value * 31) ^ hash(self.order)
-    value = (value * 31) ^ hash(self.nullOrder)
     return value
 
   def __repr__(self):

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 556c380..1cf40ae 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -557,12 +557,10 @@ class Order
   include ::Thrift::Struct, ::Thrift::Struct_Union
   COL = 1
   ORDER = 2
-  NULLORDER = 3
 
   FIELDS = {
     COL => {:type => ::Thrift::Types::STRING, :name => 'col'},
-    ORDER => {:type => ::Thrift::Types::I32, :name => 'order'},
-    NULLORDER => {:type => ::Thrift::Types::I32, :name => 'nullOrder'}
+    ORDER => {:type => ::Thrift::Types::I32, :name => 'order'}
   }
 
   def struct_fields; FIELDS; end

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 3b31ee1..26fbd20 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.metastore;
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.commons.lang.StringUtils.repeat;
 
-import com.google.common.collect.Lists;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.text.ParseException;
@@ -32,10 +31,12 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
+
 import javax.jdo.PersistenceManager;
 import javax.jdo.Query;
 import javax.jdo.Transaction;
 import javax.jdo.datastore.JDOConnection;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -72,6 +73,8 @@ import org.datanucleus.store.rdbms.query.ForwardQueryResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.collect.Lists;
+
 /**
  * This class contains the optimizations for MetaStore that rely on direct SQL access to
  * the underlying database. It should use ANSI SQL and be compatible with common databases
@@ -672,7 +675,7 @@ class MetaStoreDirectSql {
       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
     }
 
-    queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\", \"SORT_COLS\".\"NULL_ORDER\""
+    queryText = "select \"SD_ID\", \"COLUMN_NAME\", \"SORT_COLS\".\"ORDER\""
         + " from \"SORT_COLS\""
         + " where \"SD_ID\" in (" + sdIds + ") and \"INTEGER_IDX\" >= 0"
         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
@@ -680,8 +683,7 @@ class MetaStoreDirectSql {
       @Override
       public void apply(StorageDescriptor t, Object[] fields) {
         if (fields[2] == null) return;
-        assert fields[3] != null;
-        t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2]), extractSqlInt(fields[3])));
+        t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2])));
       }});
 
     queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from \"BUCKETING_COLS\""

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 1d04ef2..70cb618 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -37,11 +37,8 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
@@ -57,10 +54,6 @@ import javax.jdo.Transaction;
 import javax.jdo.datastore.DataStoreCache;
 import javax.jdo.identity.IntIdentity;
 
-import com.google.common.annotations.VisibleForTesting;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -145,19 +138,22 @@ import org.apache.hadoop.hive.metastore.model.MType;
 import org.apache.hadoop.hive.metastore.model.MVersionTable;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.thrift.TException;
+import org.datanucleus.AbstractNucleusContext;
 import org.datanucleus.ClassLoaderResolver;
 import org.datanucleus.NucleusContext;
-import org.datanucleus.AbstractNucleusContext;
 import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
 import org.datanucleus.store.rdbms.exceptions.MissingTableException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 
 /**
@@ -1367,8 +1363,7 @@ public class ObjectStore implements RawStore, Configurable {
     if (keys != null) {
       mkeys = new ArrayList<MOrder>(keys.size());
       for (Order part : keys) {
-        mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder(),
-                part.getNullOrder()));
+        mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder()));
       }
     }
     return mkeys;
@@ -1379,7 +1374,7 @@ public class ObjectStore implements RawStore, Configurable {
     if (mkeys != null) {
       keys = new ArrayList<Order>(mkeys.size());
       for (MOrder part : mkeys) {
-        keys.add(new Order(part.getCol(), part.getOrder(), part.getNullOrder()));
+        keys.add(new Order(part.getCol(), part.getOrder()));
       }
     }
     return keys;
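
The JDO round-trip is symmetric again. A small sketch of the conversion
pair above, with a hypothetical column name (normalizeIdentifier
lower-cases the identifier on the way into the model layer):

    import org.apache.hadoop.hive.metastore.api.Order;
    import org.apache.hadoop.hive.metastore.model.MOrder;
    import org.apache.hive.common.util.HiveStringUtils;

    Order thriftSide = new Order("ViewTime", 1);                   // hypothetical input
    MOrder persisted = new MOrder(
        HiveStringUtils.normalizeIdentifier(thriftSide.getCol()),  // stored as "viewtime"
        thriftSide.getOrder());
    Order roundTripped = new Order(persisted.getCol(), persisted.getOrder());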

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index a16997b..9ec7cd5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -18,12 +18,22 @@
  */
 package org.apache.hadoop.hive.metastore.hbase;
 
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.InvalidProtocolBufferException;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
 import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
@@ -68,21 +78,12 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hive.common.util.BloomFilter;
 import org.apache.hive.common.util.HiveStringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.security.MessageDigest;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
  * Utility functions
@@ -707,7 +708,7 @@ class HBaseUtils {
     sd.setBucketCols(new ArrayList<>(proto.getBucketColsList()));
     List<Order> sortCols = new ArrayList<>();
     for (HbaseMetastoreProto.StorageDescriptor.Order protoOrder : proto.getSortColsList()) {
-      sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder(), protoOrder.getNullOrder()));
+      sortCols.add(new Order(protoOrder.getColumnName(), protoOrder.getOrder()));
     }
     sd.setSortCols(sortCols);
     if (proto.hasSkewedInfo()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
----------------------------------------------------------------------
diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
index 5370c02..732c278 100644
--- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
+++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MOrder.java
@@ -21,16 +21,14 @@ package org.apache.hadoop.hive.metastore.model;
 public class MOrder {
   private String col;
   private int order;
-  private int nullOrder;
 
   /**
    * @param col
    * @param order
    */
-  public MOrder(String col, int order, int nullOrder) {
+  public MOrder(String col, int order) {
     this.col = col;
     this.order = order;
-    this.nullOrder = nullOrder;
   }
 
   /**
@@ -61,18 +59,4 @@ public class MOrder {
     this.order = order;
   }
 
-  /**
-   * @return the null order
-   */
-  public int getNullOrder() {
-    return nullOrder;
-  }
-
-  /**
-   * @param nullOrder the null order to set
-   */
-  public void setNullOrder(int nullOrder) {
-    this.nullOrder = nullOrder;
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
----------------------------------------------------------------------
diff --git a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
index 552097b..466fdf9 100644
--- a/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
+++ b/metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto
@@ -205,7 +205,6 @@ message StorageDescriptor {
   message Order {
     required string column_name = 1;
     optional sint32 order = 2 [default = 1];
-    optional sint32 nullOrder = 3 [default = 0];
   }
 
   message SerDeInfo {

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
index d938a03..e4723f6 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
@@ -402,7 +402,7 @@ public class TestHBaseStore {
     Map<String, String> params = new HashMap<String, String>();
     params.put("key", "value");
     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
     Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
         emptyParameters, null, null, null);
     store.createTable(table);
@@ -424,7 +424,6 @@ public class TestHBaseStore {
     Assert.assertEquals(1, t.getSd().getSortColsSize());
     Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol());
     Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder());
-    Assert.assertEquals(0, t.getSd().getSortCols().get(0).getNullOrder());
     Assert.assertEquals(1, t.getSd().getParametersSize());
     Assert.assertEquals("value", t.getSd().getParameters().get("key"));
     Assert.assertEquals("me", t.getOwner());
@@ -1274,7 +1273,7 @@ public class TestHBaseStore {
     Map<String, String> params = new HashMap<String, String>();
     params.put("key", "value");
     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
     int currentTime = (int)(System.currentTimeMillis() / 1000);
     Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
         emptyParameters, null, null, null);
@@ -1292,7 +1291,7 @@ public class TestHBaseStore {
     Map<String, String> params = new HashMap<String, String>();
     params.put("key", "value");
     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
     int currentTime = (int)(System.currentTimeMillis() / 1000);
     Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
         emptyParameters, null, null, null);

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
index 570d023..b1dc542 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java
@@ -583,7 +583,7 @@ public class TestHBaseStoreBitVector {
     Map<String, String> params = new HashMap<String, String>();
     params.put("key", "value");
     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
-        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1, 0)), params);
+        serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
     int currentTime = (int)(System.currentTimeMillis() / 1000);
     Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
         emptyParameters, null, null, null);

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
index 8e856a1..bea0b34 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestSharedStorageDescriptor.java
@@ -79,22 +79,19 @@ public class TestSharedStorageDescriptor {
   @Test
   public void changeOrder() {
     StorageDescriptor sd = new StorageDescriptor();
-    sd.addToSortCols(new Order("fred", 1, 0));
+    sd.addToSortCols(new Order("fred", 1));
     SharedStorageDescriptor ssd = new SharedStorageDescriptor();
     ssd.setShared(sd);
     ssd.getSortCols().get(0).setOrder(2);
-    ssd.getSortCols().get(0).setNullOrder(3);
     Assert.assertFalse(sd.getSortCols() == ssd.getSortCols());
     Assert.assertEquals(2, ssd.getSortCols().get(0).getOrder());
     Assert.assertEquals(1, sd.getSortCols().get(0).getOrder());
-    Assert.assertEquals(3, ssd.getSortCols().get(0).getNullOrder());
-    Assert.assertEquals(0, sd.getSortCols().get(0).getNullOrder());
   }
 
   @Test
   public void unsetOrder() {
     StorageDescriptor sd = new StorageDescriptor();
-    sd.addToSortCols(new Order("fred", 1, 0));
+    sd.addToSortCols(new Order("fred", 1));
     SharedStorageDescriptor ssd = new SharedStorageDescriptor();
     ssd.setShared(sd);
     ssd.unsetSortCols();

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 70afe16..c51cfd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -2140,11 +2140,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
             else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
               sortKeyDesc = sortKeyDesc + "DESC";
             }
-            if (sortCol.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST) {
-              sortKeyDesc = sortKeyDesc + " NULLS FIRST";
-            } else if (sortCol.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_LAST) {
-              sortKeyDesc = sortKeyDesc + " NULLS LAST";
-            }
             sortKeys.add(sortKeyDesc);
           }
           tbl_sort_bucket += StringUtils.join(sortKeys, ", \n");
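
Net effect here: sort keys in SHOW CREATE TABLE-style output render with a
direction only. A hedged reconstruction of the per-key rendering that
remains (the lines seeding sortKeyDesc from the column name sit outside
this hunk, so this is an approximation):

    String sortKeyDesc = sortCol.getCol() + " ";
    if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
      sortKeyDesc = sortKeyDesc + "ASC";
    } else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) {
      sortKeyDesc = sortKeyDesc + "DESC";
    }
    // e.g. "viewtime ASC" -- no " NULLS FIRST"/" NULLS LAST" suffix any more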

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 74cb2e0..4a546d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -593,9 +592,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
           newSortCols.add(new Order(
             partn.getCols().get(sortCol.getIndexes().get(0)).getName(),
             sortCol.getSortOrder() == '+' ? BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC :
-              BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC,
-            sortCol.getNullSortOrder() == 'a' ? BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST :
-              BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_LAST));
+              BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC));
         } else {
           // If the table is sorted on a partition column, not valid for sorting
           updateSortCols = false;
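
MoveTask translates the optimizer's char-encoded sort order into the
metastore constants. A minimal sketch of the post-revert mapping (names as
in the hunk above; columnName is hypothetical):

    // '+' is the optimizer's ascending marker; anything else maps to descending.
    int order = (sortCol.getSortOrder() == '+')
        ? BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC
        : BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC;
    Order metastoreOrder = new Order(columnName, order);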

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index ad17096..fdc7956 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -946,7 +946,7 @@ public class Hive {
         FieldSchema col = cols.get(i);
         if (indexedCols.contains(col.getName())) {
           indexTblCols.add(col);
-          sortCols.add(new Order(col.getName(), 1, 0));
+          sortCols.add(new Order(col.getName(), 1));
           k++;
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
index 677649d..b57dc77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
@@ -357,8 +357,7 @@ abstract public class AbstractSMBJoinProc extends AbstractBucketJoinProc impleme
       Order o = sortCols.get(pos);
 
       if (pos < sortColumnsFirstPartition.size()) {
-        if (o.getOrder() != sortColumnsFirstPartition.get(pos).getOrder() ||
-                o.getNullOrder() != sortColumnsFirstPartition.get(pos).getNullOrder()) {
+        if (o.getOrder() != sortColumnsFirstPartition.get(pos).getOrder()) {
           return false;
         }
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index 3d580d8..da261bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.Stack;
 
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -170,35 +168,16 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
         List<FieldSchema> tabCols) {
       List<Integer> sortOrders = new ArrayList<Integer>();
       for (Order sortCol : tabSortCols) {
-        int pos = 0;
         for (FieldSchema tabCol : tabCols) {
           if (sortCol.getCol().equals(tabCol.getName())) {
             sortOrders.add(sortCol.getOrder());
             break;
           }
-          pos++;
         }
       }
       return sortOrders;
     }
 
-    private List<Integer> getNullSortOrder(
-        List<Order> tabSortCols,
-        List<FieldSchema> tabCols) {
-      List<Integer> nullSortOrders = new ArrayList<Integer>();
-      for (Order sortCol : tabSortCols) {
-        int pos = 0;
-        for (FieldSchema tabCol : tabCols) {
-          if (sortCol.getCol().equals(tabCol.getName())) {
-            nullSortOrders.add(sortCol.getNullOrder());
-            break;
-          }
-          pos++;
-        }
-      }
-      return nullSortOrders;
-    }
-
     // Return true if the partition is bucketed/sorted by the specified positions
     // The number of buckets, the sort order should also match along with the
     // columns which are bucketed/sorted
@@ -206,7 +185,6 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
         List<Integer> bucketPositionsDest,
         List<Integer> sortPositionsDest,
         List<Integer> sortOrderDest,
-        List<Integer> sortNullOrderDest,
         int numBucketsDest) {
       // The bucketing and sorting positions should exactly match
       int numBuckets = partition.getBucketCount();
@@ -220,12 +198,9 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
           getSortPositions(partition.getSortCols(), partition.getTable().getCols());
       List<Integer> sortOrder =
           getSortOrder(partition.getSortCols(), partition.getTable().getCols());
-      List<Integer> sortNullOrder =
-          getNullSortOrder(partition.getSortCols(), partition.getTable().getCols());
       return bucketPositionsDest.equals(partnBucketPositions) &&
           sortPositionsDest.equals(sortPositions) &&
-          sortOrderDest.equals(sortOrder) &&
-          sortNullOrderDest.equals(sortNullOrder);
+          sortOrderDest.equals(sortOrder);
     }
 
     // Return true if the table is bucketed/sorted by the specified positions
@@ -235,7 +210,6 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
         List<Integer> bucketPositionsDest,
         List<Integer> sortPositionsDest,
         List<Integer> sortOrderDest,
-        List<Integer> sortNullOrderDest,
         int numBucketsDest) {
       // The bucketing and sorting positions should exactly match
       int numBuckets = table.getNumBuckets();
@@ -249,12 +223,9 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
           getSortPositions(table.getSortCols(), table.getCols());
       List<Integer> sortOrder =
           getSortOrder(table.getSortCols(), table.getCols());
-      List<Integer> sortNullOrder =
-          getNullSortOrder(table.getSortCols(), table.getCols());
       return bucketPositionsDest.equals(tableBucketPositions) &&
           sortPositionsDest.equals(sortPositions) &&
-          sortOrderDest.equals(sortOrder) &&
-          sortNullOrderDest.equals(sortNullOrder);
+          sortOrderDest.equals(sortOrder);
     }
 
     // Store the bucket path to bucket number mapping in the table scan operator.
@@ -332,8 +303,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
     private boolean validateSMBJoinKeys(SMBJoinDesc smbJoinDesc,
         List<ExprNodeColumnDesc> sourceTableBucketCols,
         List<ExprNodeColumnDesc> sourceTableSortCols,
-        List<Integer> sortOrder,
-        List<Integer> sortNullOrder) {
+        List<Integer> sortOrder) {
       // The sort-merge join creates the output sorted and bucketized by the same columns.
       // This can be relaxed in the future if there is a requirement.
       if (!sourceTableBucketCols.equals(sourceTableSortCols)) {
@@ -475,8 +445,6 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
           getSortPositions(destTable.getSortCols(), destTable.getCols());
       List<Integer> sortOrder =
           getSortOrder(destTable.getSortCols(), destTable.getCols());
-      List<Integer> sortNullOrder =
-          getNullSortOrder(destTable.getSortCols(), destTable.getCols());
       boolean useBucketSortPositions = true;
 
       // Only selects and filters are allowed
@@ -511,7 +479,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
           }
 
           if (!validateSMBJoinKeys(smbJoinDesc, sourceTableBucketCols,
-              sourceTableSortCols, sortOrder, sortNullOrder)) {
+              sourceTableSortCols, sortOrder)) {
             return null;
           }
 
@@ -586,7 +554,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
               }
               for (Partition partition : partitions) {
                 if (!checkPartition(partition, newBucketPositions, newSortPositions, sortOrder,
-                    sortNullOrder, numBucketsDestination)) {
+                    numBucketsDestination)) {
                   return null;
                 }
               }
@@ -597,7 +565,7 @@ public class BucketingSortingReduceSinkOptimizer extends Transform {
             }
             else {
               if (!checkTable(srcTable, newBucketPositions, newSortPositions, sortOrder,
-                  sortNullOrder, numBucketsDestination)) {
+                  numBucketsDestination)) {
                 return null;
               }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index 3e6c7c7..adfbb67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.lib.Rule;
 import org.apache.hadoop.hive.ql.lib.RuleRegExp;
 import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
@@ -185,7 +183,6 @@ public class SortedDynPartitionOptimizer extends Transform {
           destTable.getCols());
       List<Integer> sortPositions = null;
       List<Integer> sortOrder = null;
-      List<Integer> sortNullOrder = null;
       if (fsOp.getConf().getWriteType() == AcidUtils.Operation.UPDATE ||
           fsOp.getConf().getWriteType() == AcidUtils.Operation.DELETE) {
         // When doing updates and deletes we always want to sort on the rowid because the ACID
@@ -193,11 +190,13 @@ public class SortedDynPartitionOptimizer extends Transform {
         // ignore whatever comes from the table and enforce this sort order instead.
         sortPositions = Arrays.asList(0);
         sortOrder = Arrays.asList(1); // 1 means asc, could really use enum here in the thrift if
-        sortNullOrder = Arrays.asList(0);
       } else {
         sortPositions = getSortPositions(destTable.getSortCols(), destTable.getCols());
         sortOrder = getSortOrders(destTable.getSortCols(), destTable.getCols());
-        sortNullOrder = getSortNullOrders(destTable.getSortCols(), destTable.getCols());
+      }
+      List<Integer> sortNullOrder = new ArrayList<Integer>();
+      for (int order : sortOrder) {
+        sortNullOrder.add(order == 1 ? 0 : 1); // for asc, nulls first; for desc, nulls last
       }
       LOG.debug("Got sort order");
       for (int i : sortPositions) LOG.debug("sort position " + i);
@@ -597,26 +596,6 @@ public class SortedDynPartitionOptimizer extends Transform {
       return sortOrders;
     }
 
-    /**
-     * Get the null sort order for the sort columns
-     * @param tabSortCols
-     * @param tabCols
-     * @return
-     */
-    private List<Integer> getSortNullOrders(List<Order> tabSortCols,
-        List<FieldSchema> tabCols) {
-      List<Integer> sortNullOrders = Lists.newArrayList();
-      for (Order sortCol : tabSortCols) {
-        for (FieldSchema tabCol : tabCols) {
-          if (sortCol.getCol().equals(tabCol.getName())) {
-            sortNullOrders.add(sortCol.getNullOrder());
-            break;
-          }
-        }
-      }
-      return sortNullOrders;
-    }
-
     private ArrayList<ExprNodeDesc> getPositionsToExprNodes(List<Integer> pos,
         List<ColumnInfo> colInfos) {
       ArrayList<ExprNodeDesc> cols = Lists.newArrayList();
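
The interesting part of this hunk: the null sort order is no longer read
from table metadata but derived from the sort direction. A self-contained
sketch of that derivation, using the encoding from the inline comment
above (0 == nulls first, 1 == nulls last):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    List<Integer> sortOrder = Arrays.asList(1, 0);   // hypothetical: asc, desc
    List<Integer> sortNullOrder = new ArrayList<Integer>();
    for (int order : sortOrder) {
      sortNullOrder.add(order == 1 ? 0 : 1);
    }
    // [1, 0] -> [0, 1]: ascending sorts nulls first, descending sorts nulls last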

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index a95da0a..73ca9bf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -40,8 +40,6 @@ import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -59,6 +57,8 @@ import org.apache.hadoop.hive.ql.plan.ColStatistics;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.Statistics;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
@@ -160,17 +160,13 @@ public class RelOptHiveTable extends RelOptAbstractTable {
         FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
         if (field.getName().equals(sortColumn.getCol())) {
           Direction direction;
+          NullDirection nullDirection;
           if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
             direction = Direction.ASCENDING;
-          }
-          else {
-            direction = Direction.DESCENDING;
-          }
-          NullDirection nullDirection;
-          if (sortColumn.getNullOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_NULLS_FIRST) {
             nullDirection = NullDirection.FIRST;
           }
           else {
+            direction = Direction.DESCENDING;
             nullDirection = NullDirection.LAST;
           }
           collationList.add(new RelFieldCollation(i,direction,nullDirection));
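
On the Calcite side the same coupling shows up in the collation trait: the
null direction now follows the sort direction instead of coming from the
metastore. A minimal sketch of the two collations the post-revert branch
can produce (field index hypothetical):

    import org.apache.calcite.rel.RelFieldCollation;
    import org.apache.calcite.rel.RelFieldCollation.Direction;
    import org.apache.calcite.rel.RelFieldCollation.NullDirection;

    int i = 0;   // hypothetical field index
    RelFieldCollation asc  = new RelFieldCollation(i, Direction.ASCENDING,  NullDirection.FIRST);
    RelFieldCollation desc = new RelFieldCollation(i, Direction.DESCENDING, NullDirection.LAST);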

http://git-wip-us.apache.org/repos/asf/hive/blob/cc8893e4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
index ea3e179..296fecb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java
@@ -201,16 +201,14 @@ public class BucketingSortingCtx implements NodeProcessorCtx {
     private List<Integer> indexes = new ArrayList<Integer>();
     // Sort order (+|-)
     private char sortOrder;
-    private char nullSortOrder;
 
-    public SortCol(String name, int index, char sortOrder, char nullSortOrder) {
-      this(sortOrder, nullSortOrder);
+    public SortCol(String name, int index, char sortOrder) {
+      this(sortOrder);
       addAlias(name, index);
     }
 
-    public SortCol(char sortOrder, char nullSortOrder) {
+    public SortCol(char sortOrder) {
       this.sortOrder = sortOrder;
-      this.nullSortOrder = nullSortOrder;
     }
 
 
@@ -234,16 +232,11 @@ public class BucketingSortingCtx implements NodeProcessorCtx {
       return sortOrder;
     }
 
-    public char getNullSortOrder() {
-      return nullSortOrder;
-    }
-
     @Override
     // Chooses a representative alias, index, and order to use as the String, the first is used
     // because it is set in the constructor
     public String toString() {
-      return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder
-              + " nullOrder: " + nullSortOrder;
+      return "name: " + names.get(0) + " index: " + indexes.get(0) + " order: " + sortOrder;
     }
   }
 }
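
SortCol is back to tracking just the (+|-) direction. A minimal sketch of
the reverted constructor and its toString, with a hypothetical alias and
index:

    import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;

    // name, index, '+' for ascending ('-' for descending)
    SortCol sc = new SortCol("_col0", 0, '+');
    // sc.toString() -> "name: _col0 index: 0 order: +"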