Posted to commits@hive.apache.org by gu...@apache.org on 2014/08/05 09:23:07 UTC

svn commit: r1615872 [11/12] - in /hive/branches/cbo: ./ bin/ common/ common/src/java/org/apache/hadoop/hive/conf/ conf/ contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/ contrib/src/test/queries/clientnegative/ contrib/src/test/queries/...

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java Tue Aug  5 07:23:02 2014
@@ -28,8 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,8 +41,8 @@ import org.apache.hadoop.hive.ql.io.orc.
 import org.apache.hadoop.hive.ql.io.orc.OrcProto.RowIndexEntry;
 import org.apache.hadoop.hive.ql.io.orc.OrcProto.StripeStatistics;
 import org.apache.hadoop.hive.ql.io.orc.OrcProto.Type;
+import org.apache.hadoop.hive.ql.io.orc.OrcProto.UserMetadataItem;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
-import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
@@ -73,6 +71,7 @@ import org.apache.hadoop.hive.serde2.typ
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedOutputStream;
@@ -2218,4 +2217,78 @@ class WriterImpl implements Writer, Memo
     }
     return rawWriter.getPos();
   }
+
+  void appendStripe(byte[] stripe, StripeInformation stripeInfo,
+      OrcProto.StripeStatistics stripeStatistics) throws IOException {
+    appendStripe(stripe, 0, stripe.length, stripeInfo, stripeStatistics);
+  }
+
+  void appendStripe(byte[] stripe, int offset, int length,
+      StripeInformation stripeInfo,
+      OrcProto.StripeStatistics stripeStatistics) throws IOException {
+    getStream();
+    long start = rawWriter.getPos();
+
+    long stripeLen = length;
+    long availBlockSpace = blockSize - (start % blockSize);
+
+    // see if stripe can fit in the current hdfs block, else pad the remaining
+    // space in the block
+    if (stripeLen < blockSize && stripeLen > availBlockSpace &&
+        addBlockPadding) {
+      byte[] pad = new byte[(int) Math.min(HDFS_BUFFER_SIZE, availBlockSpace)];
+      LOG.info(String.format("Padding ORC by %d bytes while merging..",
+          availBlockSpace));
+      start += availBlockSpace;
+      while (availBlockSpace > 0) {
+        int writeLen = (int) Math.min(availBlockSpace, pad.length);
+        rawWriter.write(pad, 0, writeLen);
+        availBlockSpace -= writeLen;
+      }
+    }
+
+    rawWriter.write(stripe);
+    rowsInStripe = stripeStatistics.getColStats(0).getNumberOfValues();
+    rowCount += rowsInStripe;
+
+    // since we have already written the stripe, just update stripe statistics
+    treeWriter.stripeStatsBuilders.add(stripeStatistics.toBuilder());
+
+    // update file level statistics
+    updateFileStatistics(stripeStatistics);
+
+    // update stripe information
+    OrcProto.StripeInformation dirEntry = OrcProto.StripeInformation
+        .newBuilder()
+        .setOffset(start)
+        .setNumberOfRows(rowsInStripe)
+        .setIndexLength(stripeInfo.getIndexLength())
+        .setDataLength(stripeInfo.getDataLength())
+        .setFooterLength(stripeInfo.getFooterLength())
+        .build();
+    stripes.add(dirEntry);
+
+    // reset it after writing the stripe
+    rowsInStripe = 0;
+  }
+
+  private void updateFileStatistics(OrcProto.StripeStatistics stripeStatistics) {
+    List<OrcProto.ColumnStatistics> cs = stripeStatistics.getColStatsList();
+
+    // root element
+    treeWriter.fileStatistics.merge(ColumnStatisticsImpl.deserialize(cs.get(0)));
+    TreeWriter[] childWriters = treeWriter.getChildrenWriters();
+    for (int i = 0; i < childWriters.length; i++) {
+      childWriters[i].fileStatistics.merge(
+          ColumnStatisticsImpl.deserialize(cs.get(i + 1)));
+    }
+  }
+
+  void appendUserMetadata(List<UserMetadataItem> userMetadata) {
+    if (userMetadata != null) {
+      for (UserMetadataItem item : userMetadata) {
+        this.userMetadata.put(item.getName(), item.getValue());
+      }
+    }
+  }
 }
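
The new package-private appendStripe() and appendUserMetadata() methods added above are the hooks a stripe-level merge task drives. A minimal calling sketch, assuming the caller lives in the same org.apache.hadoop.hive.ql.io.orc package and has already read each source stripe's raw bytes, StripeInformation and StripeStatistics; the holder lists and the OrcStripeMergeSketch class are illustrative only, not part of this commit:

package org.apache.hadoop.hive.ql.io.orc;

import java.io.IOException;
import java.util.List;

class OrcStripeMergeSketch {
  // Sketch only: copy every stripe of one already-read source file into the
  // merged output writer, then carry its user metadata across.
  static void mergeOneFile(WriterImpl writer,
      List<byte[]> stripeBytes,
      List<StripeInformation> stripeInfos,
      List<OrcProto.StripeStatistics> stripeStats,
      List<OrcProto.UserMetadataItem> userMetadata) throws IOException {
    for (int i = 0; i < stripeBytes.size(); i++) {
      // appendStripe() writes the raw stripe bytes (padding to an HDFS block
      // boundary when needed) and folds the stripe's column statistics into
      // the file-level statistics.
      writer.appendStripe(stripeBytes.get(i), stripeInfos.get(i), stripeStats.get(i));
    }
    writer.appendUserMetadata(userMetadata);
  }
}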

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileBlockMergeInputFormat.java Tue Aug  5 07:23:02 2014
@@ -19,22 +19,22 @@
 package org.apache.hadoop.hive.ql.io.rcfile.merge;
 
 import java.io.IOException;
-import org.apache.hadoop.mapred.FileInputFormat;
+
+import org.apache.hadoop.hive.ql.io.merge.MergeInputFormat;
 import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
-@SuppressWarnings({ "deprecation", "unchecked" })
-public class RCFileBlockMergeInputFormat extends FileInputFormat {
+public class RCFileBlockMergeInputFormat extends MergeInputFormat {
 
   @Override
-  public RecordReader getRecordReader(InputSplit split, JobConf job,
-      Reporter reporter) throws IOException {
+  public RecordReader<RCFileKeyBufferWrapper, RCFileValueBufferWrapper>
+    getRecordReader(InputSplit split, JobConf job, Reporter reporter)
+    throws IOException {
 
     reporter.setStatus(split.toString());
-
     return new RCFileBlockMergeRecordReader(job, (FileSplit) split);
   }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/RCFileMergeMapper.java Tue Aug  5 07:23:02 2014
@@ -22,89 +22,25 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.io.RCFile;
 import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
-import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.ql.io.merge.MergeMapper;
 import org.apache.hadoop.hive.shims.CombineHiveKey;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapred.Mapper;
 import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reporter;
 
 @SuppressWarnings("deprecation")
-public class RCFileMergeMapper extends MapReduceBase implements
+public class RCFileMergeMapper extends MergeMapper implements
     Mapper<Object, RCFileValueBufferWrapper, Object, Object> {
 
-  private JobConf jc;
-  Class<? extends Writable> outputClass;
   RCFile.Writer outWriter;
 
-  Path finalPath;
-  FileSystem fs;
-
-  boolean exception = false;
-  boolean autoDelete = false;
-  Path outPath;
-
   CompressionCodec codec = null;
   int columnNumber = 0;
-
-  boolean hasDynamicPartitions = false;
-  boolean isListBucketingDML = false;
-  boolean isListBucketingAlterTableConcatenate = false;
-  int listBucketingDepth; // used as depth for dir-calculation and if it is list bucketing case.
-  boolean tmpPathFixedConcatenate = false;
-  boolean tmpPathFixed = false;
-  Path tmpPath;
-  Path taskTmpPath;
-  Path dpPath;
-
   public final static Log LOG = LogFactory.getLog("RCFileMergeMapper");
 
-  public RCFileMergeMapper() {
-  }
-
-  @Override
-  public void configure(JobConf job) {
-    jc = job;
-    hasDynamicPartitions = HiveConf.getBoolVar(job,
-        HiveConf.ConfVars.HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS);
-    isListBucketingAlterTableConcatenate = HiveConf.getBoolVar(job,
-        HiveConf.ConfVars.HIVEMERGECURRENTJOBCONCATENATELISTBUCKETING);
-    listBucketingDepth = HiveConf.getIntVar(job,
-        HiveConf.ConfVars.HIVEMERGECURRENTJOBCONCATENATELISTBUCKETINGDEPTH);
-
-    Path specPath = RCFileBlockMergeOutputFormat.getMergeOutputPath(job);
-    Path tmpPath = Utilities.toTempPath(specPath);
-    Path taskTmpPath = Utilities.toTaskTempPath(specPath);
-    updatePaths(tmpPath, taskTmpPath);
-    try {
-      fs = specPath.getFileSystem(job);
-      autoDelete = fs.deleteOnExit(outPath);
-    } catch (IOException e) {
-      this.exception = true;
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void updatePaths(Path tmpPath, Path taskTmpPath) {
-    String taskId = Utilities.getTaskId(jc);
-    this.tmpPath = tmpPath;
-    this.taskTmpPath = taskTmpPath;
-    finalPath = new Path(tmpPath, taskId);
-    outPath = new Path(taskTmpPath, Utilities.toTempPath(taskId));
-  }
-
   @Override
   public void map(Object k, RCFileValueBufferWrapper value,
       OutputCollector<Object, Object> output, Reporter reporter)
@@ -118,35 +54,7 @@ public class RCFileMergeMapper extends M
         key = (RCFileKeyBufferWrapper) k;
       }
 
-      /**
-       * 1. boolean isListBucketingAlterTableConcatenate will be true only if it is alter table ...
-       * concatenate on stored-as-dir so it will handle list bucketing alter table merge in the if
-       * cause with the help of fixTmpPathConcatenate
-       * 2. If it is DML, isListBucketingAlterTableConcatenate will be false so that it will be
-       * handled by else cause. In this else cause, we have another if check.
-       * 2.1 the if check will make sure DP or LB, we will fix path with the help of fixTmpPath(..).
-       * Since both has sub-directories. it includes SP + LB.
-       * 2.2 only SP without LB, we dont fix path.
-       */
-      // Fix temp path for alter table ... concatenate
-      if (isListBucketingAlterTableConcatenate) {
-        if (this.tmpPathFixedConcatenate) {
-          checkPartitionsMatch(key.inputPath.getParent());
-        } else {
-          fixTmpPathConcatenate(key.inputPath.getParent());
-          tmpPathFixedConcatenate = true;
-        }
-      } else {
-        if (hasDynamicPartitions || (listBucketingDepth > 0)) {
-          if (tmpPathFixed) {
-            checkPartitionsMatch(key.inputPath.getParent());
-          } else {
-            // We haven't fixed the TMP path for this mapper yet
-            fixTmpPath(key.inputPath.getParent());
-            tmpPathFixed = true;
-          }
-        }
-      }
+      fixTmpPathAlterTable(key.inputPath.getParent());
 
       if (outWriter == null) {
         codec = key.codec;
@@ -172,106 +80,6 @@ public class RCFileMergeMapper extends M
     }
   }
 
-  /**
-   * Validates that each input path belongs to the same partition
-   * since each mapper merges the input to a single output directory
-   *
-   * @param inputPath
-   * @throws HiveException
-   */
-  private void checkPartitionsMatch(Path inputPath) throws HiveException {
-    if (!dpPath.equals(inputPath)) {
-      // Temp partition input path does not match exist temp path
-      String msg = "Multiple partitions for one block merge mapper: " +
-          dpPath + " NOT EQUAL TO " + inputPath;
-      LOG.error(msg);
-      throw new HiveException(msg);
-    }
-  }
-
-  /**
-   * Fixes tmpPath to point to the correct partition.
-   * Before this is called, tmpPath will default to the root tmp table dir
-   * fixTmpPath(..) works for DP + LB + multiple skewed values + merge. reason:
-   * 1. fixTmpPath(..) compares inputPath and tmpDepth, find out path difference and put it into
-   * newPath. Then add newpath to existing this.tmpPath and this.taskTmpPath.
-   * 2. The path difference between inputPath and tmpDepth can be DP or DP+LB. It will automatically
-   * handle it.
-   * 3. For example,
-   * if inputpath is <prefix>/-ext-10002/hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/
-   * HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME
-   * tmppath is <prefix>/_tmp.-ext-10000
-   * newpath will be hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME
-   * Then, this.tmpPath and this.taskTmpPath will be update correctly.
-   * We have list_bucket_dml_6.q cover this case: DP + LP + multiple skewed values + merge.
-   * @param inputPath
-   * @throws HiveException
-   * @throws IOException
-   */
-  private void fixTmpPath(Path inputPath)
-      throws HiveException, IOException {
-    dpPath = inputPath;
-    Path newPath = new Path(".");
-    int inputDepth = inputPath.depth();
-    int tmpDepth = tmpPath.depth();
-
-    // Build the path from bottom up
-    while (inputPath != null && inputPath.depth() > tmpDepth) {
-      newPath = new Path(inputPath.getName(), newPath);
-      inputDepth--;
-      inputPath = inputPath.getParent();
-    }
-
-    Path newTmpPath = new Path(tmpPath, newPath);
-    Path newTaskTmpPath = new Path(taskTmpPath, newPath);
-    if (!fs.exists(newTmpPath)) {
-      fs.mkdirs(newTmpPath);
-    }
-    updatePaths(newTmpPath, newTaskTmpPath);
-  }
-
-  /**
-   * Fixes tmpPath to point to the correct list bucketing sub-directories.
-   * Before this is called, tmpPath will default to the root tmp table dir
-   * Reason to add a new method instead of changing fixTmpPath()
-   * Reason 1: logic has slightly difference
-   * fixTmpPath(..) needs 2 variables in order to decide path delta which is in variable newPath.
-   * 1. inputPath.depth()
-   * 2. tmpPath.depth()
-   * fixTmpPathConcatenate needs 2 variables too but one of them is different from fixTmpPath(..)
-   * 1. inputPath.depth()
-   * 2. listBucketingDepth
-   * Reason 2: less risks
-   * The existing logic is a little not trivial around map() and fixTmpPath(). In order to ensure
-   * minimum impact on existing flow, we try to avoid change on existing code/flow but add new code
-   * for new feature.
-   *
-   * @param inputPath
-   * @throws HiveException
-   * @throws IOException
-   */
-  private void fixTmpPathConcatenate(Path inputPath)
-      throws HiveException, IOException {
-    dpPath = inputPath;
-    Path newPath = new Path(".");
-
-    int depth = listBucketingDepth;
-    // Build the path from bottom up. pick up list bucketing subdirectories
-    while ((inputPath != null) && (depth > 0)) {
-      newPath = new Path(inputPath.getName(), newPath);
-      inputPath = inputPath.getParent();
-      depth--;
-    }
-
-    Path newTmpPath = new Path(tmpPath, newPath);
-    Path newTaskTmpPath = new Path(taskTmpPath, newPath);
-    if (!fs.exists(newTmpPath)) {
-      fs.mkdirs(newTmpPath);
-    }
-    updatePaths(newTmpPath, newTaskTmpPath);
-  }
-
-
   @Override
   public void close() throws IOException {
     // close writer
@@ -282,42 +90,7 @@ public class RCFileMergeMapper extends M
     outWriter.close();
     outWriter = null;
 
-    if (!exception) {
-      FileStatus fss = fs.getFileStatus(outPath);
-      LOG.info("renamed path " + outPath + " to " + finalPath
-          + " . File size is " + fss.getLen());
-      if (!fs.rename(outPath, finalPath)) {
-        throw new IOException("Unable to rename output to " + finalPath);
-      }
-    } else {
-      if (!autoDelete) {
-        fs.delete(outPath, true);
-      }
-    }
-  }
-
-  public static String BACKUP_PREFIX = "_backup.";
-
-  public static Path backupOutputPath(FileSystem fs, Path outpath, JobConf job)
-      throws IOException, HiveException {
-    if (fs.exists(outpath)) {
-      Path backupPath = new Path(outpath.getParent(), BACKUP_PREFIX
-          + outpath.getName());
-      Utilities.rename(fs, outpath, backupPath);
-      return backupPath;
-    } else {
-      return null;
-    }
-  }
-
-  public static void jobClose(Path outputPath, boolean success, JobConf job,
-      LogHelper console, DynamicPartitionCtx dynPartCtx, Reporter reporter
-      ) throws HiveException, IOException {
-    FileSystem fs = outputPath.getFileSystem(job);
-    Path backupPath = backupOutputPath(fs, outputPath, job);
-    Utilities.mvFileToFinalPath(outputPath, job, success, LOG, dynPartCtx, null,
-      reporter);
-    fs.delete(backupPath, true);
+    super.close();
   }
 
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java Tue Aug  5 07:23:02 2014
@@ -207,6 +207,16 @@ public class DbLockManager implements Hi
     }
   }
 
+  /**
+   * Clear the in-memory record of the locks held by this object.  This does not
+   * release the locks in the database; it is intended for use by
+   * {@link DbTxnManager#commitTxn()} and
+   * {@link DbTxnManager#rollbackTxn()}.
+   */
+  void clearLocalLockRecords() {
+    locks.clear();
+  }
+
   // Sleep before we send checkLock again, but do it with a back off
   // off so we don't sit and hammer the metastore in a tight loop
   private void backoff() {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java Tue Aug  5 07:23:02 2014
@@ -203,6 +203,7 @@ public class DbTxnManager extends HiveTx
           "transaction");
     }
     try {
+      lockMgr.clearLocalLockRecords();
       LOG.debug("Committing txn " + txnId);
       client.commitTxn(txnId);
     } catch (NoSuchTxnException e) {
@@ -226,6 +227,7 @@ public class DbTxnManager extends HiveTx
           "transaction");
     }
     try {
+      lockMgr.clearLocalLockRecords();
       LOG.debug("Rolling back txn " + txnId);
       client.rollbackTxn(txnId);
     } catch (NoSuchTxnException e) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Tue Aug  5 07:23:02 2014
@@ -43,6 +43,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -63,6 +64,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -141,6 +143,10 @@ public class Hive {
     }
   };
 
+  public static Hive get(Configuration c, Class<?> clazz) throws HiveException {
+    return get(c instanceof HiveConf ? (HiveConf)c : new HiveConf(c, clazz));
+  }
+
   /**
    * Gets hive object for the current thread. If one is not initialized then a
    * new one is created If the new configuration is different in metadata conf
@@ -153,20 +159,13 @@ public class Hive {
    *
    */
   public static Hive get(HiveConf c) throws HiveException {
-    boolean needsRefresh = false;
     Hive db = hiveDB.get();
-    if (db != null) {
-      for (HiveConf.ConfVars oneVar : HiveConf.metaVars) {
-        // Since metaVars are all of different types, use string for comparison
-        String oldVar = db.getConf().get(oneVar.varname, "");
-        String newVar = c.get(oneVar.varname, "");
-        if (oldVar.compareToIgnoreCase(newVar) != 0) {
-          needsRefresh = true;
-          break;
-        }
-      }
+    if (db == null ||
+        (db.metaStoreClient != null && !db.metaStoreClient.isCompatibleWith(c))) {
+      return get(c, true);
     }
-    return get(c, needsRefresh);
+    db.conf = c;
+    return db;
   }
 
   /**
@@ -195,7 +194,8 @@ public class Hive {
   public static Hive get() throws HiveException {
     Hive db = hiveDB.get();
     if (db == null) {
-      db = new Hive(new HiveConf(Hive.class));
+      SessionState session = SessionState.get();
+      db = new Hive(session == null ? new HiveConf(Hive.class) : session.getConf());
       hiveDB.set(db);
     }
     return db;
@@ -2577,6 +2577,16 @@ private void constructOneLBLocationMap(F
     }
   }
 
+  public AggrStats getAggrColStatsFor(String dbName, String tblName,
+    List<String> colNames, List<String> partName) {
+    try {
+      return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName);
+    } catch (Exception e) {
+      LOG.debug(StringUtils.stringifyException(e));
+      return null;
+    }
+  }
+
   public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
     throws HiveException {
     try {
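
The new getAggrColStatsFor() wrapper above fetches aggregated column statistics for a set of partitions from the metastore and returns null on any failure. A short usage sketch; the database, table, column and partition names are made up for illustration:

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class AggrStatsSketch {
  public static void main(String[] args) throws HiveException {
    Hive db = Hive.get();
    AggrStats aggr = db.getAggrColStatsFor("default", "sales",
        Arrays.asList("amount", "quantity"),
        Arrays.asList("ds=2014-08-01", "ds=2014-08-02"));
    // The wrapper logs at debug level and returns null on any metastore
    // error, so callers must handle a missing result.
    if (aggr != null) {
      System.out.println(aggr.getColStats().size() + " column stats, "
          + aggr.getPartsFound() + " partitions found");
    }
  }
}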

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Tue Aug  5 07:23:02 2014
@@ -933,7 +933,11 @@ public class Table implements Serializab
    * @return include the db name
    */
   public String getCompleteName() {
-    return getDbName() + "@" + getTableName();
+    return getCompleteName(getDbName(), getTableName());
+  }
+
+  public static String getCompleteName(String dbName, String tabName) {
+    return dbName + "@" + tabName;
   }
 
   /**

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java Tue Aug  5 07:23:02 2014
@@ -98,7 +98,8 @@ public class GenMRFileSink1 implements N
 
     if (chDir) {
       // Merge the files in the destination table/partitions by creating Map-only merge job
-      // If underlying data is RCFile a RCFileBlockMerge task would be created.
+      // If the underlying data is RCFile or ORC, an RCFileBlockMerge or
+      // OrcFileStripeMerge task will be created.
       LOG.info("using CombineHiveInputformat for the merge job");
       GenMapRedUtils.createMRWorkForMergingFiles(fsOp, finalName,
           ctx.getDependencyTaskForMultiInsert(), ctx.getMvTask(),

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java Tue Aug  5 07:23:02 2014
@@ -31,15 +31,16 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
-import java.lang.StringBuffer;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.ql.Context;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.ConditionalTask;
 import org.apache.hadoop.hive.ql.exec.DemuxOperator;
@@ -64,11 +65,10 @@ import org.apache.hadoop.hive.ql.exec.mr
 import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
-import org.apache.hadoop.hive.ql.io.rcfile.merge.MergeWork;
+import org.apache.hadoop.hive.ql.io.merge.MergeWork;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx;
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPruner;
@@ -1245,23 +1245,33 @@ public final class GenMapRedUtils {
     MapWork cplan;
     Serializable work;
 
-    if (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) &&
-        fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) {
+    if ((conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) &&
+        fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) ||
+        (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) &&
+            fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class))) {
 
       // Check if InputFormatClass is valid
-      String inputFormatClass = conf.getVar(ConfVars.HIVEMERGEINPUTFORMATBLOCKLEVEL);
+      final String inputFormatClass;
+      if (fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) {
+        inputFormatClass = conf.getVar(ConfVars.HIVEMERGEINPUTFORMATBLOCKLEVEL);
+      } else {
+        inputFormatClass = conf.getVar(ConfVars.HIVEMERGEINPUTFORMATSTRIPELEVEL);
+      }
       try {
         Class c = Class.forName(inputFormatClass);
 
-        LOG.info("RCFile format- Using block level merge");
-        cplan = GenMapRedUtils.createRCFileMergeTask(fsInputDesc, finalName,
+        if (fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class)) {
+          LOG.info("OrcFile format - Using stripe level merge");
+        } else {
+          LOG.info("RCFile format - Using block level merge");
+        }
+        cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName,
             dpCtx != null && dpCtx.getNumDPCols() > 0);
         work = cplan;
       } catch (ClassNotFoundException e) {
         String msg = "Illegal input format class: " + inputFormatClass;
         throw new SemanticException(msg);
       }
-
     } else {
       cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc);
       if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
@@ -1474,20 +1484,22 @@ public final class GenMapRedUtils {
   }
 
   /**
-   * Create a block level merge task for RCFiles.
+   * Create a block level merge task for RCFiles or a stripe level merge task
+   * for ORC files.
    *
    * @param fsInputDesc
    * @param finalName
-   * @return MergeWork if table is stored as RCFile,
+   * @return MergeWork if table is stored as RCFile or ORCFile,
    *         null otherwise
    */
-  public static MapWork createRCFileMergeTask(FileSinkDesc fsInputDesc,
+  public static MapWork createMergeTask(FileSinkDesc fsInputDesc,
       Path finalName, boolean hasDynamicPartitions) throws SemanticException {
 
     Path inputDir = fsInputDesc.getFinalDirName();
     TableDesc tblDesc = fsInputDesc.getTableInfo();
 
-    if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class)) {
+    if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class) ||
+        tblDesc.getInputFileFormatClass().equals(OrcInputFormat.class)) {
       ArrayList<Path> inputDirs = new ArrayList<Path>(1);
       ArrayList<String> inputDirstr = new ArrayList<String>(1);
       if (!hasDynamicPartitions
@@ -1497,7 +1509,8 @@ public final class GenMapRedUtils {
       }
 
       MergeWork work = new MergeWork(inputDirs, finalName,
-          hasDynamicPartitions, fsInputDesc.getDynPartCtx());
+          hasDynamicPartitions, fsInputDesc.getDynPartCtx(),
+          tblDesc.getInputFileFormatClass());
       LinkedHashMap<String, ArrayList<String>> pathToAliases =
           new LinkedHashMap<String, ArrayList<String>>();
       pathToAliases.put(inputDir.toString(), (ArrayList<String>) inputDirstr.clone());
@@ -1515,7 +1528,8 @@ public final class GenMapRedUtils {
       return work;
     }
 
-    throw new SemanticException("createRCFileMergeTask called on non-RCFile table");
+    throw new SemanticException("createMergeTask called on a table with file"
+        + " format other than RCFile or ORCFile");
   }
 
   /**
@@ -1697,12 +1711,8 @@ public final class GenMapRedUtils {
       // generate the temporary file
       // it must be on the same file system as the current destination
       Context baseCtx = parseCtx.getContext();
-  	  // if we are on viewfs we don't want to use /tmp as tmp dir since rename from /tmp/..
-      // to final location /user/hive/warehouse/ will fail later, so instead pick tmp dir
-      // on same namespace as tbl dir.
-      Path tmpDir = dest.toUri().getScheme().equals("viewfs") ?
-        baseCtx.getExtTmpPathRelTo(dest.toUri()) :
-        baseCtx.getExternalTmpPath(dest.toUri());
+
+      Path tmpDir = baseCtx.getExternalTmpPath(dest);
 
       FileSinkDesc fileSinkDesc = fsOp.getConf();
       // Change all the linked file sink descriptors

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java Tue Aug  5 07:23:02 2014
@@ -179,17 +179,23 @@ public class RelOptHiveTable extends Rel
         }
       } else {
         // 2.2 Obtain col stats for full table scan
-        Statistics stats = StatsUtils.collectStatistics(m_hiveConf, partitionList,
-            m_hiveTblMetadata, m_hiveNonPartitionCols, nonPartColNamesThatRqrStats, true, true);
-        m_rowCount = stats.getNumRows();
-        hiveColStats = new ArrayList<ColStatistics>();
-        for (String c : nonPartColNamesThatRqrStats) {
-          ColStatistics cs = stats.getColumnStatisticsFromColName(c);
-          if (cs != null) {
-            hiveColStats.add(cs);
-          } else {
-            colNamesFailedStats.add(c);
+        try {
+          Statistics stats = StatsUtils.collectStatistics(m_hiveConf, partitionList,
+              m_hiveTblMetadata, m_hiveNonPartitionCols, nonPartColNamesThatRqrStats, true, true);
+          m_rowCount = stats.getNumRows();
+          hiveColStats = new ArrayList<ColStatistics>();
+          for (String c : nonPartColNamesThatRqrStats) {
+            ColStatistics cs = stats.getColumnStatisticsFromColName(c);
+            if (cs != null) {
+              hiveColStats.add(cs);
+            } else {
+              colNamesFailedStats.add(c);
+            }
           }
+        } catch (HiveException e) {
+          String logMsg = "Collecting stats failed.";
+          LOG.error(logMsg, e);
+          throw new RuntimeException(logMsg, e);
         }
       }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java Tue Aug  5 07:23:02 2014
@@ -100,9 +100,9 @@ public class StatsRulesProcFactory {
       }
       Table table = aspCtx.getParseContext().getTopToTable().get(tsop);
 
-      // gather statistics for the first time and the attach it to table scan operator
-      Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop);
       try {
+        // gather statistics for the first time and the attach it to table scan operator
+        Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop);
         tsop.setStatistics(stats.clone());
 
         if (LOG.isDebugEnabled()) {
@@ -110,6 +110,9 @@ public class StatsRulesProcFactory {
         }
       } catch (CloneNotSupportedException e) {
         throw new SemanticException(ErrorMsg.STATISTICS_CLONING_FAILED.getMsg());
+      } catch (HiveException e) {
+        LOG.debug(e);
+        throw new SemanticException(e);
       }
       return null;
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTablePartMergeFilesDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTablePartMergeFilesDesc.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTablePartMergeFilesDesc.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/AlterTablePartMergeFilesDesc.java Tue Aug  5 07:23:02 2014
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.plan.Explain;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
+import org.apache.hadoop.mapred.InputFormat;
 
 @Explain(displayName = "Alter Table Partition Merge Files")
 public class AlterTablePartMergeFilesDesc {
@@ -35,6 +36,7 @@ public class AlterTablePartMergeFilesDes
 
   private List<Path> inputDir = new ArrayList<Path>();
   private Path outputDir = null;
+  private Class<? extends InputFormat> inputFormatClass;
 
   public AlterTablePartMergeFilesDesc(String tableName,
       HashMap<String, String> partSpec) {
@@ -90,4 +92,12 @@ public class AlterTablePartMergeFilesDes
     this.lbCtx = lbCtx;
   }
 
+  public Class<? extends InputFormat> getInputFormatClass() {
+    return inputFormatClass;
+  }
+
+  public void setInputFormatClass(Class<? extends InputFormat> inputFormatClass) {
+    this.inputFormatClass = inputFormatClass;
+  }
+
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Aug  5 07:23:02 2014
@@ -69,6 +69,7 @@ import org.apache.hadoop.hive.ql.index.H
 import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
 import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;
@@ -969,7 +970,7 @@ public class DDLSemanticAnalyzer extends
         TableDesc tblDesc = Utilities.getTableDesc(table);
         // Write the output to temporary directory and move it to the final location at the end
         // so the operation is atomic.
-        Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri());
+        Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
         truncateTblDesc.setOutputDir(queryTmpdir);
         LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
             partSpec == null ? new HashMap<String, String>() : partSpec);
@@ -1520,11 +1521,13 @@ public class DDLSemanticAnalyzer extends
             tblObj.getSkewedColValueLocationMaps(), tblObj.isStoredAsSubDirectories(), conf);
       }
 
-      // throw a HiveException for non-rcfile.
-      if (!inputFormatClass.equals(RCFileInputFormat.class)) {
+      // throw a HiveException for formats other than RCFile and ORCFile.
+      if (!((inputFormatClass.equals(RCFileInputFormat.class) ||
+          (inputFormatClass.equals(OrcInputFormat.class))))) {
         throw new SemanticException(
-            "Only RCFileFormat is supportted right now.");
+            "Only RCFile and ORCFile Formats are supportted right now.");
       }
+      mergeDesc.setInputFormatClass(inputFormatClass);
 
       // throw a HiveException if the table/partition is bucketized
       if (bucketCols != null && bucketCols.size() > 0) {
@@ -1549,7 +1552,7 @@ public class DDLSemanticAnalyzer extends
       ddlWork.setNeedLock(true);
       Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
       TableDesc tblDesc = Utilities.getTableDesc(tblObj);
-      Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri());
+      Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
       mergeDesc.setOutputDir(queryTmpdir);
       LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
           partSpec == null ? new HashMap<String, String>() : partSpec);

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java Tue Aug  5 07:23:02 2014
@@ -313,7 +313,8 @@ public class GenTezUtils {
 
     if (chDir) {
       // Merge the files in the destination table/partitions by creating Map-only merge job
-      // If underlying data is RCFile a RCFileBlockMerge task would be created.
+      // If the underlying data is RCFile or ORC, an RCFileBlockMerge or
+      // OrcFileStripeMerge task will be created.
       LOG.info("using CombineHiveInputformat for the merge job");
       GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName,
           context.dependencyTask, context.moveTask,

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Tue Aug  5 07:23:02 2014
@@ -276,7 +276,7 @@ public class ImportSemanticAnalyzer exte
 
   private Task<?> loadTable(URI fromURI, Table table) {
     Path dataPath = new Path(fromURI.toString(), "data");
-    Path tmpPath = ctx.getExternalTmpPath(fromURI);
+    Path tmpPath = ctx.getExternalTmpPath(new Path(fromURI));
     Task<?> copyTask = TaskFactory.get(new CopyWork(dataPath,
        tmpPath, false), conf);
     LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath,
@@ -321,7 +321,7 @@ public class ImportSemanticAnalyzer exte
       LOG.debug("adding dependent CopyWork/AddPart/MoveWork for partition "
           + partSpecToString(partSpec.getPartSpec())
           + " with source location: " + srcLocation);
-      Path tmpPath = ctx.getExternalTmpPath(fromURI);
+      Path tmpPath = ctx.getExternalTmpPath(new Path(fromURI));
       Task<?> copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation),
           tmpPath, false), conf);
       Task<?> addPartTask = TaskFactory.get(new DDLWork(getInputs(),

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Tue Aug  5 07:23:02 2014
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
-import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DATABASE_WAREHOUSE_SUFFIX;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -33,10 +32,10 @@ import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.UUID;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
 
@@ -1540,7 +1539,7 @@ public class SemanticAnalyzer extends Ba
               }
               try {
                 fname = ctx.getExternalTmpPath(
-                    FileUtils.makeQualified(location, conf).toUri()).toString();
+                    FileUtils.makeQualified(location, conf)).toString();
               } catch (Exception e) {
                 throw new SemanticException(generateErrorMessage(ast,
                     "Error creating temporary folder on: " + location.toString()), e);
@@ -2442,13 +2441,13 @@ public class SemanticAnalyzer extends Ba
    * for inner joins push a 'is not null predicate' to the join sources for
    * every non nullSafe predicate.
    */
-  private Operator genNotNullFilterForJoinSourcePlan(QB qb, Operator input, 
+  private Operator genNotNullFilterForJoinSourcePlan(QB qb, Operator input,
       QBJoinTree joinTree, ExprNodeDesc[] joinKeys) throws SemanticException {
 
     if (qb == null || joinTree == null) {
       return input;
     }
-    
+
     if (!joinTree.getNoOuterJoin()) {
       return input;
     }
@@ -5776,12 +5775,7 @@ public class SemanticAnalyzer extends Ba
       if (isNonNativeTable) {
         queryTmpdir = dest_path;
       } else {
-    	// if we are on viewfs we don't want to use /tmp as tmp dir since rename from /tmp/..
-        // to final /user/hive/warehouse/ will fail later, so instead pick tmp dir
-        // on same namespace as tbl dir.
-        queryTmpdir = dest_path.toUri().getScheme().equals("viewfs") ?
-          ctx.getExtTmpPathRelTo(dest_path.getParent().toUri()) :
-          ctx.getExternalTmpPath(dest_path.toUri());
+        queryTmpdir = ctx.getExternalTmpPath(dest_path);
       }
       if (dpCtx != null) {
         // set the root of the temporary path where dynamic partition columns will populate
@@ -5894,12 +5888,7 @@ public class SemanticAnalyzer extends Ba
       dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
           .getAuthority(), partPath.toUri().getPath());
 
-      // if we are on viewfs we don't want to use /tmp as tmp dir since rename from /tmp/..
-      // to final /user/hive/warehouse/ will fail later, so instead pick tmp dir
-      // on same namespace as tbl dir.
-      queryTmpdir = dest_path.toUri().getScheme().equals("viewfs") ?
-        ctx.getExtTmpPathRelTo(dest_path.getParent().toUri()) :
-        ctx.getExternalTmpPath(dest_path.toUri());
+      queryTmpdir = ctx.getExternalTmpPath(dest_path);
       table_desc = Utilities.getTableDesc(dest_tab);
 
       // Add sorting/bucketing if needed
@@ -5956,7 +5945,7 @@ public class SemanticAnalyzer extends Ba
 
         try {
           Path qPath = FileUtils.makeQualified(dest_path, conf);
-          queryTmpdir = ctx.getExternalTmpPath(qPath.toUri());
+          queryTmpdir = ctx.getExternalTmpPath(qPath);
         } catch (Exception e) {
           throw new SemanticException("Error creating temporary folder on: "
               + dest_path, e);
@@ -6117,7 +6106,7 @@ public class SemanticAnalyzer extends Ba
     // it should be the same as the MoveWork's sourceDir.
     fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString());
     if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
-      String statsTmpLoc = ctx.getExternalTmpPath(queryTmpdir.toUri()).toString();
+      String statsTmpLoc = ctx.getExternalTmpPath(queryTmpdir).toString();
       LOG.info("Set stats collection dir : " + statsTmpLoc);
       conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
     }
@@ -9129,7 +9118,7 @@ public class SemanticAnalyzer extends Ba
       tsDesc.setGatherStats(false);
     } else {
       if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
-        String statsTmpLoc = ctx.getExternalTmpPath(tab.getPath().toUri()).toString();
+        String statsTmpLoc = ctx.getExternalTmpPath(tab.getPath()).toString();
         LOG.info("Set stats collection dir : " + statsTmpLoc);
         conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
       }
@@ -9678,7 +9667,11 @@ public class SemanticAnalyzer extends Ba
 
     // Generate column access stats if required - wait until column pruning takes place
     // during optimization
-    if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS) == true) {
+    boolean isColumnInfoNeedForAuth = SessionState.get().isAuthorizationModeV2()
+        && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED);
+
+    if (isColumnInfoNeedForAuth
+        || HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS) == true) {
       ColumnAccessAnalyzer columnAccessAnalyzer = new ColumnAccessAnalyzer(pCtx);
       setColumnAccessInfo(columnAccessAnalyzer.analyzeColumnAccess());
     }
@@ -9725,7 +9718,7 @@ public class SemanticAnalyzer extends Ba
             if (((TableScanDesc)topOp.getConf()).getIsMetadataOnly()) {
               continue;
             }
-            PrunedPartitionList parts = pCtx.getOpToPartList().get((TableScanOperator) topOp);
+            PrunedPartitionList parts = pCtx.getOpToPartList().get(topOp);
             if (parts.getPartitions().size() > scanLimit) {
               throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED, ""
                   + parts.getPartitions().size(), "" + parts.getSourceTable().getTableName(), ""
@@ -10346,7 +10339,7 @@ public class SemanticAnalyzer extends Ba
     String dbName = qualified.length == 1 ? SessionState.get().getCurrentDatabase() : qualified[0];
     Database database  = getDatabase(dbName);
     outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED));
- 
+
     if (isTemporary) {
       if (partCols.size() > 0) {
         throw new SemanticException("Partition columns are not supported on temporary tables");

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java Tue Aug  5 07:23:02 2014
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hive.ql.security.authorization;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
@@ -24,8 +28,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.hooks.Entity;
 import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.hooks.Entity;
 import org.apache.hadoop.hive.ql.hooks.Entity.Type;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity.WriteType;
@@ -42,9 +46,6 @@ import org.apache.hadoop.hive.ql.securit
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
-import java.util.ArrayList;
-import java.util.List;
-
 /**
  * Utility code shared by hive internal code and sql standard authorization plugin implementation
  */
@@ -173,7 +174,7 @@ public class AuthorizationUtils {
   }
 
   public static HivePrivilegeObject getHivePrivilegeObject(
-      PrivilegeObjectDesc privSubjectDesc, List<String> columns) throws HiveException {
+      PrivilegeObjectDesc privSubjectDesc, Set<String> columns) throws HiveException {
 
     // null means ALL for show grants, GLOBAL for grant/revoke
     HivePrivilegeObjectType objectType = null;

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java Tue Aug  5 07:23:02 2014
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.ql.security.authorization;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 
@@ -27,7 +26,7 @@ public class DefaultHiveAuthorizationPro
     BitSetCheckedAuthorizationProvider {
 
   public void init(Configuration conf) throws HiveException {
-    hive_db = new HiveProxy(Hive.get(new HiveConf(conf, HiveAuthorizationProvider.class)));
+    hive_db = new HiveProxy(Hive.get(conf, HiveAuthorizationProvider.class));
   }
 
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java Tue Aug  5 07:23:02 2014
@@ -35,7 +35,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
@@ -83,7 +82,7 @@ public class StorageBasedAuthorizationPr
         // till we explicitly initialize it as being from the client side. So, we have a
         // chicken-and-egg problem. So, we now track whether or not we're running from client-side
         // in the SBAP itself.
-        hive_db = new HiveProxy(Hive.get(new HiveConf(getConf(), StorageBasedAuthorizationProvider.class)));
+        hive_db = new HiveProxy(Hive.get(getConf(), StorageBasedAuthorizationProvider.class));
         this.wh = new Warehouse(getConf());
         if (this.wh == null){
           // If wh is still null after just having initialized it, bail out - something's very wrong.
@@ -117,7 +116,7 @@ public class StorageBasedAuthorizationPr
 
     // Update to previous comment: there does seem to be one place that uses this
     // and that is to authorize "show databases" in hcat commandline, which is used
-    // by webhcat. And user-level auth seems to be a resonable default in this case.
+    // by webhcat. And user-level auth seems to be a reasonable default in this case.
     // The now deprecated HdfsAuthorizationProvider in hcatalog approached this in
     // another way, and that was to see if the user had said above appropriate requested
     // privileges for the hive root warehouse directory. That seems to be the best

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java Tue Aug  5 07:23:02 2014
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hive.ql.security.authorization.plugin;
 
-import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
@@ -57,9 +60,18 @@ public class HivePrivilegeObject impleme
     return compare;
   }
 
-  private int compare(List<String> o1, List<String> o2) {
-    for (int i = 0; i < Math.min(o1.size(), o2.size()); i++) {
-      int compare = o1.get(i).compareTo(o2.get(i));
+  private int compare(Collection<String> o1, Collection<String> o2) {
+    Iterator<String> it1 = o1.iterator();
+    Iterator<String> it2 = o2.iterator();
+    while (it1.hasNext()) {
+      if (!it2.hasNext()) {
+        break;
+      }
+      String s1 = it1.next();
+      String s2 = it2.next();
+      int compare = s1 != null ?
+          (s2 != null ? s1.compareTo(s2) : 1) :
+            (s2 != null ? -1 : 0);
       if (compare != 0) {
         return compare;
       }
@@ -79,7 +91,7 @@ public class HivePrivilegeObject impleme
   private final String objectName;
   private final List<String> commandParams;
   private final List<String> partKeys;
-  private final List<String> columns;
+  private Set<String> columns;
   private final HivePrivObjectActionType actionType;
 
   public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName) {
@@ -94,9 +106,8 @@ public class HivePrivilegeObject impleme
   public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName,
       List<String> partKeys, String column) {
     this(type, dbname, objectName, partKeys,
-        column == null ? null : new ArrayList<String>(Arrays.asList(column)),
+        column == null ? null : new HashSet<String>(Arrays.asList(column)),
         HivePrivObjectActionType.OTHER, null);
-
   }
 
   /**
@@ -110,12 +121,12 @@ public class HivePrivilegeObject impleme
   }
 
   public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName,
-    List<String> partKeys, List<String> columns, List<String> commandParams) {
+    List<String> partKeys, Set<String> columns, List<String> commandParams) {
     this(type, dbname, objectName, partKeys, columns, HivePrivObjectActionType.OTHER, commandParams);
   }
 
   public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName,
-      List<String> partKeys, List<String> columns, HivePrivObjectActionType actionType,
+      List<String> partKeys, Set<String> columns, HivePrivObjectActionType actionType,
       List<String> commandParams) {
     this.type = type;
     this.dbname = dbname;
@@ -153,7 +164,13 @@ public class HivePrivilegeObject impleme
     return partKeys;
   }
 
-  public List<String> getColumns() {
+  /**
+   * Applicable columns in this object.
+   * In case of DML read operations, this is the set of columns being used.
+   * Column information is not set for DDL operations and for tables being written into.
+   * @return set of applicable columns
+   */
+  public Set<String> getColumns() {
     return columns;
   }
 
@@ -202,4 +219,8 @@ public class HivePrivilegeObject impleme
     return (dbname == null ? "" : dbname + ".") + objectName;
   }
 
+  public void setColumns(Set<String> columns) {
+    this.columns = columns;
+  }
+
 }
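With columns now carried as a Set<String> and compared through the Collection-based helper above, the ordering is null-safe: null elements sort before non-null ones, and comparison proceeds element by element until either collection runs out. A standalone sketch of that element-wise ordering follows; class and method names are hypothetical, plain Java only.

import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;

class NullSafeCompareSketch {
  // Hypothetical standalone restatement of the element-wise ordering above:
  // null elements sort before non-null ones, and the walk stops as soon as
  // either collection is exhausted.
  static int compareElements(Collection<String> o1, Collection<String> o2) {
    Iterator<String> it1 = o1.iterator();
    Iterator<String> it2 = o2.iterator();
    while (it1.hasNext() && it2.hasNext()) {
      String s1 = it1.next();
      String s2 = it2.next();
      int compare = s1 != null
          ? (s2 != null ? s1.compareTo(s2) : 1)
          : (s2 != null ? -1 : 0);
      if (compare != 0) {
        return compare;
      }
    }
    return 0;
  }

  public static void main(String[] args) {
    // "a" sorts before "b", so the result is negative.
    System.out.println(compareElements(Arrays.asList("a", "x"), Arrays.asList("b")));
    // A null element sorts before any string, so the result is -1.
    System.out.println(compareElements(Arrays.asList((String) null), Arrays.asList("a")));
  }
}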

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java Tue Aug  5 07:23:02 2014
@@ -123,5 +123,7 @@ public class HiveRoleGrant implements Co
 
   }
 
-
+  public String toString() {
+    return roleName + "[" + principalName + ":" + principalType + (grantOption ? ":WITH GRANT]" : "]");
+  }
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java Tue Aug  5 07:23:02 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.securi
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -318,7 +319,7 @@ public class HiveV1Authorizer implements
           privs.addAll(hive.showPrivilegeGrant(HiveObjectType.DATABASE,
               name, type, dbObj.getName(), null, null, null));
         } else {
-          List<String> columns = privObj.getColumns();
+          Set<String> columns = privObj.getColumns();
           if (columns != null && !columns.isEmpty()) {
             // show column level privileges
             for (String columnName : columns) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java Tue Aug  5 07:23:02 2014
@@ -412,5 +412,8 @@ public class SQLAuthorizationUtils {
     }
   }
 
+  static HiveAuthzPluginException getPluginException(String prefix, Exception e) {
+    return new HiveAuthzPluginException(prefix + ": " + e.getMessage(), e);
+  }
 
 }
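SQLAuthorizationUtils.getPluginException above simply prepends a context prefix to the underlying exception's message and preserves the cause; the access controller changes below replace the repeated hand-rolled prefix + ": " + e.getMessage() constructions with calls to this helper. A minimal, self-contained sketch of the same wrapping idea, with a plain RuntimeException standing in for HiveAuthzPluginException:

class PluginExceptionSketch {
  // Plain-Java stand-in for getPluginException: prepend a context prefix to
  // the underlying message and keep the original exception as the cause.
  static RuntimeException wrap(String prefix, Exception e) {
    return new RuntimeException(prefix + ": " + e.getMessage(), e);
  }

  public static void main(String[] args) {
    try {
      throw new IllegalStateException("metastore unreachable");
    } catch (Exception e) {
      // Prints: Error dropping role: metastore unreachable
      System.out.println(wrap("Error dropping role", e).getMessage());
    }
  }
}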

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java Tue Aug  5 07:23:02 2014
@@ -105,6 +105,7 @@ public class SQLStdHiveAccessController 
     }
     this.currentUserName = newUserName;
     this.currentRoles = getRolesFromMS();
+    LOG.info("Current user : " + currentUserName + ", Current Roles : " + currentRoles);
   }
 
   private List<HiveRoleGrant> getRolesFromMS() throws HiveAuthzPluginException {
@@ -122,8 +123,8 @@ public class SQLStdHiveAccessController 
       }
       return currentRoles;
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Failed to retrieve roles for " + currentUserName + ": "
-          + e.getMessage(), e);
+      throw SQLAuthorizationUtils.getPluginException("Failed to retrieve roles for "
+          + currentUserName, e);
     }
   }
 
@@ -178,7 +179,7 @@ public class SQLStdHiveAccessController 
     try {
       metastoreClient.grant_privileges(privBag);
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error granting privileges: " + e.getMessage(), e);
+      throw SQLAuthorizationUtils.getPluginException("Error granting privileges", e);
     }
   }
 
@@ -238,7 +239,7 @@ public class SQLStdHiveAccessController 
       // that has desired behavior.
       metastoreClient.revoke_privileges(new PrivilegeBag(revokePrivs), grantOption);
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error revoking privileges", e);
+      throw SQLAuthorizationUtils.getPluginException("Error revoking privileges", e);
     }
   }
 
@@ -259,7 +260,7 @@ public class SQLStdHiveAccessController 
       metastoreClientFactory.getHiveMetastoreClient().create_role(
         new Role(roleName, 0, grantorName));
     } catch (TException e) {
-      throw new HiveAuthzPluginException("Error create role : " + e.getMessage(), e);
+      throw SQLAuthorizationUtils.getPluginException("Error create role", e);
     }
   }
 
@@ -273,7 +274,7 @@ public class SQLStdHiveAccessController 
     try {
       metastoreClientFactory.getHiveMetastoreClient().drop_role(roleName);
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error dropping role", e);
+      throw SQLAuthorizationUtils.getPluginException("Error dropping role", e);
     }
   }
 
@@ -294,11 +295,11 @@ public class SQLStdHiveAccessController 
               grantorPrinc.getName(),
               AuthorizationUtils.getThriftPrincipalType(grantorPrinc.getType()), grantOption);
         } catch (MetaException e) {
-          throw new HiveAuthzPluginException(e.getMessage(), e);
+          throw SQLAuthorizationUtils.getPluginException("Error granting role", e);
         } catch (Exception e) {
           String msg = "Error granting roles for " + hivePrincipal.getName() + " to role "
-              + roleName + ": " + e.getMessage();
-          throw new HiveAuthzPluginException(msg, e);
+              + roleName;
+          throw SQLAuthorizationUtils.getPluginException(msg, e);
         }
       }
     }
@@ -320,8 +321,8 @@ public class SQLStdHiveAccessController 
               AuthorizationUtils.getThriftPrincipalType(hivePrincipal.getType()), grantOption);
         } catch (Exception e) {
           String msg = "Error revoking roles for " + hivePrincipal.getName() + " to role "
-              + roleName + ": " + e.getMessage();
-          throw new HiveAuthzPluginException(msg, e);
+              + roleName;
+          throw SQLAuthorizationUtils.getPluginException(msg, e);
         }
       }
     }
@@ -337,7 +338,7 @@ public class SQLStdHiveAccessController 
     try {
       return metastoreClientFactory.getHiveMetastoreClient().listRoleNames();
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error listing all roles", e);
+      throw SQLAuthorizationUtils.getPluginException("Error listing all roles", e);
     }
   }
 
@@ -352,10 +353,12 @@ public class SQLStdHiveAccessController 
     try {
       return getHiveRoleGrants(metastoreClientFactory.getHiveMetastoreClient(), roleName);
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error getting principals for all roles", e);
+      throw SQLAuthorizationUtils.getPluginException("Error getting principals for all roles", e);
     }
   }
 
+
+
   public static List<HiveRoleGrant> getHiveRoleGrants(IMetaStoreClient client, String roleName)
       throws Exception {
     GetPrincipalsInRoleRequest request = new GetPrincipalsInRoleRequest(roleName);
@@ -434,7 +437,7 @@ public class SQLStdHiveAccessController 
       return resPrivInfos;
 
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error showing privileges: "+ e.getMessage(), e);
+      throw SQLAuthorizationUtils.getPluginException("Error showing privileges", e);
     }
 
   }
@@ -532,6 +535,7 @@ public class SQLStdHiveAccessController 
       currentRoles.add(adminRole);
       return;
     }
+    LOG.info("Current user : " + currentUserName + ", Current Roles : " + currentRoles);
     // If we are here it means, user is requesting a role he doesn't belong to.
     throw new HiveAccessControlException(currentUserName +" doesn't belong to role "
       +roleName);
@@ -548,11 +552,7 @@ public class SQLStdHiveAccessController 
    */
   boolean isUserAdmin() throws HiveAuthzPluginException {
     List<HiveRoleGrant> roles;
-    try {
-      roles = getCurrentRoles();
-    } catch (Exception e) {
-      throw new HiveAuthzPluginException(e);
-    }
+    roles = getCurrentRoles();
     for (HiveRoleGrant role : roles) {
       if (role.getRoleName().equalsIgnoreCase(HiveMetaStore.ADMIN)) {
         return true;
@@ -563,11 +563,7 @@ public class SQLStdHiveAccessController 
 
   private boolean doesUserHasAdminOption(List<String> roleNames) throws HiveAuthzPluginException {
     List<HiveRoleGrant> currentRoles;
-    try {
-      currentRoles = getCurrentRoles();
-    } catch (Exception e) {
-        throw new HiveAuthzPluginException(e);
-    }
+    currentRoles = getCurrentRoles();
     for (String roleName : roleNames) {
       boolean roleFound = false;
       for (HiveRoleGrant currentRole : currentRoles) {
@@ -604,8 +600,8 @@ public class SQLStdHiveAccessController 
       }
       return hiveRoleGrants;
     } catch (Exception e) {
-      throw new HiveAuthzPluginException("Error getting role grant information for user "
-          + principal.getName() + ": " + e.getMessage(), e);
+      throw SQLAuthorizationUtils.getPluginException("Error getting role grant information for user "
+          + principal.getName(), e);
     }
   }
 

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java Tue Aug  5 07:23:02 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.stats;
 
 import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -30,7 +29,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -102,7 +101,7 @@ public class StatsUtils {
    * @throws HiveException
    */
   public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList,
-      Table table, TableScanOperator tableScanOperator) {
+      Table table, TableScanOperator tableScanOperator) throws HiveException {
 
     // column level statistics are required only for the columns that are needed
     List<ColumnInfo> schema = tableScanOperator.getSchema().getSignature();
@@ -112,7 +111,7 @@ public class StatsUtils {
   }
 
   private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList,
-      Table table, List<ColumnInfo> schema, List<String> neededColumns) {
+      Table table, List<ColumnInfo> schema, List<String> neededColumns) throws HiveException {
 
     boolean fetchColStats =
         HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_FETCH_COLUMN_STATS);
@@ -124,7 +123,7 @@ public class StatsUtils {
 
   public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList,
       Table table, List<ColumnInfo> schema, List<String> neededColumns,
-      boolean fetchColStats, boolean fetchPartStats) {
+      boolean fetchColStats, boolean fetchPartStats) throws HiveException {
 
     Statistics stats = new Statistics();
 
@@ -222,17 +221,26 @@ public class StatsUtils {
         for (Partition part : partList.getNotDeniedPartns()) {
           partNames.add(part.getName());
         }
-        Map<String, List<ColStatistics>> partStats =
-            getPartColumnStats(table, schema, partNames, neededColumns);
-        if (partStats != null) {
-          for (String partName : partNames) {
-            List<ColStatistics> partStat = partStats.get(partName);
-            haveFullStats &= (partStat != null);
-            if (partStat != null) {
-              stats.updateColumnStatsState(deriveStatType(partStat, neededColumns));
-              stats.addToColumnStats(partStat);
-            }
+        Map<String, String> colToTabAlias = new HashMap<String, String>();
+        neededColumns = processNeededColumns(schema, neededColumns, colToTabAlias);
+        AggrStats aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), table.getTableName(), neededColumns, partNames);
+        if (null == aggrStats) {
+          haveFullStats = false;
+        } else {
+          List<ColumnStatisticsObj> colStats = aggrStats.getColStats();
+          if (colStats.size() != neededColumns.size()) {
+            LOG.debug("Column stats requested for : " + neededColumns.size() + " columns. Able to retrieve"
+                + " for " + colStats.size() + " columns");
+          }
+          List<ColStatistics> columnStats = convertColStats(colStats, table.getTableName(), colToTabAlias);
+          stats.addToColumnStats(columnStats);
+          State colState = deriveStatType(columnStats, neededColumns);
+          if (aggrStats.getPartsFound() != partNames.size() && colState != State.NONE) {
+            LOG.debug("Column stats requested for : " + partNames.size() +" partitions. "
+              + "Able to retrieve for " + aggrStats.getPartsFound() + " partitions");
+            stats.updateColumnStatsState(State.PARTIAL);
           }
+          stats.setColumnStatsState(colState);
         }
       }
       // There are some partitions with no state (or we didn't fetch any state).
@@ -476,12 +484,7 @@ public class StatsUtils {
     try {
       List<ColumnStatisticsObj> colStat = Hive.get().getTableColumnStatistics(
           dbName, tabName, neededColsInTable);
-      stats = new ArrayList<ColStatistics>(colStat.size());
-      for (ColumnStatisticsObj statObj : colStat) {
-        ColStatistics cs = getColStatistics(statObj, tabName, statObj.getColName());
-        cs.setTableAlias(colToTabAlias.get(cs.getColumnName()));
-        stats.add(cs);
-      }
+      stats = convertColStats(colStat, tabName, colToTabAlias);
     } catch (HiveException e) {
       LOG.error("Failed to retrieve table statistics: ", e);
       stats = null;
@@ -489,43 +492,16 @@ public class StatsUtils {
     return stats;
   }
 
-  /**
-   * Get table level column statistics from metastore for needed columns
-   * @param table
-   *          - table
-   * @param schema
-   *          - output schema
-   * @param neededColumns
-   *          - list of needed columns
-   * @return column statistics
-   */
-  public static Map<String, List<ColStatistics>> getPartColumnStats(Table table,
-      List<ColumnInfo> schema, List<String> partNames, List<String> neededColumns) {
-    String dbName = table.getDbName();
-    String tabName = table.getTableName();
-    Map<String, String> colToTabAlias = new HashMap<String, String>(schema.size());
-    List<String> neededColsInTable = processNeededColumns(schema, neededColumns, colToTabAlias);
-    Map<String, List<ColStatistics>> stats = null;
-    try {
-      Map<String, List<ColumnStatisticsObj>> colStat = Hive.get().getPartitionColumnStatistics(
-          dbName, tabName, partNames, neededColsInTable);
-      stats = new HashMap<String, List<ColStatistics>>(colStat.size());
-      for (Map.Entry<String, List<ColumnStatisticsObj>> entry : colStat.entrySet()) {
-        List<ColStatistics> partStat = new ArrayList<ColStatistics>(entry.getValue().size());
-        for (ColumnStatisticsObj statObj : entry.getValue()) {
-          ColStatistics cs = getColStatistics(statObj, tabName, statObj.getColName());
-          cs.setTableAlias(colToTabAlias.get(cs.getColumnName()));
-          partStat.add(cs);
-        }
-        stats.put(entry.getKey(), partStat);
-      }
-    } catch (HiveException e) {
-      LOG.error("Failed to retrieve partitions statistics: ", e);
-      stats = null;
+  private static List<ColStatistics> convertColStats(List<ColumnStatisticsObj> colStats, String tabName,
+    Map<String,String> colToTabAlias) {
+    List<ColStatistics> stats = new ArrayList<ColStatistics>(colStats.size());
+    for (ColumnStatisticsObj statObj : colStats) {
+      ColStatistics cs = getColStatistics(statObj, tabName, statObj.getColName());
+      cs.setTableAlias(colToTabAlias.get(cs.getColumnName()));
+      stats.add(cs);
     }
     return stats;
   }
-
   private static List<String> processNeededColumns(List<ColumnInfo> schema,
       List<String> neededColumns, Map<String, String> colToTabAlias) {
     for (ColumnInfo col : schema) {
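The StatsUtils change above fetches aggregated column statistics for all requested partitions in a single metastore call (Hive.get().getAggrColStatsFor) and downgrades the column-stats state to PARTIAL when the aggregation covered fewer partitions than were requested. The sketch below is self-contained and uses a hypothetical enum in place of Hive's Statistics.State, only to illustrate that decision.

enum ColStatsState { NONE, PARTIAL, COMPLETE }

class AggrStatsStateSketch {
  // Hypothetical stand-in for the state handling in the hunk above; the real
  // patch derives the state from the returned columns and then merges in
  // PARTIAL via updateColumnStatsState when partitions are missing.
  static ColStatsState deriveState(long partsFound, int partsRequested,
      ColStatsState stateFromColumns) {
    if (partsFound != partsRequested && stateFromColumns != ColStatsState.NONE) {
      // Fewer partitions aggregated than requested: the stats can only be partial.
      return ColStatsState.PARTIAL;
    }
    return stateFromColumns;
  }

  public static void main(String[] args) {
    System.out.println(deriveState(3, 5, ColStatsState.COMPLETE)); // PARTIAL
    System.out.println(deriveState(5, 5, ColStatsState.COMPLETE)); // COMPLETE
  }
}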

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java Tue Aug  5 07:23:02 2014
@@ -126,12 +126,13 @@ public class TestDbTxnManager {
   public void testSingleWriteTable() throws Exception {
     WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
     QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
     txnMgr.acquireLocks(qp, ctx, "fred");
     List<HiveLock> locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
         TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
-    txnMgr.getLockManager().unlock(locks.get(0));
+    txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
   }
@@ -144,12 +145,13 @@ public class TestDbTxnManager {
     addPartitionInput(t);
     WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
     QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
     txnMgr.acquireLocks(qp, ctx, "fred");
     List<HiveLock> locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(4,
         TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
-    txnMgr.getLockManager().unlock(locks.get(0));
+    txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
   }
@@ -158,12 +160,13 @@ public class TestDbTxnManager {
   public void testUpdate() throws Exception {
     WriteEntity we = addTableOutput(WriteEntity.WriteType.UPDATE);
     QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
     txnMgr.acquireLocks(qp, ctx, "fred");
     List<HiveLock> locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
         TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
-    txnMgr.getLockManager().unlock(locks.get(0));
+    txnMgr.commitTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
   }
@@ -172,12 +175,28 @@ public class TestDbTxnManager {
   public void testDelete() throws Exception {
     WriteEntity we = addTableOutput(WriteEntity.WriteType.DELETE);
     QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
     txnMgr.acquireLocks(qp, ctx, "fred");
     List<HiveLock> locks = ctx.getHiveLocks();
     Assert.assertEquals(1, locks.size());
     Assert.assertEquals(1,
         TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
-    txnMgr.getLockManager().unlock(locks.get(0));
+    txnMgr.commitTxn();
+    locks = txnMgr.getLockManager().getLocks(false, false);
+    Assert.assertEquals(0, locks.size());
+  }
+
+  @Test
+  public void testRollback() throws Exception {
+    WriteEntity we = addTableOutput(WriteEntity.WriteType.DELETE);
+    QueryPlan qp = new MockQueryPlan(this);
+    txnMgr.openTxn("fred");
+    txnMgr.acquireLocks(qp, ctx, "fred");
+    List<HiveLock> locks = ctx.getHiveLocks();
+    Assert.assertEquals(1, locks.size());
+    Assert.assertEquals(1,
+        TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
+    txnMgr.rollbackTxn();
     locks = txnMgr.getLockManager().getLocks(false, false);
     Assert.assertEquals(0, locks.size());
   }
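The TestDbTxnManager updates above follow the transactional lock lifecycle the DbTxnManager now expects: a transaction is opened before locks are acquired, and committing (or rolling back) the transaction is what releases its locks, so the tests no longer unlock individual locks. A toy, self-contained sketch of that lifecycle; the TxnManager class here is a hypothetical stand-in, not Hive's HiveTxnManager API.

import java.util.ArrayList;
import java.util.List;

class TxnLifecycleSketch {
  // Hypothetical stand-in for Hive's DbTxnManager, only to illustrate the
  // open -> acquire -> commit/rollback flow the updated tests assert.
  static class TxnManager {
    private final List<String> locks = new ArrayList<String>();
    void openTxn(String user) { /* start a transaction for this user */ }
    void acquireLocks(String query) { locks.add(query); } // locks belong to the open txn
    void commitTxn() { locks.clear(); }   // commit releases the txn's locks
    void rollbackTxn() { locks.clear(); } // so does rollback
    int lockCount() { return locks.size(); }
  }

  public static void main(String[] args) {
    TxnManager txnMgr = new TxnManager();
    txnMgr.openTxn("fred");
    txnMgr.acquireLocks("insert into t values ...");
    System.out.println(txnMgr.lockCount()); // 1 lock while the txn is open
    txnMgr.commitTxn();
    System.out.println(txnMgr.lockCount()); // 0 locks after commit, as the tests expect
  }
}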

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java Tue Aug  5 07:23:02 2014
@@ -509,6 +509,7 @@ public class TestHive extends TestCase {
 
   public void testHiveRefreshOnConfChange() throws Throwable{
     Hive prevHiveObj = Hive.get();
+    prevHiveObj.getDatabaseCurrent();
     Hive newHiveObj;
 
     //if HiveConf has not changed, same object should be returned
@@ -522,6 +523,7 @@ public class TestHive extends TestCase {
 
     //if HiveConf has changed, new object should be returned
     prevHiveObj = Hive.get();
+    prevHiveObj.getDatabaseCurrent();
     //change value of a metavar config param in new hive conf
     newHconf = new HiveConf(hiveConf);
     newHconf.setIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES,

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_admin_user;

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_admin_user;

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_admin_user;

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_admin_user;

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set hive.security.authorization.enabled=true;

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set hive.security.authorization.enabled=true;

Modified: hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q?rev=1615872&r1=1615871&r2=1615872&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q Tue Aug  5 07:23:02 2014
@@ -1,4 +1,3 @@
-set hive.users.in.admin.role=hive_admin_user;
 set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_admin_user;