Posted to commits@hive.apache.org by ga...@apache.org on 2017/10/14 14:42:52 UTC

[15/27] hive git commit: HIVE-17733 Move RawStore to standalone metastore. This closes #258 github PR. (Alan Gates, reviewed by Sergey Shelukhin, Vihang Karajgaonkar, and Zoltan Haindrich)

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java b/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java
deleted file mode 100644
index 54099f6..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hive.metastore.columnstats.merge;
-
-import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector;
-
-public class DecimalColumnStatsMerger extends ColumnStatsMerger {
-  @Override
-  public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
-    DecimalColumnStatsDataInspector aggregateData =
-        (DecimalColumnStatsDataInspector) aggregateColStats.getStatsData().getDecimalStats();
-    DecimalColumnStatsDataInspector newData =
-        (DecimalColumnStatsDataInspector) newColStats.getStatsData().getDecimalStats();
-    Decimal lowValue = aggregateData.getLowValue() != null
-        && (aggregateData.getLowValue().compareTo(newData.getLowValue()) > 0)
-        ? aggregateData.getLowValue() : newData.getLowValue();
-    aggregateData.setLowValue(lowValue);
-    Decimal highValue = aggregateData.getHighValue() != null
-        && (aggregateData.getHighValue().compareTo(newData.getHighValue()) > 0)
-        ? aggregateData.getHighValue() : newData.getHighValue();
-    aggregateData.setHighValue(highValue);
-    aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
-    if (aggregateData.getNdvEstimator() == null || newData.getNdvEstimator() == null) {
-      aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
-    } else {
-      NumDistinctValueEstimator oldEst = aggregateData.getNdvEstimator();
-      NumDistinctValueEstimator newEst = newData.getNdvEstimator();
-      long ndv = -1;
-      if (oldEst.canMerge(newEst)) {
-        oldEst.mergeEstimators(newEst);
-        ndv = oldEst.estimateNumDistinctValues();
-        aggregateData.setNdvEstimator(oldEst);
-      } else {
-        ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs());
-      }
-      LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of "
-          + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv);
-      aggregateData.setNumDVs(ndv);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java b/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java
deleted file mode 100644
index 817a55d..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hive.metastore.columnstats.merge;
-
-import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.columnstats.cache.DoubleColumnStatsDataInspector;
-
-public class DoubleColumnStatsMerger extends ColumnStatsMerger {
-  @Override
-  public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
-    DoubleColumnStatsDataInspector aggregateData =
-        (DoubleColumnStatsDataInspector) aggregateColStats.getStatsData().getDoubleStats();
-    DoubleColumnStatsDataInspector newData =
-        (DoubleColumnStatsDataInspector) newColStats.getStatsData().getDoubleStats();
-    aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
-    aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
-    aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
-    if (aggregateData.getNdvEstimator() == null || newData.getNdvEstimator() == null) {
-      aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
-    } else {
-      NumDistinctValueEstimator oldEst = aggregateData.getNdvEstimator();
-      NumDistinctValueEstimator newEst = newData.getNdvEstimator();
-      long ndv = -1;
-      if (oldEst.canMerge(newEst)) {
-        oldEst.mergeEstimators(newEst);
-        ndv = oldEst.estimateNumDistinctValues();
-        aggregateData.setNdvEstimator(oldEst);
-      } else {
-        ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs());
-      }
-      LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of "
-          + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv);
-      aggregateData.setNumDVs(ndv);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java b/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java
deleted file mode 100644
index dc048e0..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hive.metastore.columnstats.merge;
-
-import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
-
-public class LongColumnStatsMerger extends ColumnStatsMerger {
-  @Override
-  public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
-    LongColumnStatsDataInspector aggregateData =
-        (LongColumnStatsDataInspector) aggregateColStats.getStatsData().getLongStats();
-    LongColumnStatsDataInspector newData =
-        (LongColumnStatsDataInspector) newColStats.getStatsData().getLongStats();
-    aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
-    aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
-    aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
-    if (aggregateData.getNdvEstimator() == null || newData.getNdvEstimator() == null) {
-      aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
-    } else {
-      NumDistinctValueEstimator oldEst = aggregateData.getNdvEstimator();
-      NumDistinctValueEstimator newEst = newData.getNdvEstimator();
-      long ndv = -1;
-      if (oldEst.canMerge(newEst)) {
-        oldEst.mergeEstimators(newEst);
-        ndv = oldEst.estimateNumDistinctValues();
-        aggregateData.setNdvEstimator(oldEst);
-      } else {
-        ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs());
-      }
-      LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of "
-          + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv);
-      aggregateData.setNumDVs(ndv);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java b/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java
deleted file mode 100644
index e353b8f..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hive.metastore.columnstats.merge;
-
-import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
-
-public class StringColumnStatsMerger extends ColumnStatsMerger {
-  @Override
-  public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
-    StringColumnStatsDataInspector aggregateData =
-        (StringColumnStatsDataInspector) aggregateColStats.getStatsData().getStringStats();
-    StringColumnStatsDataInspector newData =
-        (StringColumnStatsDataInspector) newColStats.getStatsData().getStringStats();
-    aggregateData.setMaxColLen(Math.max(aggregateData.getMaxColLen(), newData.getMaxColLen()));
-    aggregateData.setAvgColLen(Math.max(aggregateData.getAvgColLen(), newData.getAvgColLen()));
-    aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
-    if (aggregateData.getNdvEstimator() == null || newData.getNdvEstimator() == null) {
-      aggregateData.setNumDVs(Math.max(aggregateData.getNumDVs(), newData.getNumDVs()));
-    } else {
-      NumDistinctValueEstimator oldEst = aggregateData.getNdvEstimator();
-      NumDistinctValueEstimator newEst = newData.getNdvEstimator();
-      long ndv = -1;
-      if (oldEst.canMerge(newEst)) {
-        oldEst.mergeEstimators(newEst);
-        ndv = oldEst.estimateNumDistinctValues();
-        aggregateData.setNdvEstimator(oldEst);
-      } else {
-        ndv = Math.max(aggregateData.getNumDVs(), newData.getNumDVs());
-      }
-      LOG.debug("Use bitvector to merge column " + aggregateColStats.getColName() + "'s ndvs of "
-          + aggregateData.getNumDVs() + " and " + newData.getNumDVs() + " to be " + ndv);
-      aggregateData.setNumDVs(ndv);
-    }
-  }
-}
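
All four mergers above share the same shape: widen the low/high (or column-length)
bounds, add the null counts, and merge the NDV estimates, falling back to
max(oldNdv, newNdv) whenever either bitvector estimator is missing or the two are
incompatible (max is the safe estimate: the true distinct count of the union is at
least max(a, b) and at most a + b). Note that the Decimal variant differs from the
others: it keeps the larger of the two low values (compareTo(...) > 0) rather than
the smaller, and it calls compareTo on newData's values without a null check. A
minimal, self-contained sketch of the shared pattern (Stats and NdvEstimator are
hypothetical stand-ins, not Hive classes):

    interface NdvEstimator {
      boolean canMerge(NdvEstimator other);
      void merge(NdvEstimator other);   // union of the two sketches
      long estimate();                  // distinct-value estimate of the union
    }

    final class Stats {
      long low, high, numNulls, numDVs;
      NdvEstimator ndv;                 // null when no bitvector was collected

      void mergeFrom(Stats other) {
        low = Math.min(low, other.low);       // widen the bounds
        high = Math.max(high, other.high);
        numNulls += other.numNulls;           // null counts are additive
        if (ndv == null || other.ndv == null || !ndv.canMerge(other.ndv)) {
          numDVs = Math.max(numDVs, other.numDVs);  // safe fallback
        } else {
          ndv.merge(other.ndv);
          numDVs = ndv.estimate();
        }
      }
    }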

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
deleted file mode 100644
index d427fef..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.hbase;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-public interface MetadataStore {
-  /**
-   * @param fileIds file ID list.
-   * @param result The output array, used to return the serialized file metadata.
-   */
-  void getFileMetadata(List<Long> fileIds, ByteBuffer[] result) throws IOException;
-
-  /**
-   * @param fileIds file ID list.
-   * @param metadataBuffers Serialized file metadata, one per file ID.
-   * @param addedCols The column names for additional columns created by the
-   *                  file-format-specific metadata handler, to be stored in the cache.
-   * @param addedVals The values for addedCols; one value per file ID per added column.
-   */
-  void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers,
-      ByteBuffer[] addedCols, ByteBuffer[][] addedVals) throws IOException, InterruptedException;
-
-  /**
-   * @param fileId The file ID.
-   * @param metadata Serialized file metadata.
-   * @param addedCols The column names for additional columns created by the
-   *                  file-format-specific metadata handler, to be stored in the cache.
-   * @param addedVals The values for addedCols; one value per added column.
-   */
-  void storeFileMetadata(long fileId, ByteBuffer metadata, ByteBuffer[] addedCols,
-      ByteBuffer[] addedVals) throws IOException, InterruptedException;
-
-}
\ No newline at end of file
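
As a usage illustration, a hypothetical in-memory implementation of this interface
could look as follows (the null-buffer-means-delete convention and the decision to
ignore addedCols/addedVals are assumptions of this sketch, not documented contract):

    import java.nio.ByteBuffer;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.hadoop.hive.metastore.hbase.MetadataStore;

    // Hypothetical in-memory MetadataStore, for illustration only.
    public class InMemoryMetadataStore implements MetadataStore {
      private final Map<Long, ByteBuffer> cache = new ConcurrentHashMap<>();

      @Override
      public void getFileMetadata(List<Long> fileIds, ByteBuffer[] result) {
        for (int i = 0; i < fileIds.size(); i++) {
          result[i] = cache.get(fileIds.get(i)); // null entry means "not cached"
        }
      }

      @Override
      public void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers,
          ByteBuffer[] addedCols, ByteBuffer[][] addedVals) {
        for (int i = 0; i < fileIds.size(); i++) {
          storeFileMetadata(fileIds.get(i), metadataBuffers.get(i), addedCols,
              addedVals == null ? null : addedVals[i]);
        }
      }

      @Override
      public void storeFileMetadata(long fileId, ByteBuffer metadata,
          ByteBuffer[] addedCols, ByteBuffer[] addedVals) {
        if (metadata == null) {
          cache.remove(fileId);        // this sketch treats a null buffer as a delete
        } else {
          cache.put(fileId, metadata); // addedCols/addedVals ignored in this sketch
        }
      }
    }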

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
deleted file mode 100644
index 12773ac..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
+++ /dev/null
@@ -1,605 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.parser;
-
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
-import org.antlr.runtime.ANTLRStringStream;
-import org.antlr.runtime.CharStream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.ColumnType;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Sets;
-
-/**
- * The class representing the filter as a binary tree. Intermediate nodes are of
- * type TreeNode and leaf-level nodes are of type LeafNode.
- */
-public class ExpressionTree {
-  /** The empty tree that can be returned for an empty filter. */
-  public static final ExpressionTree EMPTY_TREE = new ExpressionTree();
-
-  /** The logical operations supported. */
-  public enum LogicalOperator {
-    AND,
-    OR
-  }
-
-  /** The operators supported. */
-  public enum Operator {
-    EQUALS  ("=", "==", "="),
-    GREATERTHAN  (">"),
-    LESSTHAN  ("<"),
-    LESSTHANOREQUALTO ("<="),
-    GREATERTHANOREQUALTO (">="),
-    LIKE ("LIKE", "matches", "like"),
-    NOTEQUALS2 ("!=", "!=", "<>"),
-    NOTEQUALS ("<>", "!=", "<>");
-
-    private final String op;
-    private final String jdoOp;
-    private final String sqlOp;
-
-    // private constructor
-    private Operator(String op){
-      this.op = op;
-      this.jdoOp = op;
-      this.sqlOp = op;
-    }
-
-    private Operator(String op, String jdoOp, String sqlOp){
-      this.op = op;
-      this.jdoOp = jdoOp;
-      this.sqlOp = sqlOp;
-    }
-
-    public String getOp() {
-      return op;
-    }
-
-    public String getJdoOp() {
-      return jdoOp;
-    }
-
-    public String getSqlOp() {
-      return sqlOp;
-    }
-
-    public static Operator fromString(String inputOperator) {
-      for(Operator op : Operator.values()) {
-        if(op.getOp().equals(inputOperator)){
-          return op;
-        }
-      }
-
-      throw new Error("Invalid value " + inputOperator +
-          " for " + Operator.class.getSimpleName());
-    }
-
-    @Override
-    public String toString() {
-      return op;
-    }
-
-  }
-
-  /**
-   * Depth-first traversal of an ExpressionTree.
-   * Users should override the subset of methods they need.
-   */
-  public static class TreeVisitor {
-    private void visit(TreeNode node) throws MetaException {
-      if (shouldStop()) return;
-      assert node != null && node.getLhs() != null && node.getRhs() != null;
-      beginTreeNode(node);
-      node.lhs.accept(this);
-      midTreeNode(node);
-      node.rhs.accept(this);
-      endTreeNode(node);
-    }
-
-    protected void beginTreeNode(TreeNode node) throws MetaException {}
-    protected void midTreeNode(TreeNode node) throws MetaException {}
-    protected void endTreeNode(TreeNode node) throws MetaException {}
-    protected void visit(LeafNode node) throws MetaException {}
-    protected boolean shouldStop() {
-      return false;
-    }
-  }
-
-  /**
-   * Helper class that wraps the StringBuilder used to build the filter over the tree,
-   * and handles error propagation in two modes: expect errors, i.e. the filter may be
-   * unbuildable and that is not a failure condition; or expect no errors, i.e. the
-   * filter must be buildable.
-   */
-  public static class FilterBuilder {
-    private final StringBuilder result = new StringBuilder();
-    private String errorMessage = null;
-    private boolean expectNoErrors = false;
-
-    public FilterBuilder(boolean expectNoErrors) {
-      this.expectNoErrors = expectNoErrors;
-    }
-
-    public String getFilter() throws MetaException {
-      assert errorMessage == null;
-      if (errorMessage != null) {
-        throw new MetaException("Trying to get result after error: " + errorMessage);
-      }
-      return result.toString();
-    }
-
-    @Override
-    public String toString() {
-      try {
-        return getFilter();
-      } catch (MetaException ex) {
-        throw new RuntimeException(ex);
-      }
-    }
-
-    public String getErrorMessage() {
-      return errorMessage;
-    }
-
-    public boolean hasError() {
-      return errorMessage != null;
-    }
-
-    public FilterBuilder append(String filterPart) {
-      this.result.append(filterPart);
-      return this;
-    }
-
-    public void setError(String errorMessage) throws MetaException {
-      this.errorMessage = errorMessage;
-      if (expectNoErrors) {
-        throw new MetaException(errorMessage);
-      }
-    }
-  }
-
-  /**
-   * The class representing a node in the ExpressionTree.
-   */
-  public static class TreeNode {
-    private TreeNode lhs;
-    private LogicalOperator andOr;
-    private TreeNode rhs;
-
-    public TreeNode() {
-    }
-
-    public TreeNode(TreeNode lhs, LogicalOperator andOr, TreeNode rhs) {
-      this.lhs = lhs;
-      this.andOr = andOr;
-      this.rhs = rhs;
-    }
-
-    public TreeNode getLhs() {
-      return lhs;
-    }
-
-    public LogicalOperator getAndOr() {
-      return andOr;
-    }
-
-    public TreeNode getRhs() {
-      return rhs;
-    }
-
-    /** Double dispatch for TreeVisitor. */
-    protected void accept(TreeVisitor visitor) throws MetaException {
-      visitor.visit(this);
-    }
-
-    /**
-     * Generates a JDO filter statement
-     * @param table
-     *        The table on which the filter is applied.  If table is not null,
-     *        then this method generates a JDO statement to get all partitions
-     *        of the table that match the filter.
-     *        If table is null, then this method generates a JDO statement to get all
-     *        tables that match the filter.
-     * @param params
-     *        A map of parameter key to values for the filter statement.
-     * @param filterBuffer The filter builder that is used to build the filter.
-     * @throws MetaException
-     */
-    public void generateJDOFilter(Configuration conf, Table table,
-        Map<String, Object> params, FilterBuilder filterBuffer) throws MetaException {
-      if (filterBuffer.hasError()) return;
-      if (lhs != null) {
-        filterBuffer.append (" (");
-        lhs.generateJDOFilter(conf, table, params, filterBuffer);
-
-        if (rhs != null) {
-          if( andOr == LogicalOperator.AND ) {
-            filterBuffer.append(" && ");
-          } else {
-            filterBuffer.append(" || ");
-          }
-
-          rhs.generateJDOFilter(conf, table, params, filterBuffer);
-        }
-        filterBuffer.append (") ");
-      }
-    }
-  }
-
-  /**
-   * The class representing the leaf-level nodes in the ExpressionTree.
-   */
-  public static class LeafNode extends TreeNode {
-    public String keyName;
-    public Operator operator;
-    /** Constant expression side of the operator. Can currently be a String or a Long. */
-    public Object value;
-    public boolean isReverseOrder = false;
-    private static final String PARAM_PREFIX = "hive_filter_param_";
-
-    @Override
-    protected void accept(TreeVisitor visitor) throws MetaException {
-      visitor.visit(this);
-    }
-
-    @Override
-    public void generateJDOFilter(Configuration conf, Table table, Map<String, Object> params,
-        FilterBuilder filterBuilder) throws MetaException {
-      if (table != null) {
-        generateJDOFilterOverPartitions(conf, table, params, filterBuilder);
-      } else {
-        generateJDOFilterOverTables(params, filterBuilder);
-      }
-    }
-
-    // Can only support "=" and "!=" for now, because our JDO lib is buggy when
-    // using objects from map.get().
-    private static final Set<Operator> TABLE_FILTER_OPS = Sets.newHashSet(
-        Operator.EQUALS, Operator.NOTEQUALS, Operator.NOTEQUALS2, Operator.LIKE);
-
-    private void generateJDOFilterOverTables(Map<String, Object> params,
-        FilterBuilder filterBuilder) throws MetaException {
-      if (keyName.equals(hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER)) {
-        keyName = "this.owner";
-      } else if (keyName.equals(hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS)) {
-        // lastAccessTime expects an integer, so we cannot use the LIKE operator.
-        if (operator == Operator.LIKE) {
-          filterBuilder.setError("Like is not supported for HIVE_FILTER_FIELD_LAST_ACCESS");
-          return;
-        }
-        keyName = "this.lastAccessTime";
-      } else if (keyName.startsWith(hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS)) {
-        if (!TABLE_FILTER_OPS.contains(operator)) {
-          filterBuilder.setError("Only " + TABLE_FILTER_OPS + " are supported " +
-            "operators for HIVE_FILTER_FIELD_PARAMS");
-          return;
-        }
-        String paramKeyName = keyName.substring(hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS.length());
-        keyName = "this.parameters.get(\"" + paramKeyName + "\")";
-        //value is persisted as a string in the db, so make sure it's a string here
-        // in case we get a long.
-        value = value.toString();
-      } else {
-        filterBuilder.setError("Invalid key name in filter.  " +
-          "Use constants from org.apache.hadoop.hive.metastore.api");
-        return;
-      }
-      generateJDOFilterGeneral(params, filterBuilder);
-    }
-
-    /**
-     * Generates a general filter.  Given a map of <key, value>,
-     * generates a statement of the form:
-     * key1 operator value1 (&& | ||) key2 operator value2 ...
-     *
-     * Currently supported types for value are String and Long.
-     * The LIKE operator for Longs is unsupported.
-     */
-    private void generateJDOFilterGeneral(Map<String, Object> params,
-        FilterBuilder filterBuilder) throws MetaException {
-      String paramName = PARAM_PREFIX + params.size();
-      params.put(paramName, value);
-
-      if (isReverseOrder) {
-        if (operator == Operator.LIKE) {
-          filterBuilder.setError("Value should be on the RHS for LIKE operator : " +
-              "Key <" + keyName + ">");
-        } else {
-          filterBuilder.append(paramName + " " + operator.getJdoOp() + " " + keyName);
-        }
-      } else {
-        if (operator == Operator.LIKE) {
-          filterBuilder.append(" " + keyName + "." + operator.getJdoOp() + "(" + paramName + ") ");
-        } else {
-          filterBuilder.append(" " + keyName + " " + operator.getJdoOp() + " " + paramName);
-        }
-      }
-    }
-
-    private void generateJDOFilterOverPartitions(Configuration conf, Table table,
-        Map<String, Object> params,  FilterBuilder filterBuilder) throws MetaException {
-      int partitionColumnCount = table.getPartitionKeys().size();
-      int partitionColumnIndex = getPartColIndexForFilter(table, filterBuilder);
-      if (filterBuilder.hasError()) return;
-
-      boolean canPushDownIntegral =
-          HiveConf.getBoolVar(conf, HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN);
-      String valueAsString = getJdoFilterPushdownParam(
-          table, partitionColumnIndex, filterBuilder, canPushDownIntegral);
-      if (filterBuilder.hasError()) return;
-
-      String paramName = PARAM_PREFIX + params.size();
-      params.put(paramName, valueAsString);
-
-      boolean isOpEquals = operator == Operator.EQUALS;
-      if (isOpEquals || operator == Operator.NOTEQUALS || operator == Operator.NOTEQUALS2) {
-        makeFilterForEquals(keyName, valueAsString, paramName, params,
-            partitionColumnIndex, partitionColumnCount, isOpEquals, filterBuilder);
-        return;
-      }
-      // Get the value for a partition key from MPartition.values (PARTITION_KEY_VALUES).
-      String valString = "values.get(" + partitionColumnIndex + ")";
-
-      if (operator == Operator.LIKE) {
-        if (isReverseOrder) {
-          // For LIKE, the value should be on the RHS.
-          filterBuilder.setError(
-              "Value should be on the RHS for LIKE operator : Key <" + keyName + ">");
-        }
-        // TODO: in all likelihood, this won't actually work. Keep it for backward compat.
-        filterBuilder.append(" " + valString + "." + operator.getJdoOp() + "(" + paramName + ") ");
-      } else {
-        filterBuilder.append(isReverseOrder
-            ? paramName + " " + operator.getJdoOp() + " " + valString
-            : " " + valString + " " + operator.getJdoOp() + " " + paramName);
-      }
-    }
-
-    /**
-     * @return true iff filter pushdown for this operator can be done for integral types.
-     */
-    public boolean canJdoUseStringsWithIntegral() {
-      return (operator == Operator.EQUALS)
-          || (operator == Operator.NOTEQUALS)
-          || (operator == Operator.NOTEQUALS2);
-    }
-
-    /**
-     * Get partition column index in the table partition column list that
-     * corresponds to the key that is being filtered on by this tree node.
-     * @param table The table.
-     * @param filterBuilder filter builder used to report error, if any.
-     * @return The index.
-     */
-    public int getPartColIndexForFilter(
-        Table table, FilterBuilder filterBuilder) throws MetaException {
-      int partitionColumnIndex;
-      assert (table.getPartitionKeys().size() > 0);
-      for (partitionColumnIndex = 0; partitionColumnIndex < table.getPartitionKeys().size();
-          ++partitionColumnIndex) {
-        if (table.getPartitionKeys().get(partitionColumnIndex).getName().
-            equalsIgnoreCase(keyName)) {
-          break;
-        }
-      }
-      if( partitionColumnIndex == table.getPartitionKeys().size()) {
-        filterBuilder.setError("Specified key <" + keyName +
-            "> is not a partitioning key for the table");
-        return -1;
-      }
-
-      return partitionColumnIndex;
-    }
-
-    /**
-     * Validates and gets the query parameter for JDO filter pushdown based on the column
-     * and the constant stored in this node.
-     * @param table The table.
-     * @param partColIndex The index of the column to check.
-     * @param filterBuilder filter builder used to report error, if any.
-     * @return The parameter string.
-     */
-    private String getJdoFilterPushdownParam(Table table, int partColIndex,
-        FilterBuilder filterBuilder, boolean canPushDownIntegral) throws MetaException {
-      boolean isIntegralSupported = canPushDownIntegral && canJdoUseStringsWithIntegral();
-      String colType = table.getPartitionKeys().get(partColIndex).getType();
-      // Can only support partitions whose types are string, or maybe integers
-      if (!colType.equals(ColumnType.STRING_TYPE_NAME)
-          && (!isIntegralSupported || !ColumnType.IntegralTypes.contains(colType))) {
-        filterBuilder.setError("Filtering is supported only on partition keys of type " +
-            "string" + (isIntegralSupported ? ", or integral types" : ""));
-        return null;
-      }
-
-      // There's no support for date cast in JDO. Let's convert it to string; the date
-      // columns have been excluded above, so it will either compare w/string or fail.
-      Object val = value;
-      if (value instanceof Date) {
-        val = HiveMetaStore.PARTITION_DATE_FORMAT.get().format((Date)value);
-      }
-      boolean isStringValue = val instanceof String;
-      if (!isStringValue && (!isIntegralSupported || !(val instanceof Long))) {
-        filterBuilder.setError("Filtering is supported only on partition keys of type " +
-            "string" + (isIntegralSupported ? ", or integral types" : ""));
-        return null;
-      }
-
-      return isStringValue ? (String)val : Long.toString((Long)val);
-    }
-  }
-
-  public void accept(TreeVisitor treeVisitor) throws MetaException {
-    if (this.root != null) {
-      this.root.accept(treeVisitor);
-    }
-  }
-
-  /**
-   * For equals and not-equals, we can make the JDO query much faster by filtering
-   * based on the partition name. For a condition like ds="2010-10-01", we can check
-   * whether any partition has a name that contains the substring "ds=2010-10-01/".
-   * False matches aren't possible, since "=" is escaped for partition names
-   * and the trailing '/' ensures that we won't get a match with ds=2010-10-011.
-   * Note that filters on integral-type equality also work correctly by virtue of
-   * comparing the values as part of the ds=1234 string.
-   *
-   * Two cases to keep in mind: the case with only one partition column (no '/'s),
-   * and the case where the partition key column is at the end of the name (no
-   * trailing '/').
-   *
-   * @param keyName name of the partition column e.g. ds.
-   * @param value The value to compare to.
-   * @param paramName name of the parameter to use for JDOQL.
-   * @param params a map from the parameter name to their values.
-   * @param keyPos The index of the requisite partition column in the list of such columns.
-   * @param keyCount Partition column count for the table.
-   * @param isEq whether the operator is equals, or not-equals.
-   * @param fltr Filter builder used to append the filter, or report errors.
-   */
-  private static void makeFilterForEquals(String keyName, String value, String paramName,
-      Map<String, Object> params, int keyPos, int keyCount, boolean isEq, FilterBuilder fltr)
-          throws MetaException {
-    Map<String, String> partKeyToVal = new HashMap<String, String>();
-    partKeyToVal.put(keyName, value);
-    // If a partition has multiple partition keys, we make the assumption that
-    // makePartName with one key will return a substring of the name made
-    // with all the keys.
-    String escapedNameFragment = Warehouse.makePartName(partKeyToVal, false);
-    if (keyCount == 1) {
-      // Case where there are no other partition columns.
-      params.put(paramName, escapedNameFragment);
-      fltr.append("partitionName ").append(isEq ? "== " : "!= ").append(paramName);
-    } else if (keyPos + 1 == keyCount) {
-      // Case where the partition column is at the end of the name. There will
-      // be a leading '/' but no trailing '/'
-      params.put(paramName, "/" + escapedNameFragment);
-      fltr.append(isEq ? "" : "!").append("partitionName.endsWith(")
-        .append(paramName).append(")");
-    } else if (keyPos == 0) {
-      // Case where the partition column is at the beginning of the name. There will
-      // be a trailing '/' but no leading '/'
-      params.put(paramName, escapedNameFragment + "/");
-      fltr.append(isEq ? "" : "!").append("partitionName.startsWith(")
-        .append(paramName).append(")");
-    } else {
-      // Case where the partition column is in the middle of the name. There will
-      // be a leading '/' and a trailing '/'.
-      params.put(paramName, "/" + escapedNameFragment + "/");
-      fltr.append("partitionName.indexOf(").append(paramName).append(")")
-        .append(isEq ? ">= 0" : "< 0");
-    }
-  }
-
-  /**
-   * The root node for the tree.
-   */
-  private TreeNode root = null;
-
-  /**
-   * The node stack used to keep track of the tree nodes during parsing.
-   */
-  private final Stack<TreeNode> nodeStack = new Stack<TreeNode>();
-
-  public TreeNode getRoot() {
-    return this.root;
-  }
-
-  @VisibleForTesting
-  public void setRootForTest(TreeNode tn) {
-    this.root = tn;
-  }
-
-
-  /**
-   * Adds an intermediate node of either type (AND/OR). Pops the last two nodes
-   * from the stack, sets them as children of the new node, and pushes the new
-   * node onto the stack.
-   * @param andOr the operator type
-   */
-  public void addIntermediateNode(LogicalOperator andOr) {
-
-    TreeNode rhs = nodeStack.pop();
-    TreeNode lhs = nodeStack.pop();
-    TreeNode newNode = new TreeNode(lhs, andOr, rhs);
-    nodeStack.push(newNode);
-    root = newNode;
-  }
-
-  /**
-   * Adds a leaf node, pushes the new node onto the stack.
-   * @param newNode the new node
-   */
-  public void addLeafNode(LeafNode newNode) {
-    if( root == null ) {
-      root = newNode;
-    }
-    nodeStack.push(newNode);
-  }
-
-  /** Generate the JDOQL filter for the given expression tree
-   * @param table the table being queried
-   * @param params the input map, which is updated with the
-   *     parameterized values. Keys are the parameter names and values
-   *     are the parameter values.
-   * @param filterBuilder the filter builder to append to.
-   */
-  public void generateJDOFilterFragment(Configuration conf, Table table,
-      Map<String, Object> params, FilterBuilder filterBuilder) throws MetaException {
-    if (root == null) {
-      return;
-    }
-
-    filterBuilder.append(" && ( ");
-    root.generateJDOFilter(conf, table, params, filterBuilder);
-    filterBuilder.append(" )");
-  }
-
-  /** Case-insensitive ANTLR string stream. */
-  public static class ANTLRNoCaseStringStream extends ANTLRStringStream {
-    public ANTLRNoCaseStringStream (String input) {
-      super(input);
-    }
-
-    @Override
-    public int LA (int i) {
-      int returnChar = super.LA (i);
-
-      if (returnChar == CharStream.EOF) {
-        return returnChar;
-      }
-      else if (returnChar == 0) {
-        return returnChar;
-      }
-
-      return Character.toUpperCase ((char) returnChar);
-    }
-  }
-}
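
The accept methods on TreeNode and LeafNode above implement double dispatch for
TreeVisitor, so callers can walk a filter without switching on node types. A small
hypothetical visitor built against these classes, collecting every key name a
filter references:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.parser.ExpressionTree;

    // Hypothetical visitor, for illustration: records the key of each leaf node.
    class KeyCollector extends ExpressionTree.TreeVisitor {
      final Set<String> keys = new HashSet<>();

      @Override
      protected void visit(ExpressionTree.LeafNode node) {
        keys.add(node.keyName);
      }
    }

    class KeyCollectorExample {
      static Set<String> keysOf(ExpressionTree tree) throws MetaException {
        KeyCollector collector = new KeyCollector();
        tree.accept(collector);   // depth-first walk via double dispatch
        return collector.keys;
      }
    }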

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
deleted file mode 100644
index 81111a0..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g
+++ /dev/null
@@ -1,484 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-grammar Filter;
-
-options
-{
-  k=3;
-}
-
-
-// Package headers
-@header {
-package org.apache.hadoop.hive.metastore.parser;
-
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
-import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
-}
-
-@lexer::header {
-package org.apache.hadoop.hive.metastore.parser;
-
-import java.sql.Date;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-}
-
-@lexer::members {
-  public String errorMsg;
-
-  private static final Pattern datePattern = Pattern.compile(".*(\\d\\d\\d\\d-\\d\\d-\\d\\d).*");
-  private static final ThreadLocal<SimpleDateFormat> dateFormat =
-       new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      SimpleDateFormat val = new SimpleDateFormat("yyyy-MM-dd");
-      val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20.
-      return val;
-    };
-  };
-
-  public static java.sql.Date ExtractDate (String input) {
-    Matcher m = datePattern.matcher(input);
-    if (!m.matches()) {
-      return null;
-    }
-    try {
-      return new java.sql.Date(dateFormat.get().parse(m.group(1)).getTime());
-    } catch (ParseException pe) {
-      return null;
-    }
-  }
-
-  @Override
-  public void emitErrorMessage(String msg) {
-    // save for caller to detect invalid filter
-    errorMsg = msg;
-  }
-}
-
-@members {
-  public ExpressionTree tree = new ExpressionTree();
-
-  public static String TrimQuotes (String input) {
-    if (input.length () > 1) {
-      if ((input.charAt (0) == '"' && input.charAt (input.length () - 1) == '"')
-        || (input.charAt (0) == '\'' && input.charAt (input.length () - 1) == '\'')) {
-        return input.substring (1, input.length () - 1);
-      }
-    }
-    return input;
-  }
-}
-
-@rulecatch{
-  catch (RecognitionException e){
-    throw e;
-  }
-}
-
-//main rule
-filter
-    :
-    orExpression 
-    ;
-
-orExpression
-    :
-    andExpression (KW_OR andExpression { tree.addIntermediateNode(LogicalOperator.OR); } )*
-    ;
-
-andExpression
-    :
-    expression (KW_AND expression  { tree.addIntermediateNode(LogicalOperator.AND); } )*
-    ;
-
-expression
-    :
-    LPAREN orExpression RPAREN
-    |
-    operatorExpression
-    ;
-
-operatorExpression
-    :
-    betweenExpression
-    |
-    inExpression
-    |
-    multiColInExpression
-    |
-    binOpExpression
-    ;
-
-binOpExpression
-@init {
-    boolean isReverseOrder = false;
-    Object val = null;
-}
-    :
-    (
-       (
-         (key = Identifier op = operator  value = DateLiteral)
-         |
-         (value = DateLiteral  op = operator key = Identifier) { isReverseOrder = true; }
-       ) { val = FilterLexer.ExtractDate(value.getText()); }
-       |
-       (
-         (key = Identifier op = operator  value = StringLiteral)
-         |
-         (value = StringLiteral  op = operator key = Identifier) { isReverseOrder = true; }
-       ) { val = TrimQuotes(value.getText()); }
-       |
-       (
-         (key = Identifier op = operator value = IntegralLiteral)
-         |
-         (value = IntegralLiteral op = operator key = Identifier) { isReverseOrder = true; }
-       ) { val = Long.parseLong(value.getText()); }
-    )
-    {
-        LeafNode node = new LeafNode();
-        node.keyName = key.getText();
-        node.value = val;
-        node.operator = op;
-        node.isReverseOrder = isReverseOrder;
-
-        tree.addLeafNode(node);
-    };
-
-operator returns [Operator op]
-   :
-   t = (LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO | KW_LIKE | EQUAL | NOTEQUAL)
-   {
-      $op = Operator.fromString(t.getText().toUpperCase());
-   };
-
-betweenExpression
-@init {
-    Object leftV = null;
-    Object rightV = null;
-    boolean isPositive = true;
-}
-    :
-    (
-       key = Identifier (KW_NOT { isPositive = false; } )? BETWEEN
-       (
-         (left = DateLiteral KW_AND right = DateLiteral) {
-            leftV = FilterLexer.ExtractDate(left.getText());
-            rightV = FilterLexer.ExtractDate(right.getText());
-         }
-         |
-         (left = StringLiteral KW_AND right = StringLiteral) { leftV = TrimQuotes(left.getText());
-            rightV = TrimQuotes(right.getText());
-         }
-         |
-         (left = IntegralLiteral KW_AND right = IntegralLiteral) { leftV = Long.parseLong(left.getText());
-            rightV = Long.parseLong(right.getText());
-         }
-       )
-    )
-    {
-        LeafNode leftNode = new LeafNode(), rightNode = new LeafNode();
-        leftNode.keyName = rightNode.keyName = key.getText();
-        leftNode.value = leftV;
-        rightNode.value = rightV;
-        leftNode.operator = isPositive ? Operator.GREATERTHANOREQUALTO : Operator.LESSTHAN;
-        rightNode.operator = isPositive ? Operator.LESSTHANOREQUALTO : Operator.GREATERTHAN;
-        tree.addLeafNode(leftNode);
-        tree.addLeafNode(rightNode);
-        tree.addIntermediateNode(isPositive ? LogicalOperator.AND : LogicalOperator.OR);
-    };
-
-inExpression
-@init {
-    List constants = new ArrayList();
-    Object constantV = null;
-    boolean isPositive = true;
-}
-    :
-    (
-        LPAREN key = Identifier RPAREN ( KW_NOT { isPositive = false; } )? IN LPAREN
-        (
-            (
-                constant = DateLiteral
-                {
-                    constantV = FilterLexer.ExtractDate(constant.getText());
-                    constants.add(constantV);
-                }
-                (
-                    COMMA constant = DateLiteral
-                    {
-                        constantV = FilterLexer.ExtractDate(constant.getText());
-                        constants.add(constantV);
-                    }
-                )*
-            )
-            |
-            (
-                constant = StringLiteral
-                {
-                    constantV = TrimQuotes(constant.getText());
-                    constants.add(constantV);
-                }
-                (
-                    COMMA constant = StringLiteral
-                    {
-                        constantV = TrimQuotes(constant.getText());
-                        constants.add(constantV);
-                    }
-                )*
-            )
-            |
-            (
-                constant = IntegralLiteral
-                {
-                    constantV = Long.parseLong(constant.getText());
-                    constants.add(constantV);
-                }
-                (
-                    COMMA constant = IntegralLiteral
-                    {
-                        constantV = Long.parseLong(constant.getText());
-                        constants.add(constantV);
-                    }
-                )*
-            )
-        ) RPAREN
-    )
-    {
-        for (int i = 0; i < constants.size(); i++) {
-            Object value = constants.get(i);
-            LeafNode leaf = new LeafNode();
-            leaf.keyName = key.getText();
-            leaf.value = value;
-            leaf.operator = isPositive ? Operator.EQUALS : Operator.NOTEQUALS2;
-            tree.addLeafNode(leaf);
-            if (i != 0) {
-                tree.addIntermediateNode(isPositive ? LogicalOperator.OR : LogicalOperator.AND);
-            }
-        }
-    };
-
-multiColInExpression
-@init {
-    List<String> keyNames = new ArrayList<String>();
-    List constants = new ArrayList();
-    List partialConstants;
-    String keyV = null;
-    Object constantV = null;
-    boolean isPositive = true;
-}
-    :
-    (
-        LPAREN
-        (
-            KW_STRUCT LPAREN key = Identifier
-            {
-                keyV = key.getText();
-                keyNames.add(keyV);
-            }
-            (
-                COMMA key = Identifier
-                {
-                    keyV = key.getText();
-                    keyNames.add(keyV);
-                }
-            )* RPAREN
-        ) RPAREN ( KW_NOT { isPositive = false; } )? IN LPAREN KW_CONST KW_STRUCT LPAREN
-        {
-            partialConstants = new ArrayList();
-        }
-        (
-            constant = DateLiteral
-            {
-                constantV = FilterLexer.ExtractDate(constant.getText());
-                partialConstants.add(constantV);
-            }
-            | constant = StringLiteral
-            {
-                constantV = TrimQuotes(constant.getText());
-                partialConstants.add(constantV);
-            }
-            | constant = IntegralLiteral
-            {
-                constantV = Long.parseLong(constant.getText());
-                partialConstants.add(constantV);
-            }
-        )
-        (
-            COMMA
-            (
-                constant = DateLiteral
-                {
-                    constantV = FilterLexer.ExtractDate(constant.getText());
-                    partialConstants.add(constantV);
-                }
-                | constant = StringLiteral
-                {
-                    constantV = TrimQuotes(constant.getText());
-                    partialConstants.add(constantV);
-                }
-                | constant = IntegralLiteral
-                {
-                    constantV = Long.parseLong(constant.getText());
-                    partialConstants.add(constantV);
-                }
-            )
-        )*
-        {
-            constants.add(partialConstants);
-        }
-        RPAREN
-        (
-            COMMA KW_CONST KW_STRUCT LPAREN
-            {
-                partialConstants = new ArrayList();
-            }
-            (
-                constant = DateLiteral
-                {
-                    constantV = FilterLexer.ExtractDate(constant.getText());
-                    partialConstants.add(constantV);
-                }
-                | constant = StringLiteral
-                {
-                    constantV = TrimQuotes(constant.getText());
-                    partialConstants.add(constantV);
-                }
-                | constant = IntegralLiteral
-                {
-                    constantV = Long.parseLong(constant.getText());
-                    partialConstants.add(constantV);
-                }
-            )
-            (
-                COMMA
-                (
-                    constant = DateLiteral
-                    {
-                        constantV = FilterLexer.ExtractDate(constant.getText());
-                        partialConstants.add(constantV);
-                    }
-                    | constant = StringLiteral
-                    {
-                        constantV = TrimQuotes(constant.getText());
-                        partialConstants.add(constantV);
-                    }
-                    | constant = IntegralLiteral
-                    {
-                        constantV = Long.parseLong(constant.getText());
-                        partialConstants.add(constantV);
-                    }
-                )
-            )*
-            {
-                constants.add(partialConstants);
-            }
-            RPAREN
-        )* RPAREN
-    )
-    {
-        for (int i = 0; i < constants.size(); i++) {
-            List list = (List) constants.get(i);
-            assert keyNames.size() == list.size();
-            for (int j=0; j < list.size(); j++) {
-                String keyName = keyNames.get(j);
-                Object value = list.get(j);
-                LeafNode leaf = new LeafNode();
-                leaf.keyName = keyName;
-                leaf.value = value;
-                leaf.operator = isPositive ? Operator.EQUALS : Operator.NOTEQUALS2;
-                tree.addLeafNode(leaf);
-                if (j != 0) {
-                    tree.addIntermediateNode(isPositive ? LogicalOperator.AND : LogicalOperator.OR);
-                }
-            }
-            if (i != 0) {
-                tree.addIntermediateNode(isPositive ? LogicalOperator.OR : LogicalOperator.AND);
-            }
-        }
-    };
-
-// Keywords
-KW_NOT : 'NOT';
-KW_AND : 'AND';
-KW_OR : 'OR';
-KW_LIKE : 'LIKE';
-KW_DATE : 'date';
-KW_CONST : 'CONST';
-KW_STRUCT : 'STRUCT';
-
-// Operators
-LPAREN : '(' ;
-RPAREN : ')' ;
-COMMA : ',' ;
-EQUAL : '=';
-NOTEQUAL : '<>' | '!=';
-LESSTHANOREQUALTO : '<=';
-LESSTHAN : '<';
-GREATERTHANOREQUALTO : '>=';
-GREATERTHAN : '>';
-BETWEEN : 'BETWEEN';
-IN : 'IN';
-
-// LITERALS
-fragment
-Letter
-    : 'a'..'z' | 'A'..'Z'
-    ;
-
-fragment
-Digit
-    :
-    '0'..'9'
-    ;
-
-fragment DateString
-    :
-    (Digit)(Digit)(Digit)(Digit) '-' (Digit)(Digit) '-' (Digit)(Digit)
-    ;
-
-/* When I figure out how to make the lexer backtrack after validating the predicate, dates will be
-able to support single quotes [( '\'' DateString '\'' ) |]. For now, we instead have a hack in the
-metastore code that parses the date out of a StringLiteral. */
-DateLiteral
-    :
-    KW_DATE? DateString { ExtractDate(getText()) != null }?
-    ;
-
-StringLiteral
-    :
-    ( '\'' ( ~('\''|'\\') | ('\\' .) )* '\''
-    | '\"' ( ~('\"'|'\\') | ('\\' .) )* '\"'
-    )
-    ;
-
-IntegralLiteral
-    :
-    ('-')? (Digit)+
-    ;
-
-Identifier
-    :
-    (Letter | Digit) (Letter | Digit | '_')*
-    ;
-
-WS  :   (' '|'\r'|'\t'|'\n')+ { skip(); } ;
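
For context, the lexer/parser pair ANTLR generates from this grammar (FilterLexer
and FilterParser, in org.apache.hadoop.hive.metastore.parser per the headers above)
is typically driven along these lines; this is an illustrative sketch with error
handling reduced to the essentials:

    import org.antlr.runtime.CommonTokenStream;
    import org.antlr.runtime.RecognitionException;

    import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
    import org.apache.hadoop.hive.metastore.parser.FilterLexer;
    import org.apache.hadoop.hive.metastore.parser.FilterParser;

    public class FilterParseExample {
      // Sketch: parse a filter string such as "ds >= '2010-01-01' and hr = 12".
      public static ExpressionTree parse(String filter) throws RecognitionException {
        FilterLexer lexer =
            new FilterLexer(new ExpressionTree.ANTLRNoCaseStringStream(filter));
        FilterParser parser = new FilterParser(new CommonTokenStream(lexer));
        parser.filter();               // main rule; populates the public parser.tree
        if (lexer.errorMsg != null) {  // emitErrorMessage stores errors on the lexer
          throw new IllegalArgumentException("Invalid filter: " + lexer.errorMsg);
        }
        return parser.tree;
      }
    }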

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java
deleted file mode 100644
index 29f1f0b..0000000
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * This package implements the parser for parsing the string filter
- * for the listPartitionsByFilter API.
- */
-package org.apache.hadoop.hive.metastore.parser;

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java
deleted file mode 100644
index ea11ed5..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test the Deadline class used for long-running requests.
- */
-public class TestDeadline {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestDeadline.class);
-  private long timeout = 1000;
-  private long resetTimeout = 200;
-  private long duration = 500;
-  private boolean isFailed;
-  private String errorMsg;
-
-  @Test
-  public void testDeadline() throws Exception {
-    isFailed = false;
-    errorMsg = "";
-
-    Thread threadTimeout = new Thread(createRunnable());
-    threadTimeout.setDaemon(true);
-    threadTimeout.start();
-    threadTimeout.join(60000);
-
-    if (isFailed) {
-      Assert.fail(errorMsg);
-    }
-  }
-
-  private Runnable createRunnable() {
-    return new Runnable() {
-      @Override
-      public void run() {
-        Deadline.registerIfNot(timeout);
-        try {
-          // normal
-          start();
-          try {
-            Deadline.checkTimeout();
-          } catch (MetaException e) {
-            failInThread("should not timeout", e);
-            return;
-          }
-          Deadline.stopTimer();
-
-          // normal. Check stopTimer() works.
-          start();
-          try {
-            Deadline.checkTimeout();
-          } catch (MetaException e) {
-            failInThread("should not timeout", e);
-            return;
-          }
-          Deadline.stopTimer();
-
-          // reset
-          Deadline.resetTimeout(resetTimeout);
-
-          // timeout
-          start();
-          try {
-            Deadline.checkTimeout();
-            failInThread("should timeout.", null);
-            return;
-          } catch (MetaException e) {
-            if (e.getCause() instanceof DeadlineException) {
-              Deadline.clear();
-            } else {
-              failInThread("new MetaException failed.", e);
-              return;
-            }
-          }
-          if (Deadline.getCurrentDeadline() != null) {
-            failInThread("the threadlocal object should be removed after timeout.", null);
-          }
-        } catch (MetaException e) {
-          failInThread("error happens in start, end, or reset. Check the exception.", e);
-        }
-      }
-    };
-  }
-
-  private void start() throws MetaException {
-    Deadline.startTimer("test");
-    try {
-      Thread.sleep(duration);
-    } catch (InterruptedException e) {
-      failInThread("Odd. Sleeping thread is interrupted.", e);
-    }
-  }
-
-  private void failInThread(String msg, Exception e) {
-    isFailed = true;
-
-    if (e != null) {
-      LOG.error(msg, e);
-      errorMsg = msg + ": " + e.getMessage();
-    } else {
-      LOG.error(msg);
-      errorMsg = msg;
-    }
-  }
-}
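
The test above walks the full Deadline lifecycle: register once per thread,
start a timer around each operation, check it from inside the operation, and
stop it afterwards. In calling code that looks roughly like this (a sketch;
the operation name is illustrative and the long-running work is elided):

    Deadline.registerIfNot(timeoutMs);      // one-time registration for this thread
    Deadline.startTimer("listPartitions");  // illustrative operation name
    try {
      // ... long-running work that periodically calls Deadline.checkTimeout(),
      // which throws a MetaException wrapping a DeadlineException once expired ...
    } finally {
      Deadline.stopTimer();
    }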

http://git-wip-us.apache.org/repos/asf/hive/blob/133d3c47/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
deleted file mode 100644
index 67b798f..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-
-import com.codahale.metrics.Counter;
-import com.google.common.base.Supplier;
-import com.google.common.collect.ImmutableList;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor;
-import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.NotificationEvent;
-import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
-import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
-import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.messaging.EventMessage;
-import org.apache.hadoop.hive.metastore.metrics.Metrics;
-import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
-import org.apache.hadoop.hive.metastore.model.MNotificationLog;
-import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.jdo.Query;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class TestObjectStore {
-  private ObjectStore objectStore = null;
-
-  private static final String DB1 = "testobjectstoredb1";
-  private static final String DB2 = "testobjectstoredb2";
-  private static final String TABLE1 = "testobjectstoretable1";
-  private static final String KEY1 = "testobjectstorekey1";
-  private static final String KEY2 = "testobjectstorekey2";
-  private static final String OWNER = "testobjectstoreowner";
-  private static final String USER1 = "testobjectstoreuser1";
-  private static final String ROLE1 = "testobjectstorerole1";
-  private static final String ROLE2 = "testobjectstorerole2";
-  private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName());
-
-  private static final class LongSupplier implements Supplier<Long> {
-    public long value = 0;
-
-    @Override
-    public Long get() {
-      return value;
-    }
-  }
-
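-  /** A no-op stub; these tests never push filter expressions down to the store. */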
-  public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
-    @Override
-    public String convertExprToFilter(byte[] expr) throws MetaException {
-      return null;
-    }
-
-    @Override
-    public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
-        byte[] expr, String defaultPartitionName, List<String> partitionNames)
-        throws MetaException {
-      return false;
-    }
-
-    @Override
-    public FileMetadataExprType getMetadataType(String inputFormat) {
-      return null;
-    }
-
-    @Override
-    public SearchArgument createSarg(byte[] expr) {
-      return null;
-    }
-
-    @Override
-    public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
-      return null;
-    }
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    HiveConf conf = new HiveConf();
-    conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName());
-
-    objectStore = new ObjectStore();
-    objectStore.setConf(conf);
-    dropAllStoreObjects(objectStore);
-  }
-
-  @After
-  public void tearDown() {
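-    // Nothing to tear down here; setUp() drops all store objects before each test.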
-  }
-
-  /**
-   * Test notification operations
-   */
-  @Test
-  public void testNotificationOps() throws InterruptedException {
-    final int NO_EVENT_ID = 0;
-    final int FIRST_EVENT_ID = 1;
-    final int SECOND_EVENT_ID = 2;
-
-    NotificationEvent event =
-        new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
-    NotificationEventResponse eventResponse;
-    CurrentNotificationEventId eventId;
-
-    // Verify that there are no notifications available yet
-    eventId = objectStore.getCurrentNotificationEventId();
-    Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
-
-    // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
-    objectStore.addNotificationEvent(event);
-    Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
-    objectStore.addNotificationEvent(event);
-    Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
-
-    // Verify that objectStore fetches the latest notification event ID
-    eventId = objectStore.getCurrentNotificationEventId();
-    Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
-
-    // Verify that getNextNotification() returns all events
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
-    Assert.assertEquals(2, eventResponse.getEventsSize());
-    Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
-    Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
-
-    // Verify that getNextNotification(last) returns events after a specified event
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
-    Assert.assertEquals(1, eventResponse.getEventsSize());
-    Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
-
-    // Verify that getNextNotification(last) returns zero events if there are no more notifications available
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
-    Assert.assertEquals(0, eventResponse.getEventsSize());
-
-    // Verify that cleanNotificationEvents() cleans up all old notifications
-    Thread.sleep(1);
-    objectStore.cleanNotificationEvents(1);
-    eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
-    Assert.assertEquals(0, eventResponse.getEventsSize());
-  }
-
-  /**
-   * Test database operations
-   */
-  @Test
-  public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSuchObjectException {
-    Database db1 = new Database(DB1, "description", "locationurl", null);
-    Database db2 = new Database(DB2, "description", "locationurl", null);
-    objectStore.createDatabase(db1);
-    objectStore.createDatabase(db2);
-
-    List<String> databases = objectStore.getAllDatabases();
-    LOG.info("databases: " + databases);
-    Assert.assertEquals(2, databases.size());
-    Assert.assertEquals(DB1, databases.get(0));
-    Assert.assertEquals(DB2, databases.get(1));
-
-    objectStore.dropDatabase(DB1);
-    databases = objectStore.getAllDatabases();
-    Assert.assertEquals(1, databases.size());
-    Assert.assertEquals(DB2, databases.get(0));
-
-    objectStore.dropDatabase(DB2);
-  }
-
-  /**
-   * Test table operations
-   */
-  @Test
-  public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
-    Database db1 = new Database(DB1, "description", "locationurl", null);
-    objectStore.createDatabase(db1);
-    StorageDescriptor sd1 = new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)),
-            "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
-            null, null, null);
-    HashMap<String, String> params = new HashMap<>();
-    params.put("EXTERNAL", "false");
-    Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE");
-    objectStore.createTable(tbl1);
-
-    List<String> tables = objectStore.getAllTables(DB1);
-    Assert.assertEquals(1, tables.size());
-    Assert.assertEquals(TABLE1, tables.get(0));
-
-    StorageDescriptor sd2 = new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)),
-            "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
-            null, null, null);
-    Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, "MANAGED_TABLE");
-    objectStore.alterTable(DB1, TABLE1, newTbl1);
-    tables = objectStore.getTables(DB1, "new*");
-    Assert.assertEquals(1, tables.size());
-    Assert.assertEquals("new" + TABLE1, tables.get(0));
-
-    objectStore.createTable(tbl1);
-    tables = objectStore.getAllTables(DB1);
-    Assert.assertEquals(2, tables.size());
-
-    List<SQLForeignKey> foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null);
-    Assert.assertEquals(0, foreignKeys.size());
-
-    SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1,
-            "pk_const_1", false, false, false);
-    objectStore.addPrimaryKeys(ImmutableList.of(pk));
-    SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col",
-            DB1, "new" + TABLE1, "fk_col", 1,
-            0, 0, "fk_const_1", "pk_const_1", false, false, false);
-    objectStore.addForeignKeys(ImmutableList.of(fk));
-
-    // Retrieve from PK side
-    foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1);
-    Assert.assertEquals(1, foreignKeys.size());
-
-    List<SQLForeignKey> fks = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1);
-    if (fks != null) {
-      for (SQLForeignKey fkcol : fks) {
-        objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), fkcol.getFk_name());
-      }
-    }
-    // Retrieve from FK side
-    foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null);
-    Assert.assertEquals(0, foreignKeys.size());
-    // Retrieve from PK side
-    foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1);
-    Assert.assertEquals(0, foreignKeys.size());
-
-    objectStore.dropTable(DB1, TABLE1);
-    tables = objectStore.getAllTables(DB1);
-    Assert.assertEquals(1, tables.size());
-
-    objectStore.dropTable(DB1, "new" + TABLE1);
-    tables = objectStore.getAllTables(DB1);
-    Assert.assertEquals(0, tables.size());
-
-    objectStore.dropDatabase(DB1);
-  }
-
-  private StorageDescriptor createFakeSd(String location) {
-    return new StorageDescriptor(null, location, null, null, false, 0,
-        new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
-  }
-
-
-  /**
-   * Tests partition operations
-   */
-  @Test
-  public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
-    Database db1 = new Database(DB1, "description", "locationurl", null);
-    objectStore.createDatabase(db1);
-    StorageDescriptor sd = createFakeSd("location");
-    HashMap<String, String> tableParams = new HashMap<>();
-    tableParams.put("EXTERNAL", "false");
-    FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, "");
-    FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, "");
-    Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE");
-    objectStore.createTable(tbl1);
-    HashMap<String, String> partitionParams = new HashMap<>();
-    partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
-    List<String> value1 = Arrays.asList("US", "CA");
-    Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
-    objectStore.addPartition(part1);
-    List<String> value2 = Arrays.asList("US", "MA");
-    Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
-    objectStore.addPartition(part2);
-
-    Deadline.startTimer("getPartition");
-    List<Partition> partitions = objectStore.getPartitions(DB1, TABLE1, 10);
-    Assert.assertEquals(2, partitions.size());
-    Assert.assertEquals(111, partitions.get(0).getCreateTime());
-    Assert.assertEquals(222, partitions.get(1).getCreateTime());
-
-    int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "");
-    Assert.assertEquals(partitions.size(), numPartitions);
-
-    numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\"");
-    Assert.assertEquals(2, numPartitions);
-
-    objectStore.dropPartition(DB1, TABLE1, value1);
-    partitions = objectStore.getPartitions(DB1, TABLE1, 10);
-    Assert.assertEquals(1, partitions.size());
-    Assert.assertEquals(222, partitions.get(0).getCreateTime());
-
-    objectStore.dropPartition(DB1, TABLE1, value2);
-    objectStore.dropTable(DB1, TABLE1);
-    objectStore.dropDatabase(DB1);
-  }
-
-  /**
-   * Test master key operations
-   */
-  @Test
-  public void testMasterKeyOps() throws MetaException, NoSuchObjectException {
-    int id1 = objectStore.addMasterKey(KEY1);
-    int id2 = objectStore.addMasterKey(KEY2);
-
-    String[] keys = objectStore.getMasterKeys();
-    Assert.assertEquals(2, keys.length);
-    Assert.assertEquals(KEY1, keys[0]);
-    Assert.assertEquals(KEY2, keys[1]);
-
-    objectStore.updateMasterKey(id1, "new" + KEY1);
-    objectStore.updateMasterKey(id2, "new" + KEY2);
-    keys = objectStore.getMasterKeys();
-    Assert.assertEquals(2, keys.length);
-    Assert.assertEquals("new" + KEY1, keys[0]);
-    Assert.assertEquals("new" + KEY2, keys[1]);
-
-    objectStore.removeMasterKey(id1);
-    keys = objectStore.getMasterKeys();
-    Assert.assertEquals(1, keys.length);
-    Assert.assertEquals("new" + KEY2, keys[0]);
-
-    objectStore.removeMasterKey(id2);
-  }
-
-  /**
-   * Test role operations
-   */
-  @Test
-  public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchObjectException {
-    objectStore.addRole(ROLE1, OWNER);
-    objectStore.addRole(ROLE2, OWNER);
-    List<String> roles = objectStore.listRoleNames();
-    Assert.assertEquals(2, roles.size());
-    Assert.assertEquals(ROLE2, roles.get(1));
-    Role role1 = objectStore.getRole(ROLE1);
-    Assert.assertEquals(OWNER, role1.getOwnerName());
-    objectStore.grantRole(role1, USER1, PrincipalType.USER, OWNER, PrincipalType.ROLE, true);
-    objectStore.revokeRole(role1, USER1, PrincipalType.USER, false);
-    objectStore.removeRole(ROLE1);
-  }
-
-  @Test
-  public void testDirectSqlErrorMetrics() throws Exception {
-    HiveConf conf = new HiveConf();
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true);
-    Metrics.initialize(conf);
-    conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
-        "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
-            "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter"
-    );
-
-    // re-run setUp() so that we get an object store with the metrics initialized
-    setUp();
-    Counter directSqlErrors =
-        Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
-
-    objectStore.new GetDbHelper("foo", true, true) {
-      @Override
-      protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
-        return null;
-      }
-
-      @Override
-      protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
-          NoSuchObjectException {
-        return null;
-      }
-    }.run(false);
-
-    Assert.assertEquals(0, directSqlErrors.getCount());
-
-    objectStore.new GetDbHelper("foo", true, true) {
-      @Override
-      protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
-        throw new RuntimeException();
-      }
-
-      @Override
-      protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
-          NoSuchObjectException {
-        return null;
-      }
-    }.run(false);
-
-    Assert.assertEquals(1, directSqlErrors.getCount());
-  }
-
-  private static void dropAllStoreObjects(RawStore store)
-      throws MetaException, InvalidObjectException, InvalidInputException {
-    try {
-      Deadline.registerIfNot(100000);
-      List<Function> functions = store.getAllFunctions();
-      for (Function func : functions) {
-        store.dropFunction(func.getDbName(), func.getFunctionName());
-      }
-      List<String> dbs = store.getAllDatabases();
-      for (String db : dbs) {
-        List<String> tbls = store.getAllTables(db);
-        for (String tbl : tbls) {
-          List<Index> indexes = store.getIndexes(db, tbl, 100);
-          for (Index index : indexes) {
-            store.dropIndex(db, tbl, index.getIndexName());
-          }
-        }
-        for (String tbl : tbls) {
-          Deadline.startTimer("getPartition");
-          List<Partition> parts = store.getPartitions(db, tbl, 100);
-          for (Partition part : parts) {
-            store.dropPartition(db, tbl, part.getValues());
-          }
-          // Find any constraints and drop them
-          Set<String> constraints = new HashSet<>();
-          List<SQLPrimaryKey> pk = store.getPrimaryKeys(db, tbl);
-          if (pk != null) {
-            for (SQLPrimaryKey pkcol : pk) {
-              constraints.add(pkcol.getPk_name());
-            }
-          }
-          List<SQLForeignKey> fks = store.getForeignKeys(null, null, db, tbl);
-          if (fks != null) {
-            for (SQLForeignKey fkcol : fks) {
-              constraints.add(fkcol.getFk_name());
-            }
-          }
-          for (String constraint : constraints) {
-            store.dropConstraint(db, tbl, constraint);
-          }
-          store.dropTable(db, tbl);
-        }
-        store.dropDatabase(db);
-      }
-      List<String> roles = store.listRoleNames();
-      for (String role : roles) {
-        store.removeRole(role);
-      }
-    } catch (NoSuchObjectException e) {
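-      // Ignore: the object was already gone, so there is nothing left to drop.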
-    }
-  }
-
-  @Test
-  public void testQueryCloseOnError() throws Exception {
-    ObjectStore spy = Mockito.spy(objectStore);
-    spy.getAllDatabases();
-    spy.getAllFunctions();
-    spy.getAllTables(DB1);
-    spy.getPartitionCount();
-    Mockito.verify(spy, Mockito.times(3))
-        .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
-  }
-
-  @Ignore(
-      "This test is here to allow testing with other databases like MySQL / Postgres etc.\n"
-          + " with user changes to the code. This cannot be run on Apache Derby because of\n"
-          + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
-  )
-  @Test
-  public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException {
-
-    final int NUM_THREADS = 10;
-    CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
-        () -> LoggerFactory.getLogger("test")
-            .debug(NUM_THREADS + " threads going to add notification"));
-
-    HiveConf conf = new HiveConf();
-    conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS,
-        MockPartitionExpressionProxy.class.getName());
-    /*
-     Below are the properties that need to be set depending on which database this test runs against.
-     */
-
-//    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
-//    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
-//        "jdbc:mysql://localhost:3306/metastore_db");
-//    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
-//    conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
-
-    /*
-     We have to add this entry manually: in tests the DB is initialized via MetaStoreDirectSql,
-     and we don't run the schema creation SQL that includes an insert for NOTIFICATION_SEQUENCE,
-     which can be locked. The entry in NOTIFICATION_SEQUENCE is created via notification event
-     insertion.
-    */
-    objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
-    objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
-
-    objectStore.addNotificationEvent(
-        new NotificationEvent(0, 0,
-            EventMessage.EventType.CREATE_DATABASE.toString(),
-            "CREATE DATABASE DB initial"));
-
-    ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
-    for (int i = 0; i < NUM_THREADS; i++) {
-      final int n = i;
-
-      executorService.execute(
-          () -> {
-            ObjectStore store = new ObjectStore();
-            store.setConf(conf);
-
-            String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
-            NotificationEvent dbEvent =
-                new NotificationEvent(0, 0, eventType,
-                    "CREATE DATABASE DB" + n);
-            System.out.println("ADDING NOTIFICATION");
-
-            try {
-              cyclicBarrier.await();
-            } catch (InterruptedException | BrokenBarrierException e) {
-              throw new RuntimeException(e);
-            }
-            store.addNotificationEvent(dbEvent);
-            System.out.println("FINISH NOTIFICATION");
-          });
-    }
-    executorService.shutdown();
-    assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
-
-    // We have to set this up again, as the underlying PMF keeps getting reinitialized and the
-    // original reference is closed.
-    ObjectStore store = new ObjectStore();
-    store.setConf(conf);
-
-    NotificationEventResponse eventResponse = store.getNextNotification(
-        new NotificationEventRequest());
-    assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
-    long previousId = 0;
-    for (NotificationEvent event : eventResponse.getEvents()) {
-      assertTrue("previous:" + previousId + " current:" + event.getEventId(),
-          previousId < event.getEventId());
-      assertTrue(previousId + 1 == event.getEventId());
-      previousId = event.getEventId();
-    }
-  }
-
-  @Test
-  public void testRetryingExecutorSleep() throws Exception {
-    RetryingExecutor re = new ObjectStore.RetryingExecutor(new HiveConf(), null);
-    assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
-  }
-
-}
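
Taken together, the setUp() pattern above is all that is needed to drive
ObjectStore (or any other RawStore implementation) directly from a unit test.
A minimal sketch, assuming an embedded Derby-backed configuration on the
classpath and reusing the mock proxy defined in the test:

    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS,
        MockPartitionExpressionProxy.class.getName());
    RawStore store = new ObjectStore();
    store.setConf(conf);
    store.createDatabase(new Database("sketch_db", "description", "locationurl", null));
    store.dropDatabase("sketch_db");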