Posted to commits@hive.apache.org by vi...@apache.org on 2018/07/19 19:55:33 UTC

[34/51] [partial] hive git commit: HIVE-20188 : Split server-specific code outside of standalone metastore-common (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
deleted file mode 100644
index 76a8261..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hive.metastore.api.InitializeTableWriteIdsRequest;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreEventContext;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.utils.HiveStrictManagedUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class TransactionalValidationListener extends MetaStorePreEventListener {
-  public static final Logger LOG = LoggerFactory.getLogger(TransactionalValidationListener.class);
-
-  // These constants are also imported by org.apache.hadoop.hive.ql.io.AcidUtils.
-  public static final String DEFAULT_TRANSACTIONAL_PROPERTY = "default";
-  public static final String INSERTONLY_TRANSACTIONAL_PROPERTY = "insert_only";
-
-  private final Set<String> supportedCatalogs = new HashSet<String>();
-
-  TransactionalValidationListener(Configuration conf) {
-    super(conf);
-    supportedCatalogs.add("hive");
-  }
-
-  @Override
-  public void onEvent(PreEventContext context) throws MetaException, NoSuchObjectException,
-      InvalidOperationException {
-    switch (context.getEventType()) {
-      case CREATE_TABLE:
-        handle((PreCreateTableEvent) context);
-        break;
-      case ALTER_TABLE:
-        handle((PreAlterTableEvent) context);
-        break;
-      default:
-        //no validation required..
-    }
-  }
-
-  private void handle(PreAlterTableEvent context) throws MetaException {
-    if (supportedCatalogs.contains(getTableCatalog(context.getNewTable()))) {
-      handleAlterTableTransactionalProp(context);
-      HiveStrictManagedUtils.validateStrictManagedTableWithThrow(getConf(), context.getNewTable());
-    }
-  }
-
-  private void handle(PreCreateTableEvent context) throws MetaException {
-    if (supportedCatalogs.contains(getTableCatalog(context.getTable()))) {
-      handleCreateTableTransactionalProp(context);
-      HiveStrictManagedUtils.validateStrictManagedTableWithThrow(getConf(), context.getTable());
-    }
-  }
-
-  private String getTableCatalog(Table table) {
-    String catName = table.isSetCatName() ? table.getCatName() :
-      MetaStoreUtils.getDefaultCatalog(getConf());
-    return catName.toLowerCase();
-  }
-
-  /**
-   * Once a table is marked transactional, you cannot go back.  Enforce this.
-   * Also, in the current version, 'transactional_properties' of the table cannot be altered
-   * after the table is created. Any attempt to alter it will throw a MetaException.
-   */
-  private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throws MetaException {
-    Table newTable = context.getNewTable();
-    Map<String, String> parameters = newTable.getParameters();
-    if (parameters == null || parameters.isEmpty()) {
-      return;
-    }
-    Set<String> keys = new HashSet<>(parameters.keySet());
-    String transactionalValue = null;
-    boolean transactionalValuePresent = false;
-    boolean isTransactionalPropertiesPresent = false;
-    String transactionalPropertiesValue = null;
-    boolean hasValidTransactionalValue = false;
-
-    for (String key : keys) {
-      if(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.equalsIgnoreCase(key)) {
-        transactionalValuePresent = true;
-        transactionalValue = parameters.get(key);
-        parameters.remove(key);
-      }
-      if(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES.equalsIgnoreCase(key)) {
-        isTransactionalPropertiesPresent = true;
-        transactionalPropertiesValue = parameters.get(key);
-        // Do not remove the parameter yet, because we have a separate initialization routine
-        // that will use it down below.
-      }
-    }
-    Table oldTable = context.getOldTable();
-    String oldTransactionalValue = null;
-    String oldTransactionalPropertiesValue = null;
-    for (String key : oldTable.getParameters().keySet()) {
-      if (hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.equalsIgnoreCase(key)) {
-        oldTransactionalValue = oldTable.getParameters().get(key);
-      }
-      if (hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES.equalsIgnoreCase(key)) {
-        oldTransactionalPropertiesValue = oldTable.getParameters().get(key);
-      }
-    }
-
-    if (transactionalValuePresent && "false".equalsIgnoreCase(transactionalValue)) {
-      transactionalValuePresent = false;
-      transactionalValue = null;
-    }
-
-    if (transactionalValuePresent) {
-      //normalize prop name
-      parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, transactionalValue);
-    }
-    if ("true".equalsIgnoreCase(transactionalValue) && !"true".equalsIgnoreCase(oldTransactionalValue)) {
-      if(!isTransactionalPropertiesPresent) {
-        normalizeTransactionalPropertyDefault(newTable);
-        isTransactionalPropertiesPresent = true;
-        transactionalPropertiesValue = DEFAULT_TRANSACTIONAL_PROPERTY;
-      }
-      // We only need to check conformance if alter table enabled acid.
-      // INSERT_ONLY tables don't have to conform to ACID requirement like ORC or bucketing.
-      boolean isFullAcid = transactionalPropertiesValue == null
-          || !"insert_only".equalsIgnoreCase(transactionalPropertiesValue);
-      if (isFullAcid && !conformToAcid(newTable)) {
-        throw new MetaException("The table must be stored using an ACID compliant "
-            + "format (such as ORC): " + Warehouse.getQualifiedName(newTable));
-      }
-
-      if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
-        throw new MetaException(Warehouse.getQualifiedName(newTable) +
-            " cannot be declared transactional because it's an external table");
-      }
-      if (isFullAcid) {
-        validateTableStructure(context.getHandler(), newTable);
-      }
-      hasValidTransactionalValue = true;
-    }
-
-
-
-    if (oldTransactionalValue == null ? transactionalValue == null
-                                     : oldTransactionalValue.equalsIgnoreCase(transactionalValue)) {
-      //this covers backward compat cases where this prop may have been set already
-      hasValidTransactionalValue = true;
-    }
-
-    if (!hasValidTransactionalValue && !MetaStoreUtils.isInsertOnlyTableParam(oldTable.getParameters())) {
-      // if here, there is an attempt to set transactional to something other than 'true'
-      // and NOT the same value it was before
-      throw new MetaException("TBLPROPERTIES with 'transactional'='true' cannot be unset: "
-          + Warehouse.getQualifiedName(newTable));
-    }
-
-    if (isTransactionalPropertiesPresent) {
-      // Now validate transactional_properties for the table.
-      if (oldTransactionalValue == null) {
-        // If this is the first time the table is being initialized to 'transactional=true',
-        // any valid value can be set for the 'transactional_properties'.
-        initializeTransactionalProperties(newTable);
-      } else {
-        // If the table was already marked as 'transactional=true', then the new value of
-        // 'transactional_properties' must match the old value. Any attempt to alter the previous
-        // value will throw an error. An exception will still be thrown if the previous value was
-        // null and an attempt is made to set it. This behaviour can be changed in the future.
-        if ((oldTransactionalPropertiesValue == null
-            || !oldTransactionalPropertiesValue.equalsIgnoreCase(transactionalPropertiesValue))
-            && !MetaStoreUtils.isInsertOnlyTableParam(oldTable.getParameters())) {
-          throw new MetaException("TBLPROPERTIES with 'transactional_properties' cannot be "
-              + "altered after the table is created");
-        }
-      }
-    }
-    checkSorted(newTable);
-    if(TxnUtils.isAcidTable(newTable) && !TxnUtils.isAcidTable(oldTable)) {
-      /* we just made an existing table full acid which wasn't acid before, and it passed all
-      checks; initialize the Write ID sequence so that we can handle assigning ROW_IDs to
-      'original' files already present in the table. */
-      TxnStore t = TxnUtils.getTxnStore(getConf());
-      //For now assume no partition may have > 10M files.  Perhaps better to count them.
-      t.seedWriteIdOnAcidConversion(new InitializeTableWriteIdsRequest(newTable.getDbName(),
-          newTable.getTableName(), 10000000));
-    }
-  }
-
-  private void checkSorted(Table newTable) throws MetaException {
-    if(!TxnUtils.isAcidTable(newTable)) {
-      return;
-    }
-    StorageDescriptor sd = newTable.getSd();
-    if (sd.getSortCols() != null && sd.getSortCols().size() > 0) {
-      throw new MetaException("Table " + Warehouse.getQualifiedName(newTable)
-        + " cannot support full ACID functionality since it is sorted.");
-    }
-  }
-
-  /**
-   * Want to make a newly created table Acid (unless it explicitly has a transactional=false
-   * param) if the table can support it.  Also see SemanticAnalyzer.addDefaultProperties() which
-   * performs the same logic.  This code path is more general since it is activated even if you
-   * create a table via Thrift, WebHCat etc., but some operations like CTAS create the table
-   * (metastore object) as the last step (i.e. after the data is written), and the write itself
-   * has to be aware of the type of table, so this listener runs too late for them.
-   */
-  private void makeAcid(Table newTable) throws MetaException {
-    if(newTable.getParameters() != null &&
-        newTable.getParameters().containsKey(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)) {
-      LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: already has " +
-          hive_metastoreConstants.TABLE_IS_TRANSACTIONAL + "=" +
-          newTable.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL));
-      return;
-    }
-
-    Configuration conf = getConf();
-    boolean makeAcid =
-        //no point making an acid table if these other props are not set since it will just throw
-        //exceptions when someone tries to use the table.
-        MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID) &&
-        MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY) &&
-        "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager".equals(
-            MetastoreConf.getVar(conf, MetastoreConf.ConfVars.HIVE_TXN_MANAGER)
-        );
-
-    if(makeAcid) {
-      if(!conformToAcid(newTable)) {
-        LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: wrong IO format");
-        return;
-      }
-      if(!TableType.MANAGED_TABLE.toString().equalsIgnoreCase(newTable.getTableType())) {
-        //todo should this check be in conformToAcid()?
-        LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: it's " +
-            newTable.getTableType());
-        return;
-      }
-      if(newTable.getSd().getSortColsSize() > 0) {
-        LOG.info("Could not make " + Warehouse.getQualifiedName(newTable) + " acid: it's sorted");
-        return;
-      }
-      //check if orc and not sorted
-      Map<String, String> parameters = newTable.getParameters();
-      if (parameters == null || parameters.isEmpty()) {
-        parameters = new HashMap<>();
-      }
-      parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
-      newTable.setParameters(parameters);
-      LOG.info("Automatically chose to make " + Warehouse.getQualifiedName(newTable) + " acid.");
-    }
-  }
-  /**
-   * Normalize case and make sure:
-   * 1. 'true' is the only value to be set for 'transactional' (if set at all)
-   * 2. If set to 'true', we should also enforce bucketing and ORC format
-   */
-  private void handleCreateTableTransactionalProp(PreCreateTableEvent context) throws MetaException {
-    Table newTable = context.getTable();
-    Map<String, String> parameters = newTable.getParameters();
-    if (parameters == null || parameters.isEmpty()) {
-      makeAcid(newTable);
-      return;
-    }
-    String transactional = null;
-    String transactionalProperties = null;
-    Set<String> keys = new HashSet<>(parameters.keySet());
-    for(String key : keys) {
-      // Get the "transactional" tblproperties value
-      if (hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.equalsIgnoreCase(key)) {
-        transactional = parameters.get(key);
-        parameters.remove(key);
-      }
-
-      // Get the "transactional_properties" tblproperties value
-      if (hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES.equalsIgnoreCase(key)) {
-        transactionalProperties = parameters.get(key);
-      }
-    }
-
-    if (transactional == null) {
-      makeAcid(newTable);
-      return;
-    }
-
-    if ("false".equalsIgnoreCase(transactional)) {
-      // just drop transactional=false - absence of 'transactional' property is equivalent to
-      // transactional=false
-      return;
-    }
-
-    if ("true".equalsIgnoreCase(transactional)) {
-      if (!conformToAcid(newTable)) {
-        // INSERT_ONLY tables don't have to conform to ACID requirement like ORC or bucketing
-        if (transactionalProperties == null || !"insert_only".equalsIgnoreCase(transactionalProperties)) {
-          throw new MetaException("The table must be stored using an ACID compliant format (such as ORC): "
-              + Warehouse.getQualifiedName(newTable));
-        }
-      }
-
-      if (MetaStoreUtils.isExternalTable(newTable)) {
-        throw new MetaException(Warehouse.getQualifiedName(newTable) +
-            " cannot be declared transactional because it's an external table");
-      }
-
-      // normalize prop name
-      parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString());
-      if(transactionalProperties == null) {
-        normalizeTransactionalPropertyDefault(newTable);
-      }
-      initializeTransactionalProperties(newTable);
-      checkSorted(newTable);
-      return;
-    }
-    // transactional is found, but the value is not in the expected range
-    throw new MetaException("'transactional' property of TBLPROPERTIES may only have value 'true': "
-        + Warehouse.getQualifiedName(newTable));
-  }
-
-  /**
-   * When a table is marked transactional=true but transactional_properties is not set, then
-   * transactional_properties should take on the default value.  It is easier to make this
-   * explicit in the table definition than to keep checking everywhere whether it's set or not.
-   */
-  private void normalizeTransactionalPropertyDefault(Table table) {
-    table.getParameters().put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
-        DEFAULT_TRANSACTIONAL_PROPERTY);
-
-  }
-  /**
-   * Check whether the InputFormatClass/OutputFormatClass implement
-   * AcidInputFormat/AcidOutputFormat.
-   */
-  public static boolean conformToAcid(Table table) throws MetaException {
-    StorageDescriptor sd = table.getSd();
-    try {
-      Class inputFormatClass = sd.getInputFormat() == null ? null :
-          Class.forName(sd.getInputFormat());
-      Class outputFormatClass = sd.getOutputFormat() == null ? null :
-          Class.forName(sd.getOutputFormat());
-
-      if (inputFormatClass == null || outputFormatClass == null ||
-          !Class.forName("org.apache.hadoop.hive.ql.io.AcidInputFormat").isAssignableFrom(inputFormatClass) ||
-          !Class.forName("org.apache.hadoop.hive.ql.io.AcidOutputFormat").isAssignableFrom(outputFormatClass)) {
-        return false;
-      }
-    } catch (ClassNotFoundException e) {
-      LOG.warn("Could not verify InputFormat=" + sd.getInputFormat() + " or OutputFormat=" +
-          sd.getOutputFormat() + "  for " + Warehouse.getQualifiedName(table));
-      return false;
-    }
-
-    return true;
-  }
-
-  private void initializeTransactionalProperties(Table table) throws MetaException {
-    // All new versions of Acid tables created after the introduction of the Acid version/type
-    // system can have the TRANSACTIONAL_PROPERTIES property defined. This parameter can be used
-    // to change the operational behavior of ACID. However, if this parameter is not defined, the
-    // new Acid tables will still behave as the old ones. This is done to preserve the behavior
-    // in case of a rolling downgrade.
-
-    // Initialize transaction table properties with default string value.
-    String tableTransactionalProperties = null;
-
-    Map<String, String> parameters = table.getParameters();
-    if (parameters != null) {
-      Set<String> keys = parameters.keySet();
-      for (String key : keys) {
-        if (hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES.equalsIgnoreCase(key)) {
-          tableTransactionalProperties = parameters.get(key).toLowerCase();
-          parameters.remove(key);
-          String validationError = validateTransactionalProperties(tableTransactionalProperties);
-          if (validationError != null) {
-            throw new MetaException("Invalid transactional properties specified for "
-                + Warehouse.getQualifiedName(table) + " with the error " + validationError);
-          }
-          break;
-        }
-      }
-    }
-
-    if (tableTransactionalProperties != null) {
-      parameters.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
-              tableTransactionalProperties);
-    }
-  }
-
-  private String validateTransactionalProperties(String transactionalProperties) {
-    boolean isValid = false;
-    switch (transactionalProperties) {
-      case DEFAULT_TRANSACTIONAL_PROPERTY:
-      case INSERTONLY_TRANSACTIONAL_PROPERTY:
-        isValid = true;
-        break;
-      default:
-        isValid = false;
-    }
-    if (!isValid) {
-      return "unknown value " + transactionalProperties +  " for transactional_properties";
-    }
-    return null; // All checks passed, return null.
-  }
-  private final Pattern ORIGINAL_PATTERN = Pattern.compile("[0-9]+_[0-9]+");
-  /**
-   * see org.apache.hadoop.hive.ql.exec.Utilities#COPY_KEYWORD
-   */
-  private static final Pattern ORIGINAL_PATTERN_COPY =
-    Pattern.compile("[0-9]+_[0-9]+" + "_copy_" + "[0-9]+");
-
-  /**
-   * It's assumed everywhere that original data files are named according to
-   * {@link #ORIGINAL_PATTERN} or {@link #ORIGINAL_PATTERN_COPY}.
-   * This checks that convention when transactional=true is set and throws if it finds any files
-   * that don't follow it.
-   */
-  private void validateTableStructure(IHMSHandler hmsHandler, Table table)
-    throws MetaException {
-    Path tablePath;
-    try {
-      Warehouse wh = hmsHandler.getWh();
-      if (table.getSd().getLocation() == null || table.getSd().getLocation().isEmpty()) {
-        String catName = getTableCatalog(table);
-        tablePath = wh.getDefaultTablePath(hmsHandler.getMS().getDatabase(
-            catName, table.getDbName()), table);
-      } else {
-        tablePath = wh.getDnsPath(new Path(table.getSd().getLocation()));
-      }
-      FileSystem fs = wh.getFs(tablePath);
-      //FileSystem fs = FileSystem.get(getConf());
-      RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(tablePath, true);
-      while (iterator.hasNext()) {
-        LocatedFileStatus fileStatus = iterator.next();
-        if (!fileStatus.isFile()) {
-          continue;
-        }
-        boolean validFile =
-          (ORIGINAL_PATTERN.matcher(fileStatus.getPath().getName()).matches() ||
-            ORIGINAL_PATTERN_COPY.matcher(fileStatus.getPath().getName()).matches()
-          );
-        if (!validFile) {
-          throw new IllegalStateException("Unexpected data file name format.  Cannot convert " +
-            Warehouse.getQualifiedName(table) + " to transactional table.  File: "
-            + fileStatus.getPath());
-        }
-      }
-    } catch (IOException|NoSuchObjectException e) {
-      String msg = "Unable to list files for " + Warehouse.getQualifiedName(table);
-      LOG.error(msg, e);
-      MetaException e1 = new MetaException(msg);
-      e1.initCause(e);
-      throw e1;
-    }
-  }
-}
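
For reference, a minimal standalone sketch of the file-naming check that the deleted
validateTableStructure() performs when a table is converted to full ACID. It reuses the two
regexes defined in the listener above; the sample file names are illustrative:

import java.util.regex.Pattern;

public class OriginalFileNameCheck {
  // Same patterns as in TransactionalValidationListener above.
  private static final Pattern ORIGINAL_PATTERN = Pattern.compile("[0-9]+_[0-9]+");
  private static final Pattern ORIGINAL_PATTERN_COPY =
      Pattern.compile("[0-9]+_[0-9]+" + "_copy_" + "[0-9]+");

  static boolean isValidOriginalFileName(String name) {
    return ORIGINAL_PATTERN.matcher(name).matches()
        || ORIGINAL_PATTERN_COPY.matcher(name).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValidOriginalFileName("000000_0"));        // true
    System.out.println(isValidOriginalFileName("000000_0_copy_1")); // true
    System.out.println(isValidOriginalFileName("part-00000"));      // false: conversion throws
  }
}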

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
deleted file mode 100755
index da5a71c..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ /dev/null
@@ -1,756 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.Catalog;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
-import org.apache.hadoop.hive.metastore.utils.FileUtils;
-import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
-import org.apache.hadoop.hive.metastore.utils.JavaUtils;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.util.ReflectionUtils;
-
-/**
- * This class represents a warehouse where data of Hive tables is stored
- */
-public class Warehouse {
-  public static final String DEFAULT_CATALOG_NAME = "hive";
-  public static final String DEFAULT_CATALOG_COMMENT = "Default catalog, for Hive";
-  public static final String DEFAULT_DATABASE_NAME = "default";
-  public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database";
-  public static final String DEFAULT_SERIALIZATION_FORMAT = "1";
-  public static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
-  private static final String CAT_DB_TABLE_SEPARATOR = ".";
-
-  private Path whRoot;
-  private Path whRootExternal;
-  private final Configuration conf;
-  private final String whRootString;
-  private final String whRootExternalString;
-
-  public static final Logger LOG = LoggerFactory.getLogger("hive.metastore.warehouse");
-
-  private MetaStoreFS fsHandler = null;
-  private boolean storageAuthCheck = false;
-  private ReplChangeManager cm = null;
-
-  public Warehouse(Configuration conf) throws MetaException {
-    this.conf = conf;
-    whRootString = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE);
-    if (StringUtils.isBlank(whRootString)) {
-      throw new MetaException(ConfVars.WAREHOUSE.getVarname()
-          + " is not set in the config or blank");
-    }
-    whRootExternalString = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE_EXTERNAL);
-    fsHandler = getMetaStoreFsHandler(conf);
-    cm = ReplChangeManager.getInstance(conf);
-    storageAuthCheck = MetastoreConf.getBoolVar(conf, ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS);
-  }
-
-  private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
-      throws MetaException {
-    String handlerClassStr = MetastoreConf.getVar(conf, ConfVars.FS_HANDLER_CLS);
-    try {
-      Class<? extends MetaStoreFS> handlerClass = (Class<? extends MetaStoreFS>) Class
-          .forName(handlerClassStr, true, JavaUtils.getClassLoader());
-      MetaStoreFS handler = ReflectionUtils.newInstance(handlerClass, conf);
-      return handler;
-    } catch (ClassNotFoundException e) {
-      throw new MetaException("Error in loading MetaStoreFS handler."
-          + e.getMessage());
-    }
-  }
-
-
-  /**
-   * Helper functions to convert IOException to MetaException
-   */
-  public static FileSystem getFs(Path f, Configuration conf) throws MetaException {
-    try {
-      return f.getFileSystem(conf);
-    } catch (IOException e) {
-      MetaStoreUtils.logAndThrowMetaException(e);
-    }
-    return null;
-  }
-
-  public FileSystem getFs(Path f) throws MetaException {
-    return getFs(f, conf);
-  }
-
-
-  /**
-   * The Hadoop FileSystem does reverse lookups on paths with raw IP addresses. The
-   * FileSystem URI always contains the canonical DNS name of the NameNode.
-   * Subsequently, operations on paths with raw IP addresses cause an exception
-   * since they don't match the file system URI.
-   *
-   * This routine solves this problem by replacing the scheme and authority of a
-   * path with the scheme and authority of the FileSystem that it maps to.
-   *
-   * @param path
-   *          Path to be canonicalized
-   * @return Path with canonical scheme and authority
-   */
-  public static Path getDnsPath(Path path, Configuration conf) throws MetaException {
-    FileSystem fs = getFs(path, conf);
-    return (new Path(fs.getUri().getScheme(), fs.getUri().getAuthority(), path
-        .toUri().getPath()));
-  }
-
-  public Path getDnsPath(Path path) throws MetaException {
-    return getDnsPath(path, conf);
-  }
-
-  /**
-   * Resolve the configured warehouse root dir with respect to the configuration.
-   * This involves opening the FileSystem corresponding to the warehouse root
-   * dir (but that should be ok given that this is only called during DDL
-   * statements for non-external tables).
-   */
-  public Path getWhRoot() throws MetaException {
-    if (whRoot != null) {
-      return whRoot;
-    }
-    whRoot = getDnsPath(new Path(whRootString));
-    return whRoot;
-  }
-
-  public Path getWhRootExternal() throws MetaException {
-    if (whRootExternal != null) {
-      return whRootExternal;
-    }
-    if (!hasExternalWarehouseRoot()) {
-      whRootExternal = getWhRoot();
-    } else {
-      whRootExternal = getDnsPath(new Path(whRootExternalString));
-    }
-    return whRootExternal;
-  }
-
-  /**
-   * Build the database path based on catalog name and database name.  This should only be used
-   * when a database is being created or altered.  If you just want to find out the path a
-   * database is already using call {@link #getDatabasePath(Database)}.  If the passed in
-   * database already has a path set that will be used.  If not the location will be built using
-   * catalog's path and the database name.
-   * @param cat catalog the database is in
-   * @param db database object
-   * @return Path representing the directory for the database
-   * @throws MetaException when the file path cannot be properly determined from the configured
-   * file system.
-   */
-  public Path determineDatabasePath(Catalog cat, Database db) throws MetaException {
-    if (db.isSetLocationUri()) {
-      return getDnsPath(new Path(db.getLocationUri()));
-    }
-    if (cat == null || cat.getName().equalsIgnoreCase(DEFAULT_CATALOG_NAME)) {
-      if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-        return getWhRoot();
-      } else {
-        return new Path(getWhRoot(), dbDirFromDbName(db));
-      }
-    } else {
-      return new Path(getDnsPath(new Path(cat.getLocationUri())), dbDirFromDbName(db));
-    }
-  }
-
-  private String dbDirFromDbName(Database db) throws MetaException {
-    return db.getName().toLowerCase() + DATABASE_WAREHOUSE_SUFFIX;
-  }
-
-  /**
-   * Get the path specified by the database.  In the case of the default database the root of the
-   * warehouse is returned.
-   * @param db database to get the path of
-   * @return path to the database directory
-   * @throws MetaException when the file path cannot be properly determined from the configured
-   * file system.
-   */
-  public Path getDatabasePath(Database db) throws MetaException {
-    if (db.getCatalogName().equalsIgnoreCase(DEFAULT_CATALOG_NAME) &&
-        db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return getWhRoot();
-    }
-    return new Path(db.getLocationUri());
-  }
-
-  public Path getDefaultDatabasePath(String dbName) throws MetaException {
-    // TODO CAT - I am fairly certain that most calls to this are in error.  This should only be
-    // used when the database location is unset, which should never happen except when a
-    // new database is being created.  Once I have confirmation of this change calls of this to
-    // getDatabasePath(), since it does the right thing.  Also, merge this with
-    // determineDatabasePath() as it duplicates much of the logic.
-    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return getWhRoot();
-    }
-    return new Path(getWhRoot(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
-  }
-
-  public Path getDefaultExternalDatabasePath(String dbName) throws MetaException {
-    if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
-      return getWhRootExternal();
-    }
-    return new Path(getWhRootExternal(), dbName.toLowerCase() + DATABASE_WAREHOUSE_SUFFIX);
-  }
-
-  private boolean hasExternalWarehouseRoot() {
-    return !StringUtils.isBlank(whRootExternalString);
-  }
-
-  /**
-   * Returns the default location of the table path using the parent database's location.
-   * @param db Database where the table is created
-   * @param tableName table name
-   * @return default path for the table
-   * @throws MetaException
-   */
-  @Deprecated
-  public Path getDefaultTablePath(Database db, String tableName)
-      throws MetaException {
-    return getDefaultTablePath(db, tableName, false);
-  }
-
-  public Path getDefaultTablePath(Database db, String tableName, boolean isExternal) throws MetaException {
-    Path dbPath = null;
-    if (isExternal && hasExternalWarehouseRoot()) {
-      dbPath = getDefaultExternalDatabasePath(db.getName());
-    } else {
-      dbPath = getDatabasePath(db);
-    }
-    return getDnsPath(
-        new Path(dbPath, MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
-  }
-
-  // A few situations where we need the default table path, without a DB object
-  public Path getDefaultTablePath(String dbName, String tableName, boolean isExternal) throws MetaException {
-    Path dbPath = null;
-    if (isExternal && hasExternalWarehouseRoot()) {
-      dbPath = getDefaultExternalDatabasePath(dbName);
-    } else {
-      dbPath = getDefaultDatabasePath(dbName);
-    }
-    return getDnsPath(
-        new Path(dbPath, MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
-  }
-
-  public Path getDefaultTablePath(Database db, Table table) throws MetaException {
-    return getDefaultTablePath(db, table.getTableName(), MetaStoreUtils.isExternalTable(table));
-  }
-
-  @Deprecated // Use TableName
-  public static String getQualifiedName(Table table) {
-    return TableName.getDbTable(table.getDbName(), table.getTableName());
-  }
-
-  @Deprecated // Use TableName
-  public static String getQualifiedName(String dbName, String tableName) {
-    return TableName.getDbTable(dbName, tableName);
-  }
-
-  public static String getQualifiedName(Partition partition) {
-    return partition.getDbName() + "." + partition.getTableName() + partition.getValues();
-  }
-
-  /**
-   * Get table name in cat.db.table format.
-   * @param table table object
-   * @return fully qualified name.
-   */
-  public static String getCatalogQualifiedTableName(Table table) {
-    return TableName.getQualified(table.getCatName(), table.getDbName(), table.getTableName());
-  }
-
-  public boolean mkdirs(Path f) throws MetaException {
-    FileSystem fs;
-    try {
-      fs = getFs(f);
-      return FileUtils.mkdir(fs, f);
-    } catch (IOException e) {
-      MetaStoreUtils.logAndThrowMetaException(e);
-    }
-    return false;
-  }
-
-  public boolean renameDir(Path sourcePath, Path destPath, boolean needCmRecycle) throws MetaException {
-    try {
-      if (needCmRecycle) {
-        // Copy the source files to cmroot. As the client will move the source files to another
-        // location, we should make a copy of the files to cmroot instead of moving them.
-        cm.recycle(sourcePath, RecycleType.COPY, true);
-      }
-      FileSystem srcFs = getFs(sourcePath);
-      FileSystem destFs = getFs(destPath);
-      return FileUtils.rename(srcFs, destFs, sourcePath, destPath);
-    } catch (Exception ex) {
-      MetaStoreUtils.logAndThrowMetaException(ex);
-    }
-    return false;
-  }
-
-  void addToChangeManagement(Path file) throws MetaException {
-    try {
-      cm.recycle(file, RecycleType.COPY, true);
-    } catch (IOException e) {
-      throw new MetaException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-    }
-  }
-
-  public boolean deleteDir(Path f, boolean recursive, Database db) throws MetaException {
-    return deleteDir(f, recursive, false, db);
-  }
-
-  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, Database db) throws MetaException {
-    return deleteDir(f, recursive, ifPurge, ReplChangeManager.isSourceOfReplication(db));
-  }
-
-  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) throws MetaException {
-    if (needCmRecycle) {
-      try {
-        cm.recycle(f, RecycleType.MOVE, ifPurge);
-      } catch (IOException e) {
-        throw new MetaException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-      }
-    }
-    FileSystem fs = getFs(f);
-    return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
-  }
-
-  public void recycleDirToCmPath(Path f, boolean ifPurge) throws MetaException {
-    try {
-      cm.recycle(f, RecycleType.MOVE, ifPurge);
-    } catch (IOException e) {
-      throw new MetaException(org.apache.hadoop.util.StringUtils.stringifyException(e));
-    }
-  }
-
-  public boolean isEmpty(Path path) throws IOException, MetaException {
-    ContentSummary contents = getFs(path).getContentSummary(path);
-    if (contents != null && contents.getFileCount() == 0 && contents.getDirectoryCount() == 1) {
-      return true;
-    }
-    return false;
-  }
-
-  public boolean isWritable(Path path) throws IOException {
-    if (!storageAuthCheck) {
-      // no checks for non-secure hadoop installations
-      return true;
-    }
-    if (path == null) { //what??!!
-      return false;
-    }
-    final FileStatus stat;
-    final FileSystem fs;
-    try {
-      fs = getFs(path);
-      stat = fs.getFileStatus(path);
-      HdfsUtils.checkFileAccess(fs, stat, FsAction.WRITE);
-      return true;
-    } catch (FileNotFoundException fnfe){
-      // File named by path doesn't exist; nothing to validate.
-      return true;
-    } catch (Exception e) {
-      // all other exceptions are considered as emanating from
-      // unauthorized accesses
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Exception when checking if path (" + path + ") is writable", e);
-      }
-      return false;
-    }
-  }
-
-  private static String escapePathName(String path) {
-    return FileUtils.escapePathName(path);
-  }
-
-  private static String unescapePathName(String path) {
-    return FileUtils.unescapePathName(path);
-  }
-
-  /**
-   * Given a partition specification, return the path corresponding to the
-   * partition spec. By default, the specification does not include dynamic partitions.
-   * @param spec
-   * @return string representation of the partition specification.
-   * @throws MetaException
-   */
-  public static String makePartPath(Map<String, String> spec)
-      throws MetaException {
-    return makePartName(spec, true);
-  }
-
-  /**
-   * Makes a partition name from a specification
-   * @param spec
-   * @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/'
-   * @return partition name
-   * @throws MetaException
-   */
-  public static String makePartName(Map<String, String> spec,
-      boolean addTrailingSeperator)
-      throws MetaException {
-    StringBuilder suffixBuf = new StringBuilder();
-    int i = 0;
-    for (Entry<String, String> e : spec.entrySet()) {
-      if (e.getValue() == null || e.getValue().length() == 0) {
-        throw new MetaException("Partition spec is incorrect. " + spec);
-      }
-      if (i>0) {
-        suffixBuf.append(Path.SEPARATOR);
-      }
-      suffixBuf.append(escapePathName(e.getKey()));
-      suffixBuf.append('=');
-      suffixBuf.append(escapePathName(e.getValue()));
-      i++;
-    }
-    if (addTrailingSeperator) {
-      suffixBuf.append(Path.SEPARATOR);
-    }
-    return suffixBuf.toString();
-  }
-  /**
-   * Given a dynamic partition specification, return the path corresponding to the
-   * static part of partition specification. This is basically a copy of makePartName
-   * but we get rid of MetaException since it is not serializable.
-   * @param spec
-   * @return string representation of the static part of the partition specification.
-   */
-  public static String makeDynamicPartName(Map<String, String> spec) {
-    StringBuilder suffixBuf = new StringBuilder();
-    for (Entry<String, String> e : spec.entrySet()) {
-      if (e.getValue() != null && e.getValue().length() > 0) {
-        suffixBuf.append(escapePathName(e.getKey()));
-        suffixBuf.append('=');
-        suffixBuf.append(escapePathName(e.getValue()));
-        suffixBuf.append(Path.SEPARATOR);
-      } else { // stop once we see a dynamic partition
-        break;
-      }
-    }
-    return suffixBuf.toString();
-  }
-
-  static final Pattern pat = Pattern.compile("([^/]+)=([^/]+)");
-
-  private static final Pattern slash = Pattern.compile("/");
-
-  /**
-   * Extracts values from partition name without the column names.
-   * @param name Partition name.
-   * @param result The result. Must be pre-sized to the expected number of columns.
-   */
-  public static AbstractList<String> makeValsFromName(
-      String name, AbstractList<String> result) throws MetaException {
-    assert name != null;
-    String[] parts = slash.split(name, 0);
-    if (result == null) {
-      result = new ArrayList<>(parts.length);
-      for (int i = 0; i < parts.length; ++i) {
-        result.add(null);
-      }
-    } else if (parts.length != result.size()) {
-      throw new MetaException(
-          "Expected " + result.size() + " components, got " + parts.length + " (" + name + ")");
-    }
-    for (int i = 0; i < parts.length; ++i) {
-      int eq = parts[i].indexOf('=');
-      if (eq <= 0) {
-        throw new MetaException("Unexpected component " + parts[i]);
-      }
-      result.set(i, unescapePathName(parts[i].substring(eq + 1)));
-    }
-    return result;
-  }
-
-  public static LinkedHashMap<String, String> makeSpecFromName(String name)
-      throws MetaException {
-    if (name == null || name.isEmpty()) {
-      throw new MetaException("Partition name is invalid. " + name);
-    }
-    LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
-    makeSpecFromName(partSpec, new Path(name), null);
-    return partSpec;
-  }
-
-  public static boolean makeSpecFromName(Map<String, String> partSpec, Path currPath,
-      Set<String> requiredKeys) {
-    List<String[]> kvs = new ArrayList<>();
-    do {
-      String component = currPath.getName();
-      Matcher m = pat.matcher(component);
-      if (m.matches()) {
-        String k = unescapePathName(m.group(1));
-        String v = unescapePathName(m.group(2));
-        String[] kv = new String[2];
-        kv[0] = k;
-        kv[1] = v;
-        kvs.add(kv);
-      }
-      currPath = currPath.getParent();
-    } while (currPath != null && !currPath.getName().isEmpty());
-
-    // reverse the list since we checked the part from leaf dir to table's base dir
-    for (int i = kvs.size(); i > 0; i--) {
-      String key = kvs.get(i - 1)[0];
-      if (requiredKeys != null) {
-        requiredKeys.remove(key);
-      }
-      partSpec.put(key, kvs.get(i - 1)[1]);
-    }
-    if (requiredKeys == null || requiredKeys.isEmpty()) return true;
-    LOG.warn("Cannot create partition spec from " + currPath + "; missing keys " + requiredKeys);
-    return false;
-  }
-
-  public static Map<String, String> makeEscSpecFromName(String name) throws MetaException {
-
-    if (name == null || name.isEmpty()) {
-      throw new MetaException("Partition name is invalid. " + name);
-    }
-    LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
-
-    Path currPath = new Path(name);
-
-    List<String[]> kvs = new ArrayList<>();
-    do {
-      String component = currPath.getName();
-      Matcher m = pat.matcher(component);
-      if (m.matches()) {
-        String k = m.group(1);
-        String v = m.group(2);
-        String[] kv = new String[2];
-        kv[0] = k;
-        kv[1] = v;
-        kvs.add(kv);
-      }
-      currPath = currPath.getParent();
-    } while (currPath != null && !currPath.getName().isEmpty());
-
-    // reverse the list since we checked the part from leaf dir to table's base dir
-    for (int i = kvs.size(); i > 0; i--) {
-      partSpec.put(kvs.get(i - 1)[0], kvs.get(i - 1)[1]);
-    }
-
-    return partSpec;
-  }
-
-  /**
-   * Returns the default partition path of a table within a given database and partition key value
-   * pairs. It uses the database location and appends to it the table name and the partition
-   * key-value pairs to create the Path for the partition directory.
-   *
-   * @param db - parent database which is used to get the base location of the partition directory
-   * @param table - table for the partitions
-   * @param pm - Partition key value pairs
-   * @return Path of the partition directory
-   * @throws MetaException
-   */
-  public Path getDefaultPartitionPath(Database db, Table table,
-      Map<String, String> pm) throws MetaException {
-    return getPartitionPath(getDefaultTablePath(db, table), pm);
-  }
-
-  /**
-   * Returns the path object for the given partition key-value pairs and the base location
-   *
-   * @param tblPath - the base location for the partitions. Typically the table location
-   * @param pm - Partition key value pairs
-   * @return
-   * @throws MetaException
-   */
-  public Path getPartitionPath(Path tblPath, Map<String, String> pm)
-      throws MetaException {
-    return new Path(tblPath, makePartPath(pm));
-  }
-
-  /**
-   * Given a database, a table and the partition key value pairs this method returns the Path object
-   * corresponding to the partition key value pairs. It uses the table location if available else
-   * uses the database location for constructing the path corresponding to the partition key-value
-   * pairs
-   *
-   * @param db - Parent database of the given table
-   * @param table - Table for which the partition key-values are given
-   * @param vals - List of values for the partition keys
-   * @return Path corresponding to the partition key-value pairs
-   * @throws MetaException
-   */
-  public Path getPartitionPath(Database db, Table table, List<String> vals)
-      throws MetaException {
-    List<FieldSchema> partKeys = table.getPartitionKeys();
-    if (partKeys == null || (partKeys.size() != vals.size())) {
-      throw new MetaException("Invalid number of partition keys found for " + table.getTableName());
-    }
-    Map<String, String> pm = new LinkedHashMap<>(vals.size());
-    int i = 0;
-    for (FieldSchema key : partKeys) {
-      pm.put(key.getName(), vals.get(i));
-      i++;
-    }
-
-    if (table.getSd().getLocation() != null) {
-      return getPartitionPath(getDnsPath(new Path(table.getSd().getLocation())), pm);
-    } else {
-      return getDefaultPartitionPath(db, table, pm);
-    }
-  }
-
-  public boolean isDir(Path f) throws MetaException {
-    FileSystem fs;
-    try {
-      fs = getFs(f);
-      FileStatus fstatus = fs.getFileStatus(f);
-      if (!fstatus.isDir()) {
-        return false;
-      }
-    } catch (FileNotFoundException e) {
-      return false;
-    } catch (IOException e) {
-      MetaStoreUtils.logAndThrowMetaException(e);
-    }
-    return true;
-  }
-
-  public static String makePartName(List<FieldSchema> partCols,
-      List<String> vals) throws MetaException {
-    return makePartName(partCols, vals, null);
-  }
-
-  /**
-   * @param desc storage descriptor whose location is listed
-   * @return list of FileStatus objects corresponding to the files
-   * making up the passed storage description
-   */
-  public List<FileStatus> getFileStatusesForSD(StorageDescriptor desc)
-      throws MetaException {
-    return getFileStatusesForLocation(desc.getLocation());
-  }
-
-  /**
-   * @param location location whose files are listed recursively
-   * @return list of FileStatus objects corresponding to the files
-   * under the passed location
-   */
-  public List<FileStatus> getFileStatusesForLocation(String location)
-      throws MetaException {
-    try {
-      Path path = new Path(location);
-      FileSystem fileSys = path.getFileSystem(conf);
-      return FileUtils.getFileStatusRecurse(path, -1, fileSys);
-    } catch (IOException ioe) {
-      MetaStoreUtils.logAndThrowMetaException(ioe);
-    }
-    return null;
-  }
-
-  /**
-   * @param db database
-   * @param table table
-   * @return list of FileStatus objects corresponding to the files making up the passed
-   * unpartitioned table
-   */
-  public List<FileStatus> getFileStatusesForUnpartitionedTable(Database db, Table table)
-      throws MetaException {
-    Path tablePath = getDnsPath(new Path(table.getSd().getLocation()));
-    try {
-      FileSystem fileSys = tablePath.getFileSystem(conf);
-      return FileUtils.getFileStatusRecurse(tablePath, -1, fileSys);
-    } catch (IOException ioe) {
-      MetaStoreUtils.logAndThrowMetaException(ioe);
-    }
-    return null;
-  }
-
-  /**
-   * Makes a valid partition name.
-   * @param partCols The partition columns
-   * @param vals The partition values
-   * @param defaultStr
-   *    The default name given to a partition value if the respective value is empty or null.
-   * @return An escaped, valid partition name.
-   * @throws MetaException
-   */
-  public static String makePartName(List<FieldSchema> partCols,
-      List<String> vals, String defaultStr) throws MetaException {
-    if ((partCols.size() != vals.size()) || (partCols.size() == 0)) {
-      String errorStr = "Invalid partition key & values; keys [";
-      for (FieldSchema fs : partCols) {
-        errorStr += (fs.getName() + ", ");
-      }
-      errorStr += "], values [";
-      for (String val : vals) {
-        errorStr += (val + ", ");
-      }
-      throw new MetaException(errorStr + "]");
-    }
-    List<String> colNames = new ArrayList<>();
-    for (FieldSchema col: partCols) {
-      colNames.add(col.getName());
-    }
-    return FileUtils.makePartName(colNames, vals, defaultStr);
-  }
-
-  public static List<String> getPartValuesFromPartName(String partName)
-      throws MetaException {
-    LinkedHashMap<String, String> partSpec = Warehouse.makeSpecFromName(partName);
-    List<String> values = new ArrayList<>();
-    values.addAll(partSpec.values());
-    return values;
-  }
-
-  public static Map<String, String> makeSpecFromValues(List<FieldSchema> partCols,
-      List<String> values) {
-    Map<String, String> spec = new LinkedHashMap<>();
-    for (int i = 0; i < values.size(); i++) {
-      spec.put(partCols.get(i).getName(), values.get(i));
-    }
-    return spec;
-  }
-}
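
The partition-name helpers above build and parse path components of the form key=value. Below is
a simplified, self-contained sketch of the round trip using the same ([^/]+)=([^/]+) component
pattern; the escaping done by FileUtils.escapePathName and the leaf-to-root Path walk of the real
makeSpecFromName are omitted for brevity:

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PartNameRoundTrip {
  // Same component pattern as Warehouse.pat above.
  private static final Pattern PAT = Pattern.compile("([^/]+)=([^/]+)");

  // Mirrors makePartName(spec, true), e.g. "ds=2018-07-19/hr=10/".
  static String makePartName(Map<String, String> spec) {
    StringBuilder suffixBuf = new StringBuilder();
    for (Map.Entry<String, String> e : spec.entrySet()) {
      suffixBuf.append(e.getKey()).append('=').append(e.getValue()).append('/');
    }
    return suffixBuf.toString();
  }

  // Simplified makeSpecFromName: split on '/' instead of walking Path parents.
  static Map<String, String> makeSpecFromName(String name) {
    Map<String, String> partSpec = new LinkedHashMap<>();
    for (String component : name.split("/")) {
      Matcher m = PAT.matcher(component);
      if (m.matches()) {
        partSpec.put(m.group(1), m.group(2));
      }
    }
    return partSpec;
  }

  public static void main(String[] args) {
    Map<String, String> spec = new LinkedHashMap<>();
    spec.put("ds", "2018-07-19");
    spec.put("hr", "10");
    String name = makePartName(spec);
    // Prints: ds=2018-07-19/hr=10/ -> {ds=2018-07-19, hr=10}
    System.out.println(name + " -> " + makeSpecFromName(name));
  }
}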

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/api/utils/DecimalUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/api/utils/DecimalUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/api/utils/DecimalUtils.java
deleted file mode 100644
index e5d8b0b..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/api/utils/DecimalUtils.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore.api.utils;
-
-import java.nio.ByteBuffer;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import org.apache.hadoop.hive.metastore.api.Decimal;
-
-/**
- * This class contains helper methods for handling the Thrift API's Decimal type.
- */
-public class DecimalUtils {
-
-  public static Decimal getDecimal(int number, int scale) {
-    ByteBuffer bb = ByteBuffer.allocate(4);
-    bb.asIntBuffer().put(number);
-    return new Decimal((short) scale, bb);
-  }
-
-  public static Decimal getDecimal(ByteBuffer unscaled, short scale) {
-    return new Decimal((short) scale, unscaled);
-  }
-
-  public static Decimal createThriftDecimal(String s) {
-    BigDecimal d = new BigDecimal(s);
-    return new Decimal((short) d.scale(), ByteBuffer.wrap(d.unscaledValue().toByteArray()));
-  }
-
-  public static String createJdoDecimalString(Decimal d) {
-    return new BigDecimal(new BigInteger(d.getUnscaled()), d.getScale()).toString();
-  }
-}
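
The helpers above represent a decimal as an unscaled big-endian byte array plus a scale, which is
exactly the BigDecimal decomposition. A sketch of the round trip using only java.math (the Thrift
Decimal class itself is left out):

import java.math.BigDecimal;
import java.math.BigInteger;

public class DecimalRoundTrip {
  public static void main(String[] args) {
    BigDecimal d = new BigDecimal("123.45");
    // What createThriftDecimal stores: unscaled two's-complement bytes plus a scale.
    byte[] unscaled = d.unscaledValue().toByteArray(); // bytes of 12345
    short scale = (short) d.scale();                   // 2
    // What createJdoDecimalString reconstructs.
    BigDecimal back = new BigDecimal(new BigInteger(unscaled), scale);
    System.out.println(back); // 123.45
  }
}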

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java
deleted file mode 100644
index 2e92a4f..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.util.Arrays;
-
-/**
- * A byte array wrapper with value-based equals() and hashCode().
- */
-public class ByteArrayWrapper {
-  byte[] wrapped;
-
-  ByteArrayWrapper(byte[] b) {
-    wrapped = b;
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (other instanceof ByteArrayWrapper) {
-      return Arrays.equals(((ByteArrayWrapper)other).wrapped, wrapped);
-    } else {
-      return false;
-    }
-  }
-
-  @Override
-  public int hashCode() {
-    return Arrays.hashCode(wrapped);
-  }
-}
\ No newline at end of file
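
ByteArrayWrapper is needed because a raw byte[] key uses identity-based equals()/hashCode(), so
two value-equal arrays miss each other in a HashMap. A small demonstration; the Wrapper class
mirrors ByteArrayWrapper above:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class ByteArrayKeyDemo {
  static final class Wrapper {
    final byte[] wrapped;
    Wrapper(byte[] b) { wrapped = b; }
    @Override public boolean equals(Object o) {
      return o instanceof Wrapper && Arrays.equals(((Wrapper) o).wrapped, wrapped);
    }
    @Override public int hashCode() { return Arrays.hashCode(wrapped); }
  }

  public static void main(String[] args) {
    Map<byte[], String> raw = new HashMap<>();
    raw.put(new byte[]{1, 2}, "v");
    System.out.println(raw.get(new byte[]{1, 2})); // null: identity lookup misses

    Map<Wrapper, String> wrapped = new HashMap<>();
    wrapped.put(new Wrapper(new byte[]{1, 2}), "v");
    System.out.println(wrapped.get(new Wrapper(new byte[]{1, 2}))); // v
  }
}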

http://git-wip-us.apache.org/repos/asf/hive/blob/081fa368/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
deleted file mode 100644
index 944c813..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore.cache;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.cache.SharedCache.PartitionWrapper;
-import org.apache.hadoop.hive.metastore.cache.SharedCache.TableWrapper;
-import org.apache.hadoop.hive.metastore.utils.StringUtils;
-
-public class CacheUtils {
-  private static final String delimit = "\u0001";
-
-  public static String buildCatalogKey(String catName) {
-    return catName;
-  }
-
-  public static String buildDbKey(String catName, String dbName) {
-    return buildKey(catName.toLowerCase(), dbName.toLowerCase());
-  }
-
-  /**
-   * Builds a key for the partition cache which is a concatenation of the partition values,
-   * each separated by a delimiter.
-   *
-   */
-  public static String buildPartitionCacheKey(List<String> partVals) {
-    if (partVals == null || partVals.isEmpty()) {
-      return "";
-    }
-    return String.join(delimit, partVals);
-  }
-
-  public static String buildTableKey(String catName, String dbName, String tableName) {
-    return buildKey(catName.toLowerCase(), dbName.toLowerCase(), tableName.toLowerCase());
-  }
-
-  public static String buildTableColKey(String catName, String dbName, String tableName,
-                                        String colName) {
-    return buildKey(catName, dbName, tableName, colName);
-  }
-
-  private static String buildKey(String... elements) {
-    return org.apache.commons.lang.StringUtils.join(elements, delimit);
-  }
-
-  public static String[] splitDbName(String key) {
-    String[] names = key.split(delimit);
-    assert names.length == 2;
-    return names;
-  }
-
-  /**
-   * Builds a key for the partition column-statistics cache, which is a concatenation of the
-   * partition values (each separated by a delimiter) followed by the column name.
-   *
-   */
-  public static String buildPartitonColStatsCacheKey(List<String> partVals, String colName) {
-    return buildPartitionCacheKey(partVals) + delimit + colName;
-  }
-
-  static Table assemble(TableWrapper wrapper, SharedCache sharedCache) {
-    Table t = wrapper.getTable().deepCopy();
-    if (wrapper.getSdHash() != null) {
-      StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
-      if (sdCopy.getBucketCols() == null) {
-        sdCopy.setBucketCols(Collections.emptyList());
-      }
-      if (sdCopy.getSortCols() == null) {
-        sdCopy.setSortCols(Collections.emptyList());
-      }
-      if (sdCopy.getSkewedInfo() == null) {
-        sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
-          Collections.emptyList(), Collections.emptyMap()));
-      }
-      sdCopy.setLocation(wrapper.getLocation());
-      sdCopy.setParameters(wrapper.getParameters());
-      t.setSd(sdCopy);
-    }
-    return t;
-  }
-
-  static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) {
-    Partition p = wrapper.getPartition().deepCopy();
-    if (wrapper.getSdHash() != null) {
-      StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy();
-      if (sdCopy.getBucketCols() == null) {
-        sdCopy.setBucketCols(Collections.emptyList());
-      }
-      if (sdCopy.getSortCols() == null) {
-        sdCopy.setSortCols(Collections.emptyList());
-      }
-      if (sdCopy.getSkewedInfo() == null) {
-        sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(),
-          Collections.emptyList(), Collections.emptyMap()));
-      }
-      sdCopy.setLocation(wrapper.getLocation());
-      sdCopy.setParameters(wrapper.getParameters());
-      p.setSd(sdCopy);
-    }
-    return p;
-  }
-
-  public static boolean matches(String name, String pattern) {
-    String[] subpatterns = pattern.trim().split("\\|");
-    for (String subpattern : subpatterns) {
-      subpattern = "(?i)" + subpattern.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*")
-          .replaceAll("\\^", "\\\\^").replaceAll("\\$", "\\\\$");
-      if (Pattern.matches(subpattern, StringUtils.normalizeIdentifier(name))) {
-        return true;
-      }
-    }
-    return false;
-  }
-}
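
CacheUtils.matches() translates Hive-style name patterns, with '|'-separated alternatives and
'*'/'?' wildcards, into case-insensitive regexes. A standalone sketch of the same translation;
normalizeIdentifier() is approximated here by toLowerCase() and the '^'/'$' escaping of the
original is omitted:

import java.util.regex.Pattern;

public class HivePatternMatch {
  // Mirrors CacheUtils.matches: '|' separates alternatives, '?' -> '.', '*' -> '.*'.
  static boolean matches(String name, String pattern) {
    for (String subpattern : pattern.trim().split("\\|")) {
      subpattern = "(?i)" + subpattern.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*");
      if (Pattern.matches(subpattern, name.toLowerCase())) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(matches("web_logs", "web*"));         // true
    System.out.println(matches("Web_Logs", "web_logs"));     // true: case-insensitive
    System.out.println(matches("sales", "web*|inventory"));  // false
  }
}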