Posted to commits@hive.apache.org by ga...@apache.org on 2017/12/06 21:20:22 UTC
[07/12] hive git commit: HIVE-17980 Move HiveMetaStoreClient plus a
few remaining classes. This closes #272 (Alan Gates, reviewed by Daniel Dai)
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
deleted file mode 100644
index 7df7ac5..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class MetaStoreTestUtils {
-
- private static final Logger LOG = LoggerFactory.getLogger("hive.log");
- public static final int RETRY_COUNT = 10;
-
- public static int startMetaStore() throws Exception {
- return MetaStoreTestUtils.startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
- }
-
- public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception {
- int port = MetaStoreTestUtils.findFreePort();
- MetaStoreTestUtils.startMetaStore(port, bridge, conf);
- return port;
- }
-
- public static int startMetaStore(HiveConf conf) throws Exception {
- return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
- }
-
- public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
- MetaStoreTestUtils.startMetaStore(port, bridge, null);
- }
-
- public static void startMetaStore(final int port,
- final HadoopThriftAuthBridge bridge, HiveConf hiveConf)
- throws Exception {
- if (hiveConf == null) {
- hiveConf = new HiveConf(HMSHandler.class);
- }
- final HiveConf finalHiveConf = hiveConf;
- Thread thread = new Thread(new Runnable() {
- @Override
- public void run() {
- try {
- HiveMetaStore.startMetaStore(port, bridge, finalHiveConf);
- } catch (Throwable e) {
- LOG.error("Metastore Thrift Server threw an exception...", e);
- }
- }
- });
- thread.setDaemon(true);
- thread.start();
- MetaStoreTestUtils.loopUntilHMSReady(port);
- }
-
- public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge) throws Exception {
- return MetaStoreTestUtils.startMetaStoreWithRetry(bridge, null);
- }
-
- public static int startMetaStoreWithRetry(HiveConf conf) throws Exception {
- return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
- }
-
- public static int startMetaStoreWithRetry() throws Exception {
- return MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), null);
- }
-
- public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge, HiveConf conf)
- throws Exception {
- Exception metaStoreException = null;
- int metaStorePort = 0;
-
- for (int tryCount = 0; tryCount < MetaStoreTestUtils.RETRY_COUNT; tryCount++) {
- try {
- metaStorePort = MetaStoreTestUtils.findFreePort();
- MetaStoreTestUtils.startMetaStore(metaStorePort, bridge, conf);
- return metaStorePort;
- } catch (ConnectException ce) {
- metaStoreException = ce;
- }
- }
-
- throw metaStoreException;
- }
-
- /**
-  * A simple connect test to make sure that the metastore is up.
-  * @param port the port on which the metastore Thrift service is expected to listen
-  * @throws Exception if the metastore cannot be reached within the retry window
-  */
- public static void loopUntilHMSReady(int port) throws Exception {
- int retries = 0;
- Exception exc = null;
- while (true) {
- try {
- Socket socket = new Socket();
- socket.connect(new InetSocketAddress(port), 5000);
- socket.close();
- return;
- } catch (Exception e) {
- if (retries++ > 60) { //give up
- exc = e;
- break;
- }
- Thread.sleep(1000);
- }
- }
- // something is preventing metastore from starting
- // print the stack from all threads for debugging purposes
- LOG.error("Unable to connect to metastore server: " + exc.getMessage());
- LOG.info("Printing all thread stack traces for debugging before throwing exception.");
- LOG.info(MetaStoreTestUtils.getAllThreadStacksAsString());
- throw exc;
- }
-
- public static String getAllThreadStacksAsString() {
- Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
- StringBuilder sb = new StringBuilder();
- for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
- Thread t = entry.getKey();
- sb.append(System.lineSeparator());
- sb.append("Name: ").append(t.getName()).append(" State: ").append(t.getState());
- MetaStoreTestUtils.addStackString(entry.getValue(), sb);
- }
- return sb.toString();
- }
-
- public static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
- sb.append(System.lineSeparator());
- for (StackTraceElement stackElem : stackElems) {
- sb.append(stackElem).append(System.lineSeparator());
- }
- }
-
- /**
-  * Finds a free port on the machine. Note that the port may be taken again
-  * before the caller binds it; callers such as startMetaStoreWithRetry()
-  * retry on failure for this reason.
-  *
-  * @return a port number that was free when checked
-  * @throws IOException if no socket could be opened
-  */
- public static int findFreePort() throws IOException {
- ServerSocket socket = new ServerSocket(0);
- int port = socket.getLocalPort();
- socket.close();
- return port;
- }
-
- /**
-  * Finds a free port on the machine, while allowing the caller to
-  * specify one port number that must never be returned.
-  */
- public static int findFreePortExcepting(int portToExclude) throws IOException {
- ServerSocket socket1 = null;
- ServerSocket socket2 = null;
- try {
- socket1 = new ServerSocket(0);
- socket2 = new ServerSocket(0);
- if (socket1.getLocalPort() != portToExclude) {
- return socket1.getLocalPort();
- }
- // If we're here, then socket1.getLocalPort was the port to exclude
- // Since both sockets were open together at a point in time, we're
- // guaranteed that socket2.getLocalPort() is not the same.
- return socket2.getLocalPort();
- } finally {
- if (socket1 != null){
- socket1.close();
- }
- if (socket2 != null){
- socket2.close();
- }
- }
- }
-
-}
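
For reference, a minimal sketch of how tests typically drive these helpers
(package names as shown in this diff; the test class name and printed output
are illustrative only):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;

    public class ExampleMetaStoreTest {  // hypothetical test driver
      public static void main(String[] args) throws Exception {
        // Retry over free ports to dodge the race in findFreePort().
        int port = MetaStoreTestUtils.startMetaStoreWithRetry();
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        System.out.println(client.getAllDatabases()); // e.g. [default]
        client.close();
      }
    }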
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
deleted file mode 100644
index 12a862d..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-
-import java.util.List;
-
-/**
- * Test Mock-out for PartitionExpressionForMetastore.
- */
-public class MockPartitionExpressionForMetastore implements PartitionExpressionProxy {
- @Override
- public String convertExprToFilter(byte[] expr) throws MetaException {
- return null;
- }
-
- @Override
- public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
- byte[] expr, String defaultPartitionName,
- List<String> partitionNames) throws MetaException {
- return false;
- }
-
- @Override
- public FileMetadataExprType getMetadataType(String inputFormat) {
- return null;
- }
-
- @Override
- public SearchArgument createSarg(byte[] expr) {
- return null;
- }
-
- @Override
- public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
- return null;
- }
-}
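
The mock above is wired into a test metastore through the expression-proxy
configuration key; a condensed sketch of the wiring, mirroring
TestHiveMetaStoreGetMetaConf further down in this commit:

    HiveConf metastoreConf = new HiveConf();
    metastoreConf.setClass(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname,
        MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
    int msPort = MetaStoreTestUtils.startMetaStore(metastoreConf);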
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java
deleted file mode 100644
index f71911e..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreGetMetaConf.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.security.Permission;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.thrift.TException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.Rule;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import static org.junit.Assert.*;
-import org.junit.Before;
-
-public class TestHiveMetaStoreGetMetaConf {
-
- @Rule
- public ExpectedException thrown = ExpectedException.none();
-
- private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStoreGetMetaConf.class);
- private static HiveConf hiveConf;
- private static SecurityManager securityManager;
-
- private HiveMetaStoreClient hmsc;
-
- public static class NoExitSecurityManager extends SecurityManager {
-
- @Override
- public void checkPermission(Permission perm) {
- // allow anything.
- }
-
- @Override
- public void checkPermission(Permission perm, Object context) {
- // allow anything.
- }
-
- @Override
- public void checkExit(int status) {
- super.checkExit(status);
- throw new RuntimeException("System.exit() was called. Raising exception.");
- }
- }
-
- @AfterClass
- public static void tearDown() throws Exception {
- LOG.info("Shutting down metastore.");
- System.setSecurityManager(securityManager);
- }
-
- @BeforeClass
- public static void startMetaStoreServer() throws Exception {
-
- securityManager = System.getSecurityManager();
- System.setSecurityManager(new NoExitSecurityManager());
- HiveConf metastoreConf = new HiveConf();
- metastoreConf.setClass(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname,
- MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
- metastoreConf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL, false);
- int msPort = MetaStoreTestUtils.startMetaStore(metastoreConf);
- hiveConf = new HiveConf(TestHiveMetaStoreGetMetaConf.class);
- hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
- + msPort);
- hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, "");
- hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, "");
- hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
- hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 10);
-
- System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
- System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
- }
-
- @Before
- public void setup() throws MetaException {
- hmsc = new HiveMetaStoreClient(hiveConf);
- }
-
- @After
- public void closeClient() {
- if (hmsc != null) {
- hmsc.close();
- }
- }
-
- @Test
- public void testGetMetaConfDefault() throws MetaException, TException {
- HiveConf.ConfVars metaConfVar = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL;
- String expected = metaConfVar.getDefaultValue();
- String actual = hmsc.getMetaConf(metaConfVar.toString());
- assertEquals(expected, actual);
- }
-
- @Test
- public void testGetMetaConfDefaultEmptyString() throws MetaException, TException {
- HiveConf.ConfVars metaConfVar = HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN;
- String expected = "";
- String actual = hmsc.getMetaConf(metaConfVar.toString());
- assertEquals(expected, actual);
- }
-
- @Test
- public void testGetMetaConfOverridden() throws MetaException, TException {
- HiveConf.ConfVars metaConfVar = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL;
- String expected = "false";
- String actual = hmsc.getMetaConf(metaConfVar.toString());
- assertEquals(expected, actual);
- }
-
- @Test
- public void testGetMetaConfUnknownProperty() throws MetaException, TException {
- String unknownPropertyName = "hive.meta.foo.bar";
- thrown.expect(MetaException.class);
- thrown.expectMessage("Invalid configuration key " + unknownPropertyName);
- hmsc.getMetaConf(unknownPropertyName);
- }
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
deleted file mode 100644
index 86462ff..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
+++ /dev/null
@@ -1,409 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
-import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.security.Permission;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Test to check PartitionSpec support in HiveMetaStore.
- */
-public class TestHiveMetaStorePartitionSpecs {
-
- private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStorePartitionSpecs.class);
- private static int msPort;
- private static HiveConf hiveConf;
- private static SecurityManager securityManager;
-
- public static class NoExitSecurityManager extends SecurityManager {
-
- @Override
- public void checkPermission(Permission perm) {
- // allow anything.
- }
-
- @Override
- public void checkPermission(Permission perm, Object context) {
- // allow anything.
- }
-
- @Override
- public void checkExit(int status) {
- super.checkExit(status);
- throw new RuntimeException("System.exit() was called. Raising exception.");
- }
- }
-
-
- @AfterClass
- public static void tearDown() throws Exception {
- LOG.info("Shutting down metastore.");
- System.setSecurityManager(securityManager);
-
- HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
- hmsc.dropDatabase(dbName, true, true, true);
- }
-
- @BeforeClass
- public static void startMetaStoreServer() throws Exception {
-
- HiveConf metastoreConf = new HiveConf();
- metastoreConf.setClass(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname,
- MockPartitionExpressionForMetastore.class, PartitionExpressionProxy.class);
- msPort = MetaStoreTestUtils.startMetaStore(metastoreConf);
- securityManager = System.getSecurityManager();
- System.setSecurityManager(new NoExitSecurityManager());
- hiveConf = new HiveConf(TestHiveMetaStorePartitionSpecs.class);
- hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
- + msPort);
- hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
- hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
- hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
- hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
- "false");
- hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.name(), MockPartitionExpressionForMetastore.class.getCanonicalName());
- System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
- System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
- }
-
- private static String dbName = "testpartitionspecs_db";
- private static String tableName = "testpartitionspecs_table";
- private static int nDates = 10;
- private static String datePrefix = "2014010";
-
- private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitionGrouping) throws Exception {
-
-
- List<FieldSchema> columns = new ArrayList<FieldSchema>();
- columns.add(new FieldSchema("foo", "string", ""));
- columns.add(new FieldSchema("bar", "string", ""));
-
- List<FieldSchema> partColumns = new ArrayList<FieldSchema>();
- partColumns.add(new FieldSchema("dt", "string", ""));
- partColumns.add(new FieldSchema("blurb", "string", ""));
-
- SerDeInfo serdeInfo = new SerDeInfo("LBCSerDe", LazyBinaryColumnarSerDe.class.getCanonicalName(), new HashMap<String, String>());
-
- StorageDescriptor storageDescriptor
- = new StorageDescriptor(columns, null,
- "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
- "org.apache.hadoop.hive.ql.io.RCFileOutputFormat",
- false, 0, serdeInfo, null, null, null);
-
- Map<String, String> tableParameters = new HashMap<String, String>();
- tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false");
- Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", "");
-
- hmsc.createTable(table);
- Assert.assertTrue("Table " + dbName + "." + tableName + " does not exist",
- hmsc.tableExists(dbName, tableName));
-
- }
-
- private static void clearAndRecreateDB(HiveMetaStoreClient hmsc) throws Exception {
- hmsc.dropDatabase(dbName,
- true, // Delete data.
- true, // Ignore unknownDB.
- true // Cascade.
- );
-
- hmsc.createDatabase(new Database(dbName,
- "", // Description.
- null, // Location.
- null // Parameters.
- ));
- }
-
- // Get partition-path. For blurb='isLocatedOutsideTablePath', place the partition outside the table-path.
- private static String getPartitionPath(Table table, List<String> partValues) {
- // Returning null lets the metastore place the partition under the table directory (the default).
- return partValues.get(1).equalsIgnoreCase("isLocatedOutsideTablePath") ?
- table.getSd().getLocation().replace(table.getTableName(), "location_outside_" + table.getTableName())
- + "_" + partValues.get(0) + "_" + partValues.get(1)
- : null;
- }
-
- private static void populatePartitions(HiveMetaStoreClient hmsc, Table table, List<String> blurbs) throws Exception {
- for (int i=0; i< nDates; ++i) {
- for (String blurb : blurbs) {
- StorageDescriptor sd = new StorageDescriptor(table.getSd());
- // Place each partition inside or outside the table directory, depending on the blurb.
- List<String> values = Arrays.asList(datePrefix + i, blurb);
- sd.setLocation(getPartitionPath(table, values));
- hmsc.add_partition(new Partition(values, dbName, tableName, 0, 0, sd, null));
- }
- }
- }
-
- private void testGetPartitionSpecs(boolean enablePartitionGrouping) {
- try {
- HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
- clearAndRecreateDB(hmsc);
- createTable(hmsc, enablePartitionGrouping);
- Table table = hmsc.getTable(dbName, tableName);
- populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
-
- PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
- Assert.assertEquals( "Unexpected number of partitions.", nDates * 2, partitionSpecProxy.size());
-
- Map<String, List<String>> locationToDateMap = new HashMap<String, List<String>>();
- locationToDateMap.put("isLocatedInTablePath", new ArrayList<String>());
- locationToDateMap.put("isLocatedOutsideTablePath", new ArrayList<String>());
- PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
-
- while (iterator.hasNext()) {
- Partition partition = iterator.next();
- locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
- }
-
- List<String> expectedDates = new ArrayList<String>(nDates);
- for (int i=0; i<nDates; ++i) {
- expectedDates.add(datePrefix + i);
- }
-
- Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedInTablePath").toArray());
- Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
-
- partitionSpecProxy = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedOutsideTablePath\"", -1);
- locationToDateMap.get("isLocatedInTablePath").clear();
- locationToDateMap.get("isLocatedOutsideTablePath").clear();
- iterator = partitionSpecProxy.getPartitionIterator();
-
- while (iterator.hasNext()) {
- Partition partition = iterator.next();
- locationToDateMap.get(partition.getValues().get(1)).add(partition.getValues().get(0));
- }
-
- Assert.assertEquals("Unexpected date-values.", 0, locationToDateMap.get("isLocatedInTablePath").size());
- Assert.assertArrayEquals("Unexpected date-values.", expectedDates.toArray(), locationToDateMap.get("isLocatedOutsideTablePath").toArray());
-
-
- }
- catch (Throwable t) {
- LOG.error("Unexpected Exception!", t);
- t.printStackTrace();
- Assert.assertTrue("Unexpected Exception!", false);
- }
- }
-
- /**
- * Test for HiveMetaStoreClient.listPartitionSpecs() and HiveMetaStoreClient.listPartitionSpecsByFilter().
- * Check behaviour with and without Partition-grouping enabled.
- */
- @Test
- public void testGetPartitionSpecs_WithAndWithoutPartitionGrouping() {
- testGetPartitionSpecs(true);
- testGetPartitionSpecs(false);
- }
-
-
- /**
- * Test to confirm that partitions can be added using PartitionSpecs.
- */
- @Test
- public void testAddPartitions() {
- try {
- // Create source table.
- HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
- clearAndRecreateDB(hmsc);
- createTable(hmsc, true);
- Table table = hmsc.getTable(dbName, tableName);
- populatePartitions(hmsc, table, Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath"));
-
- // Clone the table,
- String targetTableName = "cloned_" + tableName;
- Table targetTable = new Table(table);
- targetTable.setTableName(targetTableName);
- StorageDescriptor targetTableSd = new StorageDescriptor(targetTable.getSd());
- targetTableSd.setLocation(
- targetTableSd.getLocation().replace( tableName, targetTableName));
- hmsc.createTable(targetTable);
-
- // Get partition-list from source.
- PartitionSpecProxy partitionsForAddition
- = hmsc.listPartitionSpecsByFilter(dbName, tableName, "blurb = \"isLocatedInTablePath\"", -1);
- partitionsForAddition.setTableName(targetTableName);
- partitionsForAddition.setRootLocation(targetTableSd.getLocation());
-
- Assert.assertEquals("Unexpected number of partitions added. ",
- partitionsForAddition.size(), hmsc.add_partitions_pspec(partitionsForAddition));
-
- // Check that the added partitions are as expected.
- PartitionSpecProxy clonedPartitions = hmsc.listPartitionSpecs(dbName, targetTableName, -1);
- Assert.assertEquals("Unexpected number of partitions returned. ",
- partitionsForAddition.size(), clonedPartitions.size());
-
- PartitionSpecProxy.PartitionIterator sourceIterator = partitionsForAddition.getPartitionIterator(),
- targetIterator = clonedPartitions.getPartitionIterator();
-
- while (targetIterator.hasNext()) {
- Partition sourcePartition = sourceIterator.next(),
- targetPartition = targetIterator.next();
- Assert.assertEquals("Mismatched values.",
- sourcePartition.getValues(), targetPartition.getValues());
- Assert.assertEquals("Mismatched locations.",
- sourcePartition.getSd().getLocation(), targetPartition.getSd().getLocation());
- }
- }
- catch (Throwable t) {
- LOG.error("Unexpected Exception!", t);
- t.printStackTrace();
- Assert.assertTrue("Unexpected Exception!", false);
- }
- }
-
- /**
- * Test to confirm that Partition-grouping behaves correctly when Table-schemas evolve.
- * Partitions must be grouped by location and schema.
- */
- @Test
- public void testFetchingPartitionsWithDifferentSchemas() {
- try {
- // Create source table.
- HiveMetaStoreClient hmsc = new HiveMetaStoreClient(hiveConf);
- clearAndRecreateDB(hmsc);
- createTable(hmsc, true);
- Table table = hmsc.getTable(dbName, tableName);
- populatePartitions(hmsc,
- table,
- Arrays.asList("isLocatedInTablePath", "isLocatedOutsideTablePath") // Blurb list.
- );
-
- // Modify table schema. Add columns.
- List<FieldSchema> fields = table.getSd().getCols();
- fields.add(new FieldSchema("goo", "string", "Entirely new column. Doesn't apply to older partitions."));
- table.getSd().setCols(fields);
- hmsc.alter_table(dbName, tableName, table);
- // Check that the change stuck.
- table = hmsc.getTable(dbName,tableName);
- Assert.assertEquals("Unexpected number of table columns.",
- 3, table.getSd().getColsSize());
-
- // Add partitions with new schema.
- // Mark Partitions with new schema with different blurb.
- populatePartitions(hmsc, table, Arrays.asList("hasNewColumn"));
-
- // Retrieve *all* partitions from the table.
- PartitionSpecProxy partitionSpecProxy = hmsc.listPartitionSpecs(dbName, tableName, -1);
- Assert.assertEquals("Unexpected number of partitions.", nDates * 3, partitionSpecProxy.size());
-
- // Confirm grouping.
- Assert.assertTrue("Unexpected type of PartitionSpecProxy.", partitionSpecProxy instanceof CompositePartitionSpecProxy);
- CompositePartitionSpecProxy compositePartitionSpecProxy = (CompositePartitionSpecProxy)partitionSpecProxy;
- List<PartitionSpec> partitionSpecs = compositePartitionSpecProxy.toPartitionSpec();
- Assert.assertTrue("PartitionSpec[0] should have been a SharedSDPartitionSpec.",
- partitionSpecs.get(0).isSetSharedSDPartitionSpec());
- Assert.assertEquals("PartitionSpec[0] should use the table-path as the common root location. ",
- table.getSd().getLocation(), partitionSpecs.get(0).getRootPath());
- Assert.assertTrue("PartitionSpec[1] should have been a SharedSDPartitionSpec.",
- partitionSpecs.get(1).isSetSharedSDPartitionSpec());
- Assert.assertEquals("PartitionSpec[1] should use the table-path as the common root location. ",
- table.getSd().getLocation(), partitionSpecs.get(1).getRootPath());
- Assert.assertTrue("PartitionSpec[2] should have been a ListComposingPartitionSpec.",
- partitionSpecs.get(2).isSetPartitionList());
-
- // Categorize the partitions returned, and confirm that all partitions are accounted for.
- PartitionSpecProxy.PartitionIterator iterator = partitionSpecProxy.getPartitionIterator();
- Map<String, List<Partition>> blurbToPartitionList = new HashMap<String, List<Partition>>(3);
- while (iterator.hasNext()) {
-
- Partition partition = iterator.next();
- String blurb = partition.getValues().get(1);
-
- if (!blurbToPartitionList.containsKey(blurb)) {
- blurbToPartitionList.put(blurb, new ArrayList<Partition>(nDates));
- }
-
- blurbToPartitionList.get(blurb).add(partition);
-
- } // </Classification>
-
- // All partitions with blurb="isLocatedOutsideTablePath" should have 2 columns,
- // and must have locations outside the table directory.
- for (Partition partition : blurbToPartitionList.get("isLocatedOutsideTablePath")) {
- Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
- Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
- Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
- String partitionLocation = partition.getSd().getLocation();
- String tableLocation = table.getSd().getLocation();
- Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
- "Partition should have been outside table location: " + tableLocation,
- !partitionLocation.startsWith(tableLocation));
- }
-
- // All partitions with blurb="isLocatedInTablePath" should have 2 columns,
- // and must have locations within the table directory.
- for (Partition partition : blurbToPartitionList.get("isLocatedInTablePath")) {
- Assert.assertEquals("Unexpected number of columns.", 2, partition.getSd().getCols().size());
- Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
- Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
- String partitionLocation = partition.getSd().getLocation();
- String tableLocation = table.getSd().getLocation();
- Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
- "Partition should have been within table location: " + tableLocation,
- partitionLocation.startsWith(tableLocation));
- }
-
- // All partitions with blurb="hasNewColumn" were added after the table schema changed,
- // and must have 3 columns. Also, the partition locations must lie within the table directory.
- for (Partition partition : blurbToPartitionList.get("hasNewColumn")) {
- Assert.assertEquals("Unexpected number of columns.", 3, partition.getSd().getCols().size());
- Assert.assertEquals("Unexpected first column.", "foo", partition.getSd().getCols().get(0).getName());
- Assert.assertEquals("Unexpected second column.", "bar", partition.getSd().getCols().get(1).getName());
- Assert.assertEquals("Unexpected third column.", "goo", partition.getSd().getCols().get(2).getName());
- String partitionLocation = partition.getSd().getLocation();
- String tableLocation = table.getSd().getLocation();
- Assert.assertTrue("Unexpected partition location: " + partitionLocation + ". " +
- "Partition should have been within table location: " + tableLocation,
- partitionLocation.startsWith(tableLocation));
- }
-
- }
- catch (Throwable t) {
- LOG.error("Unexpected Exception!", t);
- t.printStackTrace();
- Assert.assertTrue("Unexpected Exception!", false);
- }
- }
-
-}
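
Condensed, the PartitionSpec listing pattern these tests exercise looks like
this (hmsc, dbName and tableName as in the tests above; -1 requests all
partitions):

    PartitionSpecProxy specs = hmsc.listPartitionSpecs(dbName, tableName, -1);
    PartitionSpecProxy.PartitionIterator iterator = specs.getPartitionIterator();
    while (iterator.hasNext()) {
      Partition partition = iterator.next();
      // partition.getValues() holds the key values in order, e.g. [dt, blurb].
    }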
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
deleted file mode 100644
index 2166c20..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Test long-running request timeout functionality in the MetaStore server.
- * HiveMetaStore.HMSHandler.create_database() is used to simulate a long running method.
- */
-public class TestHiveMetaStoreTimeout {
- protected static HiveMetaStoreClient client;
- protected static HiveConf hiveConf;
- protected static Warehouse warehouse;
-
- @BeforeClass
- public static void setUp() throws Exception {
- HiveMetaStore.TEST_TIMEOUT_ENABLED = true;
- hiveConf = new HiveConf(TestHiveMetaStoreTimeout.class);
- hiveConf.set(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS.varname,
- MockPartitionExpressionForMetastore.class.getCanonicalName());
- hiveConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 10 * 1000,
- TimeUnit.MILLISECONDS);
- warehouse = new Warehouse(hiveConf);
- try {
- client = new HiveMetaStoreClient(hiveConf);
- } catch (Throwable e) {
- System.err.println("Unable to open the metastore");
- System.err.println(StringUtils.stringifyException(e));
- throw e;
- }
- }
-
- @AfterClass
- public static void tearDown() throws Exception {
- HiveMetaStore.TEST_TIMEOUT_ENABLED = false;
- try {
- client.close();
- } catch (Throwable e) {
- System.err.println("Unable to close metastore");
- System.err.println(StringUtils.stringifyException(e));
- throw e;
- }
- }
-
- @Test
- public void testNoTimeout() throws Exception {
- HiveMetaStore.TEST_TIMEOUT_VALUE = 5 * 1000;
-
- String dbName = "db";
- client.dropDatabase(dbName, true, true);
-
- Database db = new Database();
- db.setName(dbName);
- try {
- client.createDatabase(db);
- } catch (MetaException e) {
- Assert.fail("should not throw timeout exception: " + e.getMessage());
- }
-
- client.dropDatabase(dbName, true, true);
- }
-
- @Test
- public void testTimeout() throws Exception {
- HiveMetaStore.TEST_TIMEOUT_VALUE = 15 * 1000;
-
- String dbName = "db";
- client.dropDatabase(dbName, true, true);
-
- Database db = new Database();
- db.setName(dbName);
- try {
- client.createDatabase(db);
- Assert.fail("should throw timeout exception.");
- } catch (MetaException e) {
- Assert.assertTrue("unexpected MetaException", e.getMessage().contains("Timeout when " +
- "executing method: create_database"));
- }
-
- // restore
- HiveMetaStore.TEST_TIMEOUT_VALUE = 5 * 1000;
- }
-
- @Test
- public void testResetTimeout() throws Exception {
- HiveMetaStore.TEST_TIMEOUT_VALUE = 5 * 1000;
- String dbName = "db";
-
- // no timeout before reset
- client.dropDatabase(dbName, true, true);
- Database db = new Database();
- db.setName(dbName);
- try {
- client.createDatabase(db);
- } catch (MetaException e) {
- Assert.fail("should not throw timeout exception: " + e.getMessage());
- }
- client.dropDatabase(dbName, true, true);
-
- // reset
- client.setMetaConf(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "3s");
-
- // timeout after reset
- try {
- client.createDatabase(db);
- Assert.fail("should throw timeout exception.");
- } catch (MetaException e) {
- Assert.assertTrue("unexpected MetaException", e.getMessage().contains("Timeout when " +
- "executing method: create_database"));
- }
-
- // restore
- client.dropDatabase(dbName, true, true);
- client.setMetaConf(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "10s");
- }
-}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java
deleted file mode 100644
index 8409d9b..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
-import org.apache.hadoop.hive.metastore.api.InvalidInputException;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestOldSchema {
- private ObjectStore store = null;
-
- private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName());
-
- public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
- @Override
- public String convertExprToFilter(byte[] expr) throws MetaException {
- return null;
- }
-
- @Override
- public boolean filterPartitionsByExpr(List<FieldSchema> partColumns, byte[] expr,
- String defaultPartitionName,
- List<String> partitionNames) throws MetaException {
- return false;
- }
-
- @Override
- public FileMetadataExprType getMetadataType(String inputFormat) {
- return null;
- }
-
- @Override
- public SearchArgument createSarg(byte[] expr) {
- return null;
- }
-
- @Override
- public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
- return null;
- }
- }
-
- byte[][] bitVectors = new byte[2][];
-
- @Before
- public void setUp() throws Exception {
- HiveConf conf = new HiveConf();
- conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
- conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR, false);
-
- store = new ObjectStore();
- store.setConf(conf);
- dropAllStoreObjects(store);
-
- HyperLogLog hll = HyperLogLog.builder().build();
- hll.addLong(1);
- bitVectors[1] = hll.serialize();
- hll = HyperLogLog.builder().build();
- hll.addLong(2);
- hll.addLong(3);
- hll.addLong(3);
- hll.addLong(4);
- bitVectors[0] = hll.serialize();
- }
-
- @After
- public void tearDown() {
- }
-
- /**
-  * Tests partition operations and aggregate column statistics against the old schema.
-  */
- @Test
- public void testPartitionOps() throws Exception {
- String dbName = "default";
- String tableName = "snp";
- Database db1 = new Database(dbName, "description", "locationurl", null);
- store.createDatabase(db1);
- long now = System.currentTimeMillis();
- List<FieldSchema> cols = new ArrayList<>();
- cols.add(new FieldSchema("col1", "long", "nocomment"));
- SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
- StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
- serde, null, null, Collections.<String, String> emptyMap());
- List<FieldSchema> partCols = new ArrayList<>();
- partCols.add(new FieldSchema("ds", "string", ""));
- Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
- Collections.<String, String> emptyMap(), null, null, null);
- store.createTable(table);
-
- Deadline.startTimer("getPartition");
- for (int i = 0; i < 10; i++) {
- List<String> partVal = new ArrayList<>();
- partVal.add(String.valueOf(i));
- StorageDescriptor psd = new StorageDescriptor(sd);
- psd.setLocation("file:/tmp/default/hit/ds=" + partVal);
- Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd,
- Collections.<String, String> emptyMap());
- store.addPartition(part);
- ColumnStatistics cs = new ColumnStatistics();
- ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
- desc.setLastAnalyzed(now);
- desc.setPartName("ds=" + String.valueOf(i));
- cs.setStatsDesc(desc);
- ColumnStatisticsObj obj = new ColumnStatisticsObj();
- obj.setColName("col1");
- obj.setColType("bigint");
- ColumnStatisticsData data = new ColumnStatisticsData();
- LongColumnStatsData dcsd = new LongColumnStatsData();
- dcsd.setHighValue(1000 + i);
- dcsd.setLowValue(-1000 - i);
- dcsd.setNumNulls(i);
- dcsd.setNumDVs(10 * i + 1);
- dcsd.setBitVectors(bitVectors[0]);
- data.setLongStats(dcsd);
- obj.setStatsData(data);
- cs.addToStatsObj(obj);
- store.updatePartitionColumnStatistics(cs, partVal);
-
- }
-
- Checker statChecker = new Checker() {
- @Override
- public void checkStats(AggrStats aggrStats) throws Exception {
- Assert.assertEquals(10, aggrStats.getPartsFound());
- Assert.assertEquals(1, aggrStats.getColStatsSize());
- ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
- Assert.assertEquals("col1", cso.getColName());
- Assert.assertEquals("bigint", cso.getColType());
- LongColumnStatsData lcsd = cso.getStatsData().getLongStats();
- Assert.assertEquals(1009, lcsd.getHighValue(), 0.01);
- Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01);
- Assert.assertEquals(45, lcsd.getNumNulls());
- Assert.assertEquals(91, lcsd.getNumDVs());
- }
- };
- List<String> partNames = new ArrayList<>();
- for (int i = 0; i < 10; i++) {
- partNames.add("ds=" + i);
- }
- AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames,
- Arrays.asList("col1"));
- statChecker.checkStats(aggrStats);
-
- }
-
- private interface Checker {
- void checkStats(AggrStats aggrStats) throws Exception;
- }
-
- public static void dropAllStoreObjects(RawStore store) throws MetaException,
- InvalidObjectException, InvalidInputException {
- try {
- Deadline.registerIfNot(100000);
- Deadline.startTimer("getPartition");
- List<String> dbs = store.getAllDatabases();
- for (int i = 0; i < dbs.size(); i++) {
- String db = dbs.get(i);
- List<String> tbls = store.getAllTables(db);
- for (String tbl : tbls) {
- List<Partition> parts = store.getPartitions(db, tbl, 100);
- for (Partition part : parts) {
- store.dropPartition(db, tbl, part.getValues());
- }
- store.dropTable(db, tbl);
- }
- store.dropDatabase(db);
- }
- } catch (NoSuchObjectException e) {
- // Ignore: the object was already dropped.
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
deleted file mode 100644
index a8c7ac3..0000000
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.apache.commons.lang.StringUtils.repeat;
-
-import java.lang.reflect.AccessibleObject;
-import java.lang.reflect.Array;
-import java.lang.reflect.Field;
-import java.lang.reflect.Modifier;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.commons.lang.ClassUtils;
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.thrift.TException;
-
-public class VerifyingObjectStore extends ObjectStore {
- private static final Logger LOG = LoggerFactory.getLogger(VerifyingObjectStore.class);
-
- public VerifyingObjectStore() {
- super();
- LOG.warn(getClass().getSimpleName() + " is being used - test run");
- }
-
- @Override
- public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
- short maxParts) throws MetaException, NoSuchObjectException {
- List<Partition> sqlResults = getPartitionsByFilterInternal(
- dbName, tblName, filter, maxParts, true, false);
- List<Partition> ormResults = getPartitionsByFilterInternal(
- dbName, tblName, filter, maxParts, false, true);
- verifyLists(sqlResults, ormResults, Partition.class);
- return sqlResults;
- }
-
- @Override
- public List<Partition> getPartitionsByNames(String dbName, String tblName,
- List<String> partNames) throws MetaException, NoSuchObjectException {
- List<Partition> sqlResults = getPartitionsByNamesInternal(
- dbName, tblName, partNames, true, false);
- List<Partition> ormResults = getPartitionsByNamesInternal(
- dbName, tblName, partNames, false, true);
- verifyLists(sqlResults, ormResults, Partition.class);
- return sqlResults;
- }
-
- @Override
- public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
- String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
- List<Partition> ormParts = new LinkedList<Partition>();
- boolean sqlResult = getPartitionsByExprInternal(
- dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false);
- boolean ormResult = getPartitionsByExprInternal(
- dbName, tblName, expr, defaultPartitionName, maxParts, ormParts, false, true);
- if (sqlResult != ormResult) {
- String msg = "The unknown flag is different - SQL " + sqlResult + ", ORM " + ormResult;
- LOG.error(msg);
- throw new MetaException(msg);
- }
- verifyLists(result, ormParts, Partition.class);
- return sqlResult;
- }
-
- @Override
- public List<Partition> getPartitions(
- String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException {
- List<Partition> sqlResults = getPartitionsInternal(dbName, tableName, maxParts, true, false);
- List<Partition> ormResults = getPartitionsInternal(dbName, tableName, maxParts, false, true);
- verifyLists(sqlResults, ormResults, Partition.class);
- return sqlResults;
- }
-
- @Override
- public ColumnStatistics getTableColumnStatistics(String dbName,
- String tableName, List<String> colNames) throws MetaException, NoSuchObjectException {
- ColumnStatistics sqlResult = getTableColumnStatisticsInternal(
- dbName, tableName, colNames, true, false);
- ColumnStatistics jdoResult = getTableColumnStatisticsInternal(
- dbName, tableName, colNames, false, true);
- verifyObjects(sqlResult, jdoResult, ColumnStatistics.class);
- return sqlResult;
- }
-
- @Override
- public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
- String tableName, List<String> partNames, List<String> colNames)
- throws MetaException, NoSuchObjectException {
- List<ColumnStatistics> sqlResult = getPartitionColumnStatisticsInternal(
- dbName, tableName, partNames, colNames, true, false);
- List<ColumnStatistics> jdoResult = getPartitionColumnStatisticsInternal(
- dbName, tableName, partNames, colNames, false, true);
- verifyLists(sqlResult, jdoResult, ColumnStatistics.class);
- return sqlResult;
- }
-
- private void verifyObjects(
- Object sqlResult, Object jdoResult, Class<?> clazz) throws MetaException {
- if (EqualsBuilder.reflectionEquals(sqlResult, jdoResult)) return;
- StringBuilder errorStr = new StringBuilder("Objects are different: \n");
- try {
- dumpObject(errorStr, "SQL", sqlResult, clazz, 0);
- errorStr.append("\n");
- dumpObject(errorStr, "ORM", jdoResult, clazz, 0);
- } catch (Throwable t) {
- errorStr.append("Error getting the diff: " + t);
- }
- LOG.error("Different results: \n" + errorStr.toString());
- throw new MetaException("Different results from SQL and ORM, see log for details");
- }
-
- private <T> void verifyLists(Collection<T> sqlResults, Collection<T> ormResults,
- Class<?> clazz) throws MetaException {
- final int MAX_DIFFS = 5;
- if (sqlResults.size() != ormResults.size()) {
- String msg = "Lists are not the same size: SQL " + sqlResults.size()
- + ", ORM " + ormResults.size();
- LOG.error(msg);
- throw new MetaException(msg);
- }
-
- Iterator<T> sqlIter = sqlResults.iterator(), ormIter = ormResults.iterator();
- StringBuilder errorStr = new StringBuilder();
- int errors = 0;
- for (int partIx = 0; partIx < sqlResults.size(); ++partIx) {
- assert sqlIter.hasNext() && ormIter.hasNext();
- T p1 = sqlIter.next(), p2 = ormIter.next();
- if (EqualsBuilder.reflectionEquals(p1, p2)) continue;
- errorStr.append("Results are different at list index " + partIx + ": \n");
- try {
- dumpObject(errorStr, "SQL", p1, clazz, 0);
- errorStr.append("\n");
- dumpObject(errorStr, "ORM", p2, clazz, 0);
- errorStr.append("\n\n");
- } catch (Throwable t) {
- String msg = "Error getting the diff at list index " + partIx;
- errorStr.append("\n\n" + msg);
- LOG.error(msg, t);
- break;
- }
- if (++errors == MAX_DIFFS) {
- errorStr.append("\n\nToo many diffs, giving up (lists might be sorted differently)");
- break;
- }
- }
- if (errorStr.length() > 0) {
- LOG.error("Different results: \n" + errorStr.toString());
- throw new MetaException("Different results from SQL and ORM, see log for details");
- }
- }
-
- private static void dumpObject(StringBuilder errorStr, String name, Object p,
- Class<?> c, int level) throws IllegalAccessException {
- String offsetStr = repeat(" ", level);
- if (p == null || c == String.class || c.isPrimitive()
- || ClassUtils.wrapperToPrimitive(c) != null) {
- errorStr.append(offsetStr).append(name + ": [" + p + "]\n");
- } else if (ClassUtils.isAssignable(c, Iterable.class)) {
- errorStr.append(offsetStr).append(name + " is an iterable\n");
- Iterator<?> i1 = ((Iterable<?>)p).iterator();
- int i = 0;
- while (i1.hasNext()) {
- Object o1 = i1.next();
- Class<?> t = o1 == null ? Object.class : o1.getClass(); // use the element's runtime class; fall back to Object for nulls
- dumpObject(errorStr, name + "[" + (i++) + "]", o1, t, level + 1);
- }
- } else if (c.isArray()) {
- int len = Array.getLength(p);
- Class<?> t = c.getComponentType();
- errorStr.append(offsetStr).append(name + " is an array\n");
- for (int i = 0; i < len; ++i) {
- dumpObject(errorStr, name + "[" + i + "]", Array.get(p, i), t, level + 1);
- }
- } else if (ClassUtils.isAssignable(c, Map.class)) {
- Map<?,?> c1 = (Map<?,?>)p;
- errorStr.append(offsetStr).append(name + " is a map\n");
- dumpObject(errorStr, name + ".keys", c1.keySet(), Set.class, level + 1);
- dumpObject(errorStr, name + ".vals", c1.values(), Collection.class, level + 1);
- } else {
- errorStr.append(offsetStr).append(name + " is of type " + c.getCanonicalName() + "\n");
- // TODO: this doesn't include superclass fields.
- Field[] fields = c.getDeclaredFields();
- AccessibleObject.setAccessible(fields, true);
- for (int i = 0; i < fields.length; i++) {
- Field f = fields[i];
- if (f.getName().indexOf('$') != -1 || Modifier.isStatic(f.getModifiers())) continue;
- dumpObject(errorStr, name + "." + f.getName(), f.get(p), f.getType(), level + 1);
- }
- }
- }
-}
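
VerifyingObjectStore is swapped in for the default ObjectStore via the
rawstore implementation setting; a sketch, assuming the standard
hive.metastore.rawstore.impl configuration key:

    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.rawstore.impl",
        "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
    // Every partition and statistics fetch now runs both the direct-SQL and
    // the ORM path, and throws MetaException when the two results differ.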
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 7ed8f27..f35a4c8 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -470,7 +470,7 @@
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
- <artifactId>hive-metastore</artifactId>
+ <artifactId>hive-standalone-metastore</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 4d52d74..d3df015 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -51,7 +51,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.conf.HiveVariableSource;
import org.apache.hadoop.hive.conf.VariableSubstitution;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.ColumnType;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Schema;
import org.apache.hadoop.hive.ql.exec.ConditionalTask;
@@ -309,7 +310,7 @@ public class Driver implements CommandProcessor {
String tableName = "result";
List<FieldSchema> lst = null;
try {
- lst = MetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer(conf));
+ lst = HiveMetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer(conf));
} catch (Exception e) {
LOG.warn("Error getting schema: "
+ org.apache.hadoop.util.StringUtils.stringifyException(e));
@@ -338,7 +339,7 @@ public class Driver implements CommandProcessor {
// Go over the schema and convert type to thrift type
if (lst != null) {
for (FieldSchema f : lst) {
- f.setType(MetaStoreUtils.typeToThriftType(f.getType()));
+ f.setType(ColumnType.typeToThriftType(f.getType()));
}
}
}
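
This Driver hunk illustrates the split that recurs through the rest of the patch: helpers that depend on SerDe classes stayed behind in the new HiveMetaStoreUtils, while pure type-name conversion now lives on ColumnType, and the generic MetaStoreUtils moved to the standalone metastore's utils package (hence the many import-only hunks below). A condensed sketch of the migrated call sites, with exception handling elided and the tableName/serde inputs illustrative:

import java.util.List;

import org.apache.hadoop.hive.metastore.ColumnType;
import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.serde2.Deserializer;

class ThriftSchemaSketch {
  // Both calls were previously static methods on the old MetaStoreUtils.
  static List<FieldSchema> thriftSchema(String tableName, Deserializer serde) throws Exception {
    List<FieldSchema> lst = HiveMetaStoreUtils.getFieldsFromDeserializer(tableName, serde);
    for (FieldSchema f : lst) {
      // Convert each column's type name to its thrift representation.
      f.setType(ColumnType.typeToThriftType(f.getType()));
    }
    return lst;
  }
}
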
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
index 7d5aa8b..f3a46db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
@@ -138,8 +138,8 @@ public class QueryState {
/**
* The source HiveConf object used to create the QueryState. If runAsync is false, and the
- * confOverLay is empty then we will reuse the hiveConf object as a backing datastore for the
- * QueryState. We will create a clone of the hiveConf object otherwise.
+ * confOverlay is empty then we will reuse the conf object as a backing datastore for the
+ * QueryState. We will create a clone of the conf object otherwise.
* @param hiveConf The source HiveConf
* @return The builder
*/
@@ -153,7 +153,7 @@ public class QueryState {
* - runAsync false
* - confOverlay null
* - generateNewQueryId false
- * - hiveConf null
+ * - conf null
* @return The generated QueryState object
*/
public QueryState build() {
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
index f7fad94..a5912eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
@@ -30,10 +30,10 @@ import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index d6bf746..55ef8de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -72,7 +72,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.DefaultHiveMetaHook;
import org.apache.hadoop.hive.metastore.HiveMetaHook;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.metastore.StatObjectConverter;
import org.apache.hadoop.hive.metastore.TableType;
@@ -111,6 +111,7 @@ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
@@ -4219,7 +4220,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
// the fields so that new SerDe could operate. Note that this may fail if some fields
// from old SerDe are too long to be stored in metastore, but there's nothing we can do.
try {
- Deserializer oldSerde = MetaStoreUtils.getDeserializer(
+ Deserializer oldSerde = HiveMetaStoreUtils.getDeserializer(
conf, tbl.getTTable(), false, oldSerdeName);
tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), oldSerde));
} catch (MetaException ex) {
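
Here the migration touches the four-argument getDeserializer, which instantiates a SerDe by explicit class name; DDLTask uses it to rebuild the stored column list against the old SerDe before the table switches to a new one. A hedged sketch of that call shape, with tbl and oldSerdeName illustrative and error handling elided:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde2.Deserializer;

class SerdeSwapSketch {
  // Re-derive the field schemas using the *old* SerDe before it is swapped out,
  // mirroring the patched DDLTask path.
  static void refreshFields(HiveConf conf, Table tbl, String oldSerdeName) throws Exception {
    Deserializer oldSerde = HiveMetaStoreUtils.getDeserializer(
        conf, tbl.getTTable(), false, oldSerdeName);
    List<FieldSchema> fields = Hive.getFieldsFromDeserializer(tbl.getTableName(), oldSerde);
    tbl.setFields(fields);
  }
}
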
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index b85a243..fb78bd8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 6d13773..f5a5e71 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -24,10 +24,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.HiveStatsUtils;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.ErrorMsg;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
index 567126e..c1dbd24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/StatsTask.java
@@ -28,6 +28,8 @@ import java.util.concurrent.Executors;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.CompilationOpContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.QueryState;
@@ -43,8 +45,6 @@ import org.apache.hadoop.hive.ql.stats.BasicStatsNoJobTask;
import org.apache.hadoop.hive.ql.stats.BasicStatsTask;
import org.apache.hadoop.hive.ql.stats.ColStatsProcessor;
import org.apache.hadoop.hive.ql.stats.IStatsProcessor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index f7850fd..d68d646 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -104,11 +104,11 @@ import org.apache.hadoop.hive.common.StringInternUtils;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.Driver.LockedDriverState;
import org.apache.hadoop.hive.ql.ErrorMsg;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 5c338b8..6c1afa6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -1184,10 +1184,10 @@ public class DagUtils {
/**
* Creates and initializes a JobConf object that can be used to execute
* the DAG. The configuration object will contain configurations from mapred-site
- * overlaid with key/value pairs from the hiveConf object. Finally it will also
+ * overlaid with key/value pairs from the conf object. Finally it will also
* contain some hive specific configurations that do not change from DAG to DAG.
*
- * @param hiveConf Current hiveConf for the execution
+ * @param hiveConf Current conf for the execution
* @return JobConf base configuration for job execution
* @throws IOException
*/
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 1beb839..b35df69 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.StringInternUtils;
import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hive.common.util.Ref;
import org.slf4j.Logger;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index fbcd579..50bdce8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -81,8 +81,8 @@ import org.apache.hadoop.hive.metastore.HiveMetaHook;
import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
@@ -137,6 +137,7 @@ import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -876,7 +877,7 @@ public class Hive {
tbl.setDbName(SessionState.get().getCurrentDatabase());
}
if (tbl.getCols().size() == 0 || tbl.getSd().getColsSize() == 0) {
- tbl.setFields(MetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(),
+ tbl.setFields(HiveMetaStoreUtils.getFieldsFromDeserializer(tbl.getTableName(),
tbl.getDeserializer()));
}
tbl.checkValidity(conf);
@@ -918,7 +919,7 @@ public class Hive {
public static List<FieldSchema> getFieldsFromDeserializerForMsStorage(
Table tbl, Deserializer deserializer) throws SerDeException, MetaException {
- List<FieldSchema> schema = MetaStoreUtils.getFieldsFromDeserializer(
+ List<FieldSchema> schema = HiveMetaStoreUtils.getFieldsFromDeserializer(
tbl.getTableName(), deserializer);
for (FieldSchema field : schema) {
field.setType(MetaStoreUtils.TYPE_FROM_DESERIALIZER);
@@ -2762,10 +2763,10 @@ private void constructOneLBLocationMap(FileStatus fSta,
List<DropTableDesc.PartSpec> partSpecs, PartitionDropOptions dropOptions) throws HiveException {
try {
Table tbl = getTable(dbName, tblName);
- List<ObjectPair<Integer, byte[]>> partExprs =
- new ArrayList<ObjectPair<Integer,byte[]>>(partSpecs.size());
+ List<org.apache.hadoop.hive.metastore.utils.ObjectPair<Integer, byte[]>> partExprs =
+ new ArrayList<>(partSpecs.size());
for (DropTableDesc.PartSpec partSpec : partSpecs) {
- partExprs.add(new ObjectPair<Integer, byte[]>(partSpec.getPrefixLength(),
+ partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(),
SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
}
List<org.apache.hadoop.hive.metastore.api.Partition> tParts = getMSC().dropPartitions(
@@ -4189,7 +4190,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
public static List<FieldSchema> getFieldsFromDeserializer(String name,
Deserializer serde) throws HiveException {
try {
- return MetaStoreUtils.getFieldsFromDeserializer(name, serde);
+ return HiveMetaStoreUtils.getFieldsFromDeserializer(name, serde);
} catch (SerDeException e) {
throw new HiveException("Error in getting fields from serde. "
+ e.getMessage(), e);
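
One subtlety in the Hive.java hunk: ObjectPair is now the copy under org.apache.hadoop.hive.metastore.utils, written fully qualified, presumably because an unqualified ObjectPair in this file still resolves to a different class. A small sketch of building the drop-partition expression list under the new type, with the prefix length and serialized expression bytes illustrative:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.utils.ObjectPair;

class PartExprSketch {
  // Pairs a partition-spec prefix length with a Kryo-serialized partition
  // expression, matching the element type the patched dropPartitions path builds.
  static List<ObjectPair<Integer, byte[]>> partExprs(int prefixLength, byte[] serializedExpr) {
    List<ObjectPair<Integer, byte[]>> exprs = new ArrayList<>();
    exprs.add(new ObjectPair<>(prefixLength, serializedExpr));
    return exprs;
  }
}
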
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 44026fa..6d10c10 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -28,13 +28,14 @@ import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hive.common.StringInternUtils;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -228,7 +229,7 @@ public class Partition implements Serializable {
final public Deserializer getDeserializer() {
if (deserializer == null) {
try {
- deserializer = MetaStoreUtils.getDeserializer(SessionState.getSessionConf(),
+ deserializer = HiveMetaStoreUtils.getDeserializer(SessionState.getSessionConf(),
tPartition, table.getTTable());
} catch (MetaException e) {
throw new RuntimeException(e);
@@ -246,8 +247,8 @@ public class Partition implements Serializable {
}
public Properties getSchemaFromTableSchema(Properties tblSchema) {
- return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(), table.getTTable().getSd(),
- tPartition.getParameters(), table.getDbName(), table.getTableName(), table.getPartitionKeys(),
+ return MetaStoreUtils.getPartSchemaFromTableSchema(tPartition.getSd(),
+ tPartition.getParameters(),
tblSchema);
}
@@ -493,7 +494,7 @@ public class Partition implements Serializable {
SessionState.getSessionConf(), serializationLib, table.getParameters())) {
return Hive.getFieldsFromDeserializerForMsStorage(table, getDeserializer());
}
- return MetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), getDeserializer());
+ return HiveMetaStoreUtils.getFieldsFromDeserializer(table.getTableName(), getDeserializer());
} catch (Exception e) {
LOG.error("Unable to get cols from serde: " +
tPartition.getSd().getSerdeInfo().getSerializationLib(), e);
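
Beyond the package move, getPartSchemaFromTableSchema was narrowed: the relocated utility derives a partition's schema from just the partition StorageDescriptor, the partition parameters, and the precomputed table schema, dropping the table-side arguments of the old signature. A sketch of the new call shape, where tblSchema stands for the table-level Properties computed elsewhere:

import java.util.Properties;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

class PartSchemaSketch {
  // Mirrors the patched Partition.getSchemaFromTableSchema: everything
  // table-side is carried in tblSchema rather than passed as separate arguments.
  static Properties partSchema(Partition tPartition, Properties tblSchema) {
    return MetaStoreUtils.getPartSchemaFromTableSchema(
        tPartition.getSd(), tPartition.getParameters(), tblSchema);
  }
}
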
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 1c26200..80c7804 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -30,17 +30,16 @@ import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.io.HdfsUtils;
import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
@@ -61,6 +60,8 @@ import org.apache.hadoop.hive.metastore.api.TableMeta;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
@@ -69,12 +70,12 @@ import org.apache.thrift.TException;
public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements IMetaStoreClient {
- SessionHiveMetaStoreClient(HiveConf conf, Boolean allowEmbedded) throws MetaException {
+ SessionHiveMetaStoreClient(Configuration conf, Boolean allowEmbedded) throws MetaException {
super(conf, null, allowEmbedded);
}
SessionHiveMetaStoreClient(
- HiveConf conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) throws MetaException {
+ Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) throws MetaException {
super(conf, hookLoader, allowEmbedded);
}
@@ -618,7 +619,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
tablePath = new Path(table.getSd().getLocation());
if (!getWh().isWritable(tablePath.getParent())) {
throw new MetaException("Table metadata not deleted since " + tablePath.getParent() +
- " is not writable by " + conf.getUser());
+ " is not writable by " + SecurityUtils.getUser());
}
} catch (IOException err) {
MetaException metaException =
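
Two decoupling moves are visible in this file: the client constructors now accept a plain Hadoop Configuration instead of HiveConf, and the current user is resolved through the metastore's own SecurityUtils rather than through the conf. A sketch of the patched error path, assuming SecurityUtils.getUser() surfaces lookup failures as IOException, as the surrounding catch block suggests:

import java.io.IOException;

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.utils.SecurityUtils;

class WritabilityErrorSketch {
  // Builds the same error the patched check raises when the parent directory
  // of a table's location is not writable by the current (UGI-resolved) user.
  static MetaException notWritable(String parentPath) throws IOException {
    return new MetaException("Table metadata not deleted since " + parentPath
        + " is not writable by " + SecurityUtils.getUser());
  }
}
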
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index a1cad9e..0debff6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -49,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SkewedInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
@@ -268,7 +269,7 @@ public class Table implements Serializable {
}
final public Class<? extends Deserializer> getDeserializerClass() throws Exception {
- return MetaStoreUtils.getDeserializerClass(SessionState.getSessionConf(), tTable);
+ return HiveMetaStoreUtils.getDeserializerClass(SessionState.getSessionConf(), tTable);
}
final public Deserializer getDeserializer(boolean skipConfError) {
@@ -280,7 +281,7 @@ public class Table implements Serializable {
final public Deserializer getDeserializerFromMetaStore(boolean skipConfError) {
try {
- return MetaStoreUtils.getDeserializer(SessionState.getSessionConf(), tTable, skipConfError);
+ return HiveMetaStoreUtils.getDeserializer(SessionState.getSessionConf(), tTable, skipConfError);
} catch (MetaException e) {
throw new RuntimeException(e);
}
@@ -640,7 +641,7 @@ public class Table implements Serializable {
SessionState.getSessionConf(), serializationLib, tTable.getParameters())) {
return Hive.getFieldsFromDeserializerForMsStorage(this, getDeserializer());
} else {
- return MetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer());
+ return HiveMetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer());
}
} catch (Exception e) {
LOG.error("Unable to get field from serde: " + serializationLib, e);
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index 1b7b425..f3d878d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -27,6 +27,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hive.common.util.HiveStringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -37,7 +38,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
index c62d98f..a7fc3e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
http://git-wip-us.apache.org/repos/asf/hive/blob/d79c4595/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index fc1d4f9..31d2b23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -26,7 +26,6 @@ import java.util.Map;
import java.util.Stack;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;