Posted to commits@bigtop.apache.org by rv...@apache.org on 2017/03/23 17:28:16 UTC
[45/50] [abbrv] bigtop git commit: BIGTOP-2704. Include ODPi runtime tests option into the battery of smoke tests
http://git-wip-us.apache.org/repos/asf/bigtop/blob/5e342c45/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestHCatalog.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestHCatalog.java b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestHCatalog.java
new file mode 100644
index 0000000..0ea49ce
--- /dev/null
+++ b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestHCatalog.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.odpi.specs.runtime.hive;
+
+import org.apache.commons.exec.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;
+import org.apache.hive.hcatalog.data.schema.HCatSchema;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+
+public class TestHCatalog {
+ private static final String JOBJAR = "odpi.test.hive.hcat.job.jar";
+ private static final String HCATCORE = "odpi.test.hive.hcat.core.jar";
+
+ private static final Log LOG = LogFactory.getLog(TestHCatalog.class.getName());
+
+ private static IMetaStoreClient client = null;
+ private static HiveConf conf;
+ private static HCatSchema inputSchema;
+ private static HCatSchema outputSchema;
+
+ private Random rand;
+
+ @BeforeClass
+ public static void connect() throws MetaException {
+ if (JdbcConnector.testActive(JdbcConnector.TEST_HCATALOG, "Test HCatalog ")) {
+ String hiveConfDir = JdbcConnector.getProperty(JdbcConnector.HIVE_CONF_DIR,
+ "Hive conf directory ");
+ String hadoopConfDir = JdbcConnector.getProperty(JdbcConnector.HADOOP_CONF_DIR,
+ "Hadoop conf directory ");
+ conf = new HiveConf();
+ String fileSep = System.getProperty("file.separator");
+ conf.addResource(new Path(hadoopConfDir + fileSep + "core-site.xml"));
+ conf.addResource(new Path(hadoopConfDir + fileSep + "hdfs-site.xml"));
+ conf.addResource(new Path(hadoopConfDir + fileSep + "yarn-site.xml"));
+ conf.addResource(new Path(hadoopConfDir + fileSep + "mapred-site.xml"));
+ conf.addResource(new Path(hiveConfDir + fileSep + "hive-site.xml"));
+ client = new HiveMetaStoreClient(conf);
+
+ }
+ }
+
+ @Before
+ public void checkIfActive() {
+ Assume.assumeTrue(JdbcConnector.testActive(JdbcConnector.TEST_HCATALOG, "Test HCatalog "));
+ rand = new Random();
+ }
+
+ @Test
+ public void hcatInputFormatOutputFormat() throws TException, IOException, ClassNotFoundException,
+ InterruptedException, URISyntaxException {
+ // Create a table to write to
+ final String inputTable = "odpi_hcat_input_table_" + rand.nextInt(Integer.MAX_VALUE);
+ SerDeInfo serde = new SerDeInfo("default_serde",
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
+ FieldSchema schema = new FieldSchema("line", "string", "");
+ inputSchema = new HCatSchema(Collections.singletonList(new HCatFieldSchema(schema.getName(),
+ HCatFieldSchema.Type.STRING, schema.getComment())));
+ StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(schema), null,
+ "org.apache.hadoop.mapred.TextInputFormat",
+ "org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat", false, 0, serde, null, null,
+ new HashMap<String, String>());
+ Table table = new Table(inputTable, "default", "me", 0, 0, 0, sd, null,
+ new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
+ client.createTable(table);
+
+ final String outputTable = "odpi_hcat_output_table_" + rand.nextInt(Integer.MAX_VALUE);
+ sd = new StorageDescriptor(Arrays.asList(
+ new FieldSchema("word", "string", ""),
+ new FieldSchema("count", "int", "")),
+ null, "org.apache.hadoop.mapred.TextInputFormat",
+ "org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat", false, 0, serde, null, null,
+ new HashMap<String, String>());
+ table = new Table(outputTable, "default", "me", 0, 0, 0, sd, null,
+ new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
+ client.createTable(table);
+ outputSchema = new HCatSchema(Arrays.asList(
+ new HCatFieldSchema("word", HCatFieldSchema.Type.STRING, ""),
+ new HCatFieldSchema("count", HCatFieldSchema.Type.INT, "")));
+
+ // LATER Could I use HCatWriter here and the reader to read it?
+ // Write some stuff into a file in the location of the table
+ table = client.getTable("default", inputTable);
+ String inputFile = table.getSd().getLocation() + "/input";
+ Path inputPath = new Path(inputFile);
+ FileSystem fs = FileSystem.get(conf);
+ FSDataOutputStream out = fs.create(inputPath);
+ out.writeChars("Mary had a little lamb\n");
+ out.writeChars("its fleece was white as snow\n");
+ out.writeChars("and everywhere that Mary went\n");
+ out.writeChars("the lamb was sure to go\n");
+ out.close();
+
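+ // Run the MR job via "hive --service jar", putting the hcat core jar on HADOOP_CLASSPATH so the
+ // HCatalog classes are available to the job.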
+ Map<String, String> env = new HashMap<>();
+ env.put("HADOOP_CLASSPATH", System.getProperty(HCATCORE, ""));
+ Map<String, String> results = HiveHelper.execCommand(new CommandLine("hive")
+ .addArgument("--service")
+ .addArgument("jar")
+ .addArgument(System.getProperty(JOBJAR))
+ .addArgument(HCatalogMR.class.getName())
+ .addArgument("-it")
+ .addArgument(inputTable)
+ .addArgument("-ot")
+ .addArgument(outputTable)
+ .addArgument("-is")
+ .addArgument(inputSchema.getSchemaAsTypeString())
+ .addArgument("-os")
+ .addArgument(outputSchema.getSchemaAsTypeString()), env);
+ LOG.info(results.toString());
+ Assert.assertEquals("HCat job failed", 0, Integer.parseInt(results.get("exitValue")));
+
+ client.dropTable("default", inputTable);
+ client.dropTable("default", outputTable);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/bigtop/blob/5e342c45/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestJdbc.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestJdbc.java b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestJdbc.java
new file mode 100644
index 0000000..154fd9c
--- /dev/null
+++ b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestJdbc.java
@@ -0,0 +1,545 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.odpi.specs.runtime.hive;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.Statement;
+import java.sql.Types;
+
+public class TestJdbc extends JdbcConnector {
+ private static final Log LOG = LogFactory.getLog(TestJdbc.class.getName());
+
+ /**
+ * Test simple calls on the connection that don't involve statements. setSchema is tested
+ * elsewhere because there's extra work to do for that one. Similarly with getMetaData.
+ * @throws SQLException
+ */
+ @Test
+ public void nonStatementCalls() throws SQLException {
+ conn.clearWarnings();
+
+ boolean isAutoCommit = conn.getAutoCommit();
+ LOG.debug("Auto commit is " + isAutoCommit);
+
+ String catalog = conn.getCatalog();
+ LOG.debug("Catalog is " + catalog);
+
+ String schema = conn.getSchema();
+ LOG.debug("Schema is " + schema);
+
+ int txnIsolation = conn.getTransactionIsolation();
+ LOG.debug("Transaction Isolation is " + txnIsolation);
+
+ SQLWarning warning = conn.getWarnings();
+ while (warning != null) {
+ LOG.debug("Found a warning: " + warning.getMessage());
+ warning = warning.getNextWarning();
+ }
+
+ boolean closed = conn.isClosed();
+ LOG.debug("Is closed? " + closed);
+
+ boolean readOnly = conn.isReadOnly();
+ LOG.debug("Is read only?" + readOnly);
+
+ // Hive doesn't support catalogs, so setting this to any value should be fine. If non-Hive
+ // systems try to pass this test, setting it to an invalid catalog name may cause issues,
+ // so we may need to make this value configurable.
+ conn.setCatalog("fred");
+ }
+
+ /**
+ * Test simple DatabaseMetaData calls. getColumns is tested elsewhere, as we need to call
+ * that on a valid table. Same with getFunctions.
+ * @throws SQLException
+ */
+ @Test
+ public void databaseMetaDataCalls() throws SQLException {
+ DatabaseMetaData md = conn.getMetaData();
+
+ boolean boolrc = md.allTablesAreSelectable();
+ LOG.debug("All tables are selectable? " + boolrc);
+
+ String strrc = md.getCatalogSeparator();
+ LOG.debug("Catalog separator " + strrc);
+
+ strrc = md.getCatalogTerm();
+ LOG.debug("Catalog term " + strrc);
+
+ ResultSet rs = md.getCatalogs();
+ while (rs.next()) {
+ strrc = rs.getString(1);
+ LOG.debug("Found catalog " + strrc);
+ }
+
+ Connection c = md.getConnection();
+
+ int intrc = md.getDatabaseMajorVersion();
+ LOG.debug("DB major version is " + intrc);
+
+ intrc = md.getDatabaseMinorVersion();
+ LOG.debug("DB minor version is " + intrc);
+
+ strrc = md.getDatabaseProductName();
+ LOG.debug("DB product name is " + strrc);
+
+ strrc = md.getDatabaseProductVersion();
+ LOG.debug("DB product version is " + strrc);
+
+ intrc = md.getDefaultTransactionIsolation();
+ LOG.debug("Default transaction isolation is " + intrc);
+
+ intrc = md.getDriverMajorVersion();
+ LOG.debug("Driver major version is " + intrc);
+
+ intrc = md.getDriverMinorVersion();
+ LOG.debug("Driver minor version is " + intrc);
+
+ strrc = md.getDriverName();
+ LOG.debug("Driver name is " + strrc);
+
+ strrc = md.getDriverVersion();
+ LOG.debug("Driver version is " + strrc);
+
+ strrc = md.getExtraNameCharacters();
+ LOG.debug("Extra name characters is " + strrc);
+
+ strrc = md.getIdentifierQuoteString();
+ LOG.debug("Identifier quote string is " + strrc);
+
+ // In Hive 1.2 this always returns an empty RS
+ rs = md.getImportedKeys("a", "b", "d");
+
+ // In Hive 1.2 this always returns an empty RS
+ rs = md.getIndexInfo("a", "b", "d", true, true);
+
+ intrc = md.getJDBCMajorVersion();
+ LOG.debug("JDBC major version is " + intrc);
+
+ intrc = md.getJDBCMinorVersion();
+ LOG.debug("JDBC minor version is " + intrc);
+
+ intrc = md.getMaxColumnNameLength();
+ LOG.debug("Maximum column name length is " + intrc);
+
+ strrc = md.getNumericFunctions();
+ LOG.debug("Numeric functions are " + strrc);
+
+ // In Hive 1.2 this always returns an empty RS
+ rs = md.getPrimaryKeys("a", "b", "d");
+
+ // In Hive 1.2 this always returns an empty RS
+ rs = md.getProcedureColumns("a", "b", "d", "e");
+
+ strrc = md.getProcedureTerm();
+ LOG.debug("Procedures are called " + strrc);
+
+ // In Hive 1.2 this always returns an empty RS
+ rs = md.getProcedures("a", "b", "d");
+
+ strrc = md.getSchemaTerm();
+ LOG.debug("Schemas are called " + strrc);
+
+ rs = md.getSchemas();
+ while (rs.next()) {
+ strrc = rs.getString(1);
+ LOG.debug("Found schema " + strrc);
+ }
+
+ strrc = md.getSearchStringEscape();
+ LOG.debug("Search string escape is " + strrc);
+
+ strrc = md.getStringFunctions();
+ LOG.debug("String functions are " + strrc);
+
+ strrc = md.getSystemFunctions();
+ LOG.debug("System functions are " + strrc);
+
+ rs = md.getTableTypes();
+ while (rs.next()) {
+ strrc = rs.getString(1);
+ LOG.debug("Found table type " + strrc);
+ }
+
+ strrc = md.getTimeDateFunctions();
+ LOG.debug("Time/date functions are " + strrc);
+
+ rs = md.getTypeInfo();
+ while (rs.next()) {
+ strrc = rs.getString(1);
+ LOG.debug("Found type " + strrc);
+ }
+
+ // In Hive 1.2 this always returns an empty RS
+ rs = md.getUDTs("a", "b", "d", null);
+
+ boolrc = md.supportsAlterTableWithAddColumn();
+ LOG.debug("Supports alter table with add column? " + boolrc);
+
+ boolrc = md.supportsAlterTableWithDropColumn();
+ LOG.debug("Supports alter table with drop column? " + boolrc);
+
+ boolrc = md.supportsBatchUpdates();
+ LOG.debug("Supports batch updates? " + boolrc);
+
+ boolrc = md.supportsCatalogsInDataManipulation();
+ LOG.debug("Supports catalogs in data manipulation? " + boolrc);
+
+ boolrc = md.supportsCatalogsInIndexDefinitions();
+ LOG.debug("Supports catalogs in index definition? " + boolrc);
+
+ boolrc = md.supportsCatalogsInPrivilegeDefinitions();
+ LOG.debug("Supports catalogs in privilege definition? " + boolrc);
+
+ boolrc = md.supportsCatalogsInProcedureCalls();
+ LOG.debug("Supports catalogs in procedure calls? " + boolrc);
+
+ boolrc = md.supportsCatalogsInTableDefinitions();
+ LOG.debug("Supports catalogs in table definition? " + boolrc);
+
+ boolrc = md.supportsColumnAliasing();
+ LOG.debug("Supports column aliasing? " + boolrc);
+
+ boolrc = md.supportsFullOuterJoins();
+ LOG.debug("Supports full outer joins? " + boolrc);
+
+ boolrc = md.supportsGroupBy();
+ LOG.debug("Supports group by? " + boolrc);
+
+ boolrc = md.supportsLimitedOuterJoins();
+ LOG.debug("Supports limited outer joins? " + boolrc);
+
+ boolrc = md.supportsMultipleResultSets();
+ LOG.debug("Supports limited outer joins? " + boolrc);
+
+ boolrc = md.supportsNonNullableColumns();
+ LOG.debug("Supports non-nullable columns? " + boolrc);
+
+ boolrc = md.supportsOuterJoins();
+ LOG.debug("Supports outer joins? " + boolrc);
+
+ boolrc = md.supportsPositionedDelete();
+ LOG.debug("Supports positioned delete? " + boolrc);
+
+ boolrc = md.supportsPositionedUpdate();
+ LOG.debug("Supports positioned update? " + boolrc);
+
+ boolrc = md.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT);
+ LOG.debug("Supports result set holdability? " + boolrc);
+
+ boolrc = md.supportsResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE);
+ LOG.debug("Supports result set type? " + boolrc);
+
+ boolrc = md.supportsSavepoints();
+ LOG.debug("Supports savepoints? " + boolrc);
+
+ boolrc = md.supportsSchemasInDataManipulation();
+ LOG.debug("Supports schemas in data manipulation? " + boolrc);
+
+ boolrc = md.supportsSchemasInIndexDefinitions();
+ LOG.debug("Supports schemas in index definitions? " + boolrc);
+
+ boolrc = md.supportsSchemasInPrivilegeDefinitions();
+ LOG.debug("Supports schemas in privilege definitions? " + boolrc);
+
+ boolrc = md.supportsSchemasInProcedureCalls();
+ LOG.debug("Supports schemas in procedure calls? " + boolrc);
+
+ boolrc = md.supportsSchemasInTableDefinitions();
+ LOG.debug("Supports schemas in table definitions? " + boolrc);
+
+ boolrc = md.supportsSelectForUpdate();
+ LOG.debug("Supports select for update? " + boolrc);
+
+ boolrc = md.supportsStoredProcedures();
+ LOG.debug("Supports stored procedures? " + boolrc);
+
+ boolrc = md.supportsTransactions();
+ LOG.debug("Supports transactions? " + boolrc);
+
+ boolrc = md.supportsUnion();
+ LOG.debug("Supports union? " + boolrc);
+
+ boolrc = md.supportsUnionAll();
+ LOG.debug("Supports union all? " + boolrc);
+
+ }
+
+ @Test
+ public void setSchema() throws SQLException {
+ try (Statement stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
+ ResultSet.CONCUR_READ_ONLY)) {
+
+ final String dbName = "odpi_jdbc_test_db";
+
+ final String tableName = "odpi_jdbc_test_table";
+ stmt.execute("drop table if exists " + tableName);
+
+ stmt.execute("drop database if exists " + dbName + " cascade");
+ stmt.execute("create database " + dbName);
+
+ conn.setSchema(dbName);
+
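+ // Verify the new database shows up through the DatabaseMetaData calls.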
+ DatabaseMetaData md = conn.getMetaData();
+
+ ResultSet rs = md.getSchemas(null, dbName);
+
+ while (rs.next()) {
+ String schemaName = rs.getString(2);
+ LOG.debug("Schema name is " + schemaName);
+ }
+
+ stmt.execute("create table " + tableName + " (i int, s varchar(32))");
+
+ rs = md.getTables(null, dbName, tableName, null);
+ while (rs.next()) {
+ String tName = rs.getString(3);
+ LOG.debug("Schema name is " + tName);
+ }
+
+ rs = md.getColumns(null, dbName, tableName, "i");
+ while (rs.next()) {
+ String colName = rs.getString(4);
+ LOG.debug("Schema name is " + colName);
+ }
+
+ rs = md.getFunctions(null, dbName, "foo");
+ while (rs.next()) {
+ String funcName = rs.getString(3);
+ LOG.debug("Schema name is " + funcName);
+ }
+ }
+ }
+
+ @Test
+ public void statement() throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
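+ // cancel() should work even on a statement with no query running.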
+ stmt.cancel();
+ }
+
+ try (Statement stmt = conn.createStatement()) {
+ stmt.clearWarnings();
+
+ final String tableName = "odpi_jdbc_statement_test_table";
+
+ stmt.execute("drop table if exists " + tableName);
+ stmt.execute("create table " + tableName + " (a int, b varchar(32))");
+
+ stmt.executeUpdate("insert into " + tableName + " values (1, 'abc'), (2, 'def')");
+
+ int intrc = stmt.getUpdateCount();
+ LOG.debug("Update count is " + intrc);
+
+ ResultSet rs = stmt.executeQuery("select * from " + tableName);
+ while (rs.next()) {
+ LOG.debug("Fetched " + rs.getInt(1) + "," + rs.getString(2));
+ }
+
+ Connection localConn = stmt.getConnection();
+
+ intrc = stmt.getFetchDirection();
+ LOG.debug("Fetch direction is " + intrc);
+
+ intrc = stmt.getFetchSize();
+ LOG.debug("Fetch size is " + intrc);
+
+ intrc = stmt.getMaxRows();
+ LOG.debug("max rows is " + intrc);
+
+ boolean boolrc = stmt.getMoreResults();
+ LOG.debug("more results is " + boolrc);
+
+ intrc = stmt.getQueryTimeout();
+ LOG.debug("query timeout is " + intrc);
+
+ stmt.execute("select * from " + tableName);
+ rs = stmt.getResultSet();
+ while (rs.next()) {
+ LOG.debug("Fetched " + rs.getInt(1) + "," + rs.getString(2));
+ }
+
+ intrc = stmt.getResultSetType();
+ LOG.debug("result set type is " + intrc);
+
+ SQLWarning warning = stmt.getWarnings();
+ while (warning != null) {
+ LOG.debug("Found a warning: " + warning.getMessage());
+ warning = warning.getNextWarning();
+ }
+
+ boolrc = stmt.isClosed();
+ LOG.debug("is closed " + boolrc);
+
+ boolrc = stmt.isCloseOnCompletion();
+ LOG.debug("is close on completion " + boolrc);
+
+ boolrc = stmt.isPoolable();
+ LOG.debug("is poolable " + boolrc);
+
+ stmt.setFetchDirection(ResultSet.FETCH_FORWARD);
+ stmt.setFetchSize(500);
+ stmt.setMaxRows(500);
+ }
+ }
+
+ @Test
+ public void preparedStmtAndResultSet() throws SQLException {
+ final String tableName = "odpi_jdbc_psars_test_table";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + tableName);
+ stmt.execute("create table " + tableName + " (bo boolean, ti tinyint, db double, fl float, " +
+ "i int, lo bigint, sh smallint, st varchar(32))");
+ }
+
+ // NOTE Hive 1.2 theoretically supports binary, Date & Timestamp in JDBC, but I get errors when I
+ // try to put them in the query.
+ try (PreparedStatement ps = conn.prepareStatement("insert into " + tableName +
+ " values (?, ?, ?, ?, ?, ?, ?, ?)")) {
+ ps.setBoolean(1, true);
+ ps.setByte(2, (byte)1);
+ ps.setDouble(3, 3.141592654);
+ ps.setFloat(4, 3.14f);
+ ps.setInt(5, 3);
+ ps.setLong(6, 10L);
+ ps.setShort(7, (short)20);
+ ps.setString(8, "abc");
+ ps.executeUpdate();
+ }
+
+ try (PreparedStatement ps = conn.prepareStatement("insert into " + tableName + " (i, st) " +
+ "values(?, ?)", ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY)) {
+ ps.setNull(1, Types.INTEGER);
+ ps.setObject(2, "mary had a little lamb");
+ ps.executeUpdate();
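+ // Set parameters, discard them with clearParameters(), then set them again before executing.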
+ ps.setNull(1, Types.INTEGER, null);
+ ps.setString(2, "its fleece was white as snow");
+ ps.clearParameters();
+ ps.setNull(1, Types.INTEGER, null);
+ ps.setString(2, "its fleece was white as snow");
+ ps.execute();
+
+ }
+
+ try (Statement stmt = conn.createStatement()) {
+
+ ResultSet rs = stmt.executeQuery("select * from " + tableName);
+
+ ResultSetMetaData md = rs.getMetaData();
+
+ int colCnt = md.getColumnCount();
+ LOG.debug("Column count is " + colCnt);
+
+ for (int i = 1; i <= colCnt; i++) {
+ LOG.debug("Looking at column " + i);
+ String strrc = md.getColumnClassName(i);
+ LOG.debug("Column class name is " + strrc);
+
+ int intrc = md.getColumnDisplaySize(i);
+ LOG.debug("Column display size is " + intrc);
+
+ strrc = md.getColumnLabel(i);
+ LOG.debug("Column label is " + strrc);
+
+ strrc = md.getColumnName(i);
+ LOG.debug("Column name is " + strrc);
+
+ intrc = md.getColumnType(i);
+ LOG.debug("Column type is " + intrc);
+
+ strrc = md.getColumnTypeName(i);
+ LOG.debug("Column type name is " + strrc);
+
+ intrc = md.getPrecision(i);
+ LOG.debug("Precision is " + intrc);
+
+ intrc = md.getScale(i);
+ LOG.debug("Scale is " + intrc);
+
+ boolean boolrc = md.isAutoIncrement(i);
+ LOG.debug("Is auto increment? " + boolrc);
+
+ boolrc = md.isCaseSensitive(i);
+ LOG.debug("Is case sensitive? " + boolrc);
+
+ boolrc = md.isCurrency(i);
+ LOG.debug("Is currency? " + boolrc);
+
+ intrc = md.getScale(i);
+ LOG.debug("Scale is " + intrc);
+
+ intrc = md.isNullable(i);
+ LOG.debug("Is nullable? " + intrc);
+
+ boolrc = md.isReadOnly(i);
+ LOG.debug("Is read only? " + boolrc);
+
+ }
+
+ while (rs.next()) {
+ LOG.debug("bo = " + rs.getBoolean(1));
+ LOG.debug("bo = " + rs.getBoolean("bo"));
+ LOG.debug("ti = " + rs.getByte(2));
+ LOG.debug("ti = " + rs.getByte("ti"));
+ LOG.debug("db = " + rs.getDouble(3));
+ LOG.debug("db = " + rs.getDouble("db"));
+ LOG.debug("fl = " + rs.getFloat(4));
+ LOG.debug("fl = " + rs.getFloat("fl"));
+ LOG.debug("i = " + rs.getInt(5));
+ LOG.debug("i = " + rs.getInt("i"));
+ LOG.debug("lo = " + rs.getLong(6));
+ LOG.debug("lo = " + rs.getLong("lo"));
+ LOG.debug("sh = " + rs.getShort(7));
+ LOG.debug("sh = " + rs.getShort("sh"));
+ LOG.debug("st = " + rs.getString(8));
+ LOG.debug("st = " + rs.getString("st"));
+ LOG.debug("tm = " + rs.getObject(8));
+ LOG.debug("tm = " + rs.getObject("st"));
+ LOG.debug("tm was null " + rs.wasNull());
+ }
+ LOG.debug("bo is column " + rs.findColumn("bo"));
+
+ int intrc = rs.getConcurrency();
+ LOG.debug("concurrency " + intrc);
+
+ intrc = rs.getFetchDirection();
+ LOG.debug("fetch direction " + intrc);
+
+ intrc = rs.getType();
+ LOG.debug("type " + intrc);
+
+ Statement copy = rs.getStatement();
+
+ SQLWarning warning = rs.getWarnings();
+ while (warning != null) {
+ LOG.debug("Found a warning: " + warning.getMessage());
+ warning = warning.getNextWarning();
+ }
+ rs.clearWarnings();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/bigtop/blob/5e342c45/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestSql.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestSql.java b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestSql.java
new file mode 100644
index 0000000..f247841
--- /dev/null
+++ b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestSql.java
@@ -0,0 +1,337 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.odpi.specs.runtime.hive;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.junit.Test;
+
+import java.sql.SQLException;
+import java.sql.Statement;
+
+// This does not test every option that Hive supports, but does try to touch the major
+// options, especially anything unique to Hive. See each test for areas tested and not tested.
+public class TestSql extends JdbcConnector {
+ private static final Log LOG = LogFactory.getLog(TestSql.class.getName());
+
+ @Test
+ public void db() throws SQLException {
+ final String db1 = "odpi_sql_db1";
+ final String db2 = "odpi_sql_db2";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + db1 + " cascade");
+
+ // Simple create database
+ stmt.execute("create database " + db1);
+ stmt.execute("drop database " + db1);
+
+ stmt.execute("drop schema if exists " + db2 + " cascade");
+
+ String location = getProperty(LOCATION, "a writable directory in HDFS");
+
+ // All the bells and whistles
+ stmt.execute("create schema if not exists " + db2 + " comment 'a db' location '" + location +
+ "' with dbproperties ('a' = 'b')");
+
+ stmt.execute("alter database " + db2 + " set dbproperties ('c' = 'd')");
+
+ stmt.execute("drop database " + db2 + " restrict");
+ }
+ }
+
+ @Test
+ public void table() throws SQLException {
+ final String table1 = "odpi_sql_table1";
+ final String table2 = "odpi_sql_table2";
+ final String table3 = "odpi_sql_table3";
+ final String table4 = "odpi_sql_table4";
+ final String table5 = "odpi_sql_table5";
+
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + table1);
+ stmt.execute("drop table if exists " + table2);
+ stmt.execute("drop table if exists " + table3);
+ stmt.execute("drop table if exists " + table4);
+ stmt.execute("drop table if exists " + table5);
+
+ String location = getProperty(LOCATION, "a writable directory in HDFS");
+ stmt.execute("create external table " + table1 + "(a int, b varchar(32)) location '" +
+ location + "'");
+
+ // With a little bit of everything, except partitions; those are tested below
+ stmt.execute("create table if not exists " + table2 +
+ "(c1 tinyint," +
+ " c2 smallint," +
+ " c3 int comment 'a column comment'," +
+ " c4 bigint," +
+ " c5 float," +
+ " c6 double," +
+ " c7 decimal," +
+ " c8 decimal(12)," +
+ " c9 decimal(8,2)," +
+ " c10 timestamp," +
+ " c11 date," +
+ " c12 string," +
+ " c13 varchar(120)," +
+ " c14 char(10)," +
+ " c15 boolean," +
+ " c16 binary," +
+ " c17 array<string>," +
+ " c18 map <string, string>," +
+ " c19 struct<s1:int, s2:bigint>," +
+ " c20 uniontype<int, string>) " +
+ "comment 'table comment'" +
+ "clustered by (c1) sorted by (c2) into 10 buckets " +
+ "stored as orc " +
+ "tblproperties ('a' = 'b')");
+
+ // Not testing SKEWED BY, ROW FORMAT, or STORED BY (storage handlers)
+
+ stmt.execute("create temporary table " + table3 + " like " + table2);
+
+ stmt.execute("insert into " + table1 + " values (3, 'abc'), (4, 'def')");
+
+ stmt.execute("create table " + table4 + " as select a, b from " + table1);
+
+ stmt.execute("truncate table " + table4);
+
+ stmt.execute("alter table " + table4 + " rename to " + table5);
+ stmt.execute("alter table " + table2 + " set tblproperties ('c' = 'd')");
+
+ // Not testing alter of clustered or sorted by, because that's suicidal
+ // Not testing alter of skewed or serde properties since we didn't test it for create
+ // above.
+
+ stmt.execute("drop table " + table1 + " purge");
+ stmt.execute("drop table " + table2);
+ stmt.execute("drop table " + table3);
+ stmt.execute("drop table " + table5);
+ }
+ }
+
+ @Test
+ public void partitionedTable() throws SQLException {
+ final String table1 = "odpi_sql_ptable1";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + table1);
+
+ stmt.execute("create table " + table1 +
+ "(c1 int," +
+ " c2 varchar(32))" +
+ "partitioned by (p1 string comment 'a partition column')" +
+ "stored as orc");
+
+ stmt.execute("alter table " + table1 + " add partition (p1 = 'a')");
+ stmt.execute("insert into " + table1 + " partition (p1 = 'a') values (1, 'abc')");
+ stmt.execute("insert into " + table1 + " partition (p1 = 'a') values (2, 'def')");
+ stmt.execute("insert into " + table1 + " partition (p1 = 'a') values (3, 'ghi')");
+ stmt.execute("alter table " + table1 + " partition (p1 = 'a') concatenate");
+ stmt.execute("alter table " + table1 + " touch partition (p1 = 'a')");
+
+ stmt.execute("alter table " + table1 + " add columns (c3 float)");
+ stmt.execute("alter table " + table1 + " drop partition (p1 = 'a')");
+
+ // Not testing rename partition, exchange partition, msck repair, archive/unarchive,
+ // set location, enable/disable no_drop/offline, compact (because not everyone may have
+ // ACID on), change column
+
+ stmt.execute("drop table " + table1);
+
+ }
+ }
+
+ @Test
+ public void view() throws SQLException {
+ final String table1 = "odpi_sql_vtable1";
+ final String view1 = "odpi_sql_view1";
+ final String view2 = "odpi_sql_view2";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + table1);
+ stmt.execute("drop view if exists " + view1);
+ stmt.execute("drop view if exists " + view2);
+ stmt.execute("create table " + table1 + "(a int, b varchar(32))");
+ stmt.execute("create view " + view1 + " as select a from " + table1);
+
+ stmt.execute("create view if not exists " + view2 +
+ " comment 'a view comment' " +
+ "tblproperties ('a' = 'b') " +
+ "as select b from " + table1);
+
+ stmt.execute("alter view " + view1 + " as select a, b from " + table1);
+ stmt.execute("alter view " + view2 + " set tblproperties('c' = 'd')");
+
+ stmt.execute("drop view " + view1);
+ stmt.execute("drop view " + view2);
+ }
+ }
+
+ // Not testing indices because they are currently useless in Hive
+ // Not testing macros because as far as I know no one uses them
+
+ @Test
+ public void function() throws SQLException {
+ final String func1 = "odpi_sql_func1";
+ final String func2 = "odpi_sql_func2";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("create temporary function " + func1 +
+ " as 'org.apache.hadoop.hive.ql.udf.UDFToInteger'");
+ stmt.execute("drop temporary function " + func1);
+
+ stmt.execute("drop function if exists " + func2);
+
+ stmt.execute("create function " + func2 +
+ " as 'org.apache.hadoop.hive.ql.udf.UDFToInteger'");
+ stmt.execute("drop function " + func2);
+ }
+ }
+
+ // Not testing grant/revoke/roles as different vendors use different security solutions
+ // and hence different things will work here.
+
+ // This covers insert (non-partitioned, partitioned, dynamic partitions, overwrite, with
+ // values and select), and multi-insert. Load is not tested as there's no guarantee that the
+ // test machine has access to HDFS and thus the ability to upload a file.
+ @Test
+ public void insert() throws SQLException {
+ final String table1 = "odpi_insert_table1";
+ final String table2 = "odpi_insert_table2";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + table1);
+ stmt.execute("create table " + table1 +
+ "(c1 tinyint," +
+ " c2 smallint," +
+ " c3 int," +
+ " c4 bigint," +
+ " c5 float," +
+ " c6 double," +
+ " c7 decimal(8,2)," +
+ " c8 varchar(120)," +
+ " c9 char(10)," +
+ " c10 boolean)" +
+ " partitioned by (p1 string)");
+
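+ // Most inserts below run under "explain" so the SQL is compiled and validated without
+ // launching a job.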
+ // insert with partition
+ stmt.execute("explain insert into " + table1 + " partition (p1 = 'a') values " +
+ "(1, 2, 3, 4, 1.1, 2.2, 3.3, 'abcdef', 'ghi', true)," +
+ "(5, 6, 7, 8, 9.9, 8.8, 7.7, 'jklmno', 'pqr', true)");
+
+ stmt.execute("set hive.exec.dynamic.partition.mode=nonstrict");
+
+ // dynamic partition
+ stmt.execute("explain insert into " + table1 + " partition (p1) values " +
+ "(1, 2, 3, 4, 1.1, 2.2, 3.3, 'abcdef', 'ghi', true, 'b')," +
+ "(5, 6, 7, 8, 9.9, 8.8, 7.7, 'jklmno', 'pqr', true, 'b')");
+
+ stmt.execute("drop table if exists " + table2);
+
+ stmt.execute("create table " + table2 +
+ "(c1 tinyint," +
+ " c2 smallint," +
+ " c3 int," +
+ " c4 bigint," +
+ " c5 float," +
+ " c6 double," +
+ " c7 decimal(8,2)," +
+ " c8 varchar(120)," +
+ " c9 char(10)," +
+ " c10 boolean)");
+
+ stmt.execute("explain insert into " + table2 + " values " +
+ "(1, 2, 3, 4, 1.1, 2.2, 3.3, 'abcdef', 'ghi', true)," +
+ "(5, 6, 7, 8, 9.9, 8.8, 7.7, 'jklmno', 'pqr', true)");
+
+ stmt.execute("explain insert overwrite table " + table2 + " select c1, c2, c3, c4, c5, c6, " +
+ "c7, c8, c9, c10 from " + table1);
+
+ // multi-insert
+ stmt.execute("from " + table1 +
+ " insert into table " + table1 + " partition (p1 = 'c') " +
+ " select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10" +
+ " insert into table " + table2 + " select c1, c2, c3, c4, c5, c6, c7, c8, c9, c10");
+ }
+ }
+
+ // This tests CTEs
+ @Test
+ public void cte() throws SQLException {
+ final String table1 = "odpi_cte_table1";
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists " + table1);
+ stmt.execute("create table " + table1 + "(c1 int, c2 varchar(32))");
+ stmt.execute("with cte1 as (select c1 from " + table1 + " where c1 < 10) " +
+ " select c1 from cte1");
+ }
+ }
+
+ // This tests select: all/distinct, single tables, joins (inner & outer),
+ // group by (with and without having), order by, cluster by/distribute by/sort by, limit, union,
+ // subqueries, and over. CTEs are covered in the cte test above.
+
+ @Test
+ public void select() throws SQLException {
+ final String[] tables = {"odpi_select_table1", "odpi_select_table2"};
+ try (Statement stmt = conn.createStatement()) {
+ for (int i = 0; i < tables.length; i++) {
+ stmt.execute("drop table if exists " + tables[i]);
+ stmt.execute("create table " + tables[i] + "(c1 int, c2 varchar(32))");
+ }
+
+ // single table queries tested above in several places
+
+ stmt.execute("explain select all a.c2, SUM(a.c1), SUM(b.c1) " +
+ "from " + tables[0] + " a join " + tables[1] + " b on (a.c2 = b.c2) " +
+ "group by a.c2 " +
+ "order by a.c2 asc " +
+ "limit 10");
+
+ stmt.execute("explain select distinct a.c2 " +
+ "from " + tables[0] + " a left outer join " + tables[1] + " b on (a.c2 = b.c2) " +
+ "order by a.c2 desc ");
+
+ stmt.execute("explain select a.c2, SUM(a.c1) " +
+ "from " + tables[0] + " a right outer join " + tables[1] + " b on (a.c2 = b.c2) " +
+ "group by a.c2 " +
+ "having SUM(b.c1) > 0 " +
+ "order by a.c2 ");
+
+ stmt.execute("explain select a.c2, rank() over (partition by a.c1) " +
+ "from " + tables[0] + " a full outer join " + tables[1] + " b on (a.c2 = b.c2) ");
+
+ stmt.execute("explain select c2 from " + tables[0] + " union all select c2 from " + tables[1]);
+
+ stmt.execute("explain select * from " + tables[0] + " distribute by c1 sort by c2");
+ stmt.execute("explain select * from " + tables[0] + " cluster by c1");
+
+ stmt.execute("explain select * from (select c1 from " + tables[0] + ") t");
+ stmt.execute("explain select * from " + tables[0] + " where c1 in (select c1 from " + tables[1] +
+ ")");
+
+ }
+
+ }
+
+ // Update and delete are not tested because not everyone configures their system to run
+ // with ACID.
+
+
+}
http://git-wip-us.apache.org/repos/asf/bigtop/blob/5e342c45/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java
----------------------------------------------------------------------
diff --git a/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java
new file mode 100644
index 0000000..8e0abda
--- /dev/null
+++ b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.odpi.specs.runtime.hive;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+public class TestThrift {
+
+ private static final Log LOG = LogFactory.getLog(TestThrift.class.getName());
+
+ private static IMetaStoreClient client = null;
+ private static HiveConf conf;
+
+ private Random rand;
+
+ @BeforeClass
+ public static void connect() throws MetaException {
+ if (JdbcConnector.testActive(JdbcConnector.TEST_THRIFT, "Test Thrift ")) {
+ String url = JdbcConnector.getProperty(JdbcConnector.METASTORE_URL, "Thrift metastore URL");
+ conf = new HiveConf();
+ conf.setVar(HiveConf.ConfVars.METASTOREURIS, url);
+ LOG.info("Set to test against metastore at " + url);
+ client = new HiveMetaStoreClient(conf);
+ }
+ }
+
+ @Before
+ public void checkIfActive() {
+ Assume.assumeTrue(JdbcConnector.testActive(JdbcConnector.TEST_THRIFT, "Test Thrift "));
+ rand = new Random();
+ }
+
+ @Test
+ public void db() throws TException {
+ final String dbName = "odpi_thrift_db_" + rand.nextInt(Integer.MAX_VALUE);
+
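+ // Basic database lifecycle: create, fetch, alter, list, then drop.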
+ Database db = new Database(dbName, "a db", null, new HashMap<String, String>());
+ client.createDatabase(db);
+ db = client.getDatabase(dbName);
+ Assert.assertNotNull(db);
+ db = new Database(db);
+ db.getParameters().put("a", "b");
+ client.alterDatabase(dbName, db);
+ List<String> alldbs = client.getDatabases("odpi_*");
+ Assert.assertNotNull(alldbs);
+ Assert.assertTrue(alldbs.size() > 0);
+ alldbs = client.getAllDatabases();
+ Assert.assertNotNull(alldbs);
+ Assert.assertTrue(alldbs.size() > 0);
+ client.dropDatabase(dbName, true, true);
+ }
+
+ // Not testing types calls, as they aren't used AFAIK
+
+ @Test
+ public void nonPartitionedTable() throws TException {
+ final String tableName = "odpi_thrift_table_" + rand.nextInt(Integer.MAX_VALUE);
+
+ // I don't test every operation related to tables, but only those that are frequently used.
+ SerDeInfo serde = new SerDeInfo("default_serde",
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
+ FieldSchema fs = new FieldSchema("a", "int", "no comment");
+ StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ Table table = new Table(tableName, "default", "me", 0, 0, 0, sd, null,
+ new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
+ client.createTable(table);
+
+ table = client.getTable("default", tableName);
+ Assert.assertNotNull(table);
+
+ List<Table> tables =
+ client.getTableObjectsByName("default", Collections.singletonList(tableName));
+ Assert.assertNotNull(tables);
+ Assert.assertEquals(1, tables.size());
+
+ List<String> tableNames = client.getTables("default", "odpi_*");
+ Assert.assertNotNull(tableNames);
+ Assert.assertTrue(tableNames.size() >= 1);
+
+ tableNames = client.getAllTables("default");
+ Assert.assertNotNull(tableNames);
+ Assert.assertTrue(tableNames.size() >= 1);
+
+ List<FieldSchema> cols = client.getFields("default", tableName);
+ Assert.assertNotNull(cols);
+ Assert.assertEquals(1, cols.size());
+
+ cols = client.getSchema("default", tableName);
+ Assert.assertNotNull(cols);
+ Assert.assertEquals(1, cols.size());
+
+ table = new Table(table);
+ table.getParameters().put("a", "b");
+ client.alter_table("default", tableName, table, false);
+
+ table.getParameters().put("c", "d");
+ client.alter_table("default", tableName, table);
+
+ client.dropTable("default", tableName, true, false);
+ }
+
+ @Test
+ public void partitionedTable() throws TException {
+ final String tableName = "odpi_thrift_partitioned_table_" + rand.nextInt(Integer.MAX_VALUE);
+
+ // I don't test every operation related to tables, but only those that are frequently used.
+ SerDeInfo serde = new SerDeInfo("default_serde",
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
+ FieldSchema fs = new FieldSchema("a", "int", "no comment");
+ StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ FieldSchema pk = new FieldSchema("pk", "string", "");
+ Table table = new Table(tableName, "default", "me", 0, 0, 0, sd, Collections.singletonList(pk),
+ new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
+ client.createTable(table);
+
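+ // Add one partition by itself, then two more in a single batch call.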
+ sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ Partition partition = new Partition(Collections.singletonList("x"), "default", tableName, 0,
+ 0, sd, new HashMap<String, String>());
+ client.add_partition(partition);
+
+ List<Partition> partitions = new ArrayList<>(2);
+ sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ partitions.add(new Partition(Collections.singletonList("y"), "default", tableName, 0,
+ 0, sd, new HashMap<String, String>()));
+ sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ partitions.add(new Partition(Collections.singletonList("z"), "default", tableName, 0,
+ 0, sd, new HashMap<String, String>()));
+ client.add_partitions(partitions);
+
+ List<Partition> parts = client.listPartitions("default", tableName, (short)-1);
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(3, parts.size());
+
+ parts = client.listPartitions("default", tableName, Collections.singletonList("x"),
+ (short)-1);
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(1, parts.size());
+
+ parts = client.listPartitionsWithAuthInfo("default", tableName, (short)-1, "me",
+ Collections.<String>emptyList());
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(3, parts.size());
+
+ List<String> partNames = client.listPartitionNames("default", tableName, (short)-1);
+ Assert.assertNotNull(partNames);
+ Assert.assertEquals(3, partNames.size());
+
+ parts = client.listPartitionsByFilter("default", tableName, "pk = \"x\"", (short)-1);
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(1, parts.size());
+
+ parts = client.getPartitionsByNames("default", tableName, Collections.singletonList("pk=x"));
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(1, parts.size());
+
+ partition = client.getPartition("default", tableName, Collections.singletonList("x"));
+ Assert.assertNotNull(partition);
+
+ partition = client.getPartition("default", tableName, "pk=x");
+ Assert.assertNotNull(partition);
+
+ partition = client.getPartitionWithAuthInfo("default", tableName, Collections.singletonList("x"),
+ "me", Collections.<String>emptyList());
+ Assert.assertNotNull(partition);
+
+ partition = new Partition(partition);
+ partition.getParameters().put("a", "b");
+ client.alter_partition("default", tableName, partition);
+
+ for (Partition p : parts) p.getParameters().put("c", "d");
+ client.alter_partitions("default", tableName, parts);
+
+ // Not testing get_partitions_by_expr because I don't want to hard code some byte sequence
+ // from the parser. The odds that anyone other than the Hive parser would call this method seem
+ // low, since you'd have to exactly match the serialization of the Hive parser.
+
+ // Not testing partition marking events, not used by anyone but Hive replication AFAIK
+
+ client.dropPartition("default", tableName, "pk=x", true);
+ client.dropPartition("default", tableName, Collections.singletonList("y"), true);
+ }
+
+ // Not testing index calls, as no one uses indices
+
+
+ // Not sure if anyone uses stats calls or not. Other query engines might. Ignoring for now.
+
+ // Not sure if anyone else uses functions, though I'm guessing not, as without Hive classes they
+ // won't be runnable.
+
+ // Not testing authorization calls as AFAIK no one else uses Hive security
+
+ // Not testing transaction/locking calls, as those are used only by Hive.
+
+ // Not testing notification logging calls, as those are used only by Hive replication.
+
+}
http://git-wip-us.apache.org/repos/asf/bigtop/blob/5e342c45/bigtop-tests/smoke-tests/odpi-runtime/src/test/python/find-public-apis.py
----------------------------------------------------------------------
diff --git a/bigtop-tests/smoke-tests/odpi-runtime/src/test/python/find-public-apis.py b/bigtop-tests/smoke-tests/odpi-runtime/src/test/python/find-public-apis.py
new file mode 100755
index 0000000..091c496
--- /dev/null
+++ b/bigtop-tests/smoke-tests/odpi-runtime/src/test/python/find-public-apis.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
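+# Scans a source tree for classes annotated @InterfaceAudience.Public and warns
+# when the given Java API compatibility report (HTML) shows changes to any of them.
+# Example invocation (paths are illustrative):
+#   find-public-apis.py -d /path/to/hadoop-source -r compat-report.html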
+import os
+import re
+import warnings
+from optparse import OptionParser
+
+def main():
+ parser = OptionParser()
+ parser.add_option("-d", "--directory", help="Top level directory of source tree")
+ parser.add_option("-r", "--report", help="API compatibility report file, in HTML format")
+
+ (options, args) = parser.parse_args()
+
+ # Both arguments are required.
+ if options.directory is None:
+ print "You must specify a top level directory of the source tree"
+ return 1
+
+ if options.report is None:
+ print "You must specify the report to check against"
+ return 1
+
+ publicClasses = set()
+ for directory in os.walk(options.directory):
+ for afile in directory[2]:
+ if re.search("\.java$", afile) != None:
+ handle = open(os.path.join(directory[0], afile))
+ # Figure out the package we're in
+ pre = re.search(r"org/apache/hadoop[\w/]*", directory[0])
+ if pre is None:
+ warnings.warn("No package for " + directory[0])
+ continue
+ package = pre.group(0)
+ expecting = 0
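+ # expecting == 1 means a previous line carried @InterfaceAudience.Public, so the
+ # next class declaration found is recorded as public.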
+ for line in handle:
+ if re.search("@InterfaceAudience.Public", line) != None:
+ expecting = 1
+ classname = re.search(r"class (\w*)", line)
+ if classname is not None and expecting == 1:
+ publicClasses.add(package + "/" + classname.group(1))
+ expecting = 0
+ handle.close()
+
+ handle = open(options.report)
+ haveChecked = set()
+ for line in handle:
+ classre = re.search("mangled: <b>(org/apache/hadoop[\w/]+)", line)
+ if classre != None:
+ classname = classre.group(1)
+ if classname not in haveChecked:
+ if classname in publicClasses:
+ print "Warning, found change in public class " + classname
+ haveChecked.add(classname)
+ handle.close()
+
+
+if __name__ == "__main__":
+ main()