Posted to commits@hive.apache.org by br...@apache.org on 2014/09/08 06:38:26 UTC
svn commit: r1623263 [4/28] - in /hive/branches/spark: ./
accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/
ant/src/org/apache/hadoop/hive/ant/ beeline/src/java/org/apache/hive/beeline/
beeline/src/test/org/apache/hive/beeline/ bin/ ...
Modified: hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java (original)
+++ hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java Mon Sep 8 04:38:17 2014
@@ -653,7 +653,7 @@ public class Server {
verifyParam(inputs, "input");
verifyParam(mapper, "mapper");
verifyParam(reducer, "reducer");
-
+
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
userArgs.put("input", inputs);
@@ -680,8 +680,8 @@ public class Server {
/**
* Run a MapReduce Jar job.
* Params correspond to the REST api params
- * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
- * metastore, which requires additional steps for WebHCat to perform in a secure cluster.
+ * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
+ * metastore, which requires additional steps for WebHCat to perform in a secure cluster.
* @param callback URL which WebHCat will call when the hive job finishes
* @see org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob
*/
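[Editor's note] For orientation, the mapreduce/jar endpoint documented above is driven over REST; a hypothetical invocation in the curl style this Javadoc uses elsewhere (host, jar name, and class below are placeholders, not from this patch):

  curl -s -d user.name=hsubramaniyan \
       -d jar=wordcount.jar \
       -d class=org.example.WordCount \
       'http://localhost:50111/templeton/v1/mapreduce/jar'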
@@ -703,7 +703,7 @@ public class Server {
verifyUser();
verifyParam(jar, "jar");
verifyParam(mainClass, "class");
-
+
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
userArgs.put("jar", jar);
@@ -729,7 +729,7 @@ public class Server {
* Run a Pig job.
* Params correspond to the REST api params. If '-useHCatalog' is in {@code pigArgs},
* {@code usesHcatalog} is interpreted as true.
- * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
+ * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
* metastore, which requires additional steps for WebHCat to perform in a secure cluster.
* This does nothing to ensure that Pig is installed on the target node in the cluster.
* @param callback URL which WebHCat will call when the hive job finishes
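[Editor's note] A hypothetical Pig submission in the same curl style, showing how '-useHCatalog' would be passed as an arg parameter (all values are placeholders):

  curl -s -d user.name=hsubramaniyan \
       -d file=script.pig \
       -d arg=-useHCatalog \
       'http://localhost:50111/templeton/v1/pig'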
@@ -752,7 +752,7 @@ public class Server {
if (execute == null && srcFile == null) {
throw new BadParam("Either execute or file parameter required");
}
-
+
//add all function arguments to a map
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
@@ -819,7 +819,7 @@ public class Server {
* @param execute SQL statement to run, equivalent to "-e" from hive command line
* @param srcFile name of hive script file to run, equivalent to "-f" from hive
* command line
- * @param hiveArgs additional command line argument passed to the hive command line.
+ * @param hiveArgs additional command line arguments passed to the hive command line.
* Please check https://cwiki.apache.org/Hive/languagemanual-cli.html
* for detailed explanation of command line arguments
* @param otherFiles additional files to be shipped to the launcher, such as the jars
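[Editor's note] A hypothetical Hive submission in the same curl style, using the "execute" form described above (values are placeholders):

  curl -s -d user.name=hsubramaniyan \
       -d execute='select count(*) from mytable;' \
       -d statusdir=hive.output \
       'http://localhost:50111/templeton/v1/hive'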
@@ -846,7 +846,7 @@ public class Server {
if (execute == null && srcFile == null) {
throw new BadParam("Either execute or file parameter required");
}
-
+
//add all function arguments to a map
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
@@ -903,42 +903,42 @@ public class Server {
* Example usages:
* 1. curl -s 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan'
* Return all the Job IDs submitted by hsubramaniyan
- * 2. curl -s
+ * 2. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&showall=true'
* Return all the Job IDs that are visible to hsubramaniyan
* 3. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&jobid=job_201312091733_0003'
* Return all the Job IDs for hsubramaniyan after job_201312091733_0003.
- * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
+ * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
* user.name=hsubramaniyan&jobid=job_201312091733_0003&numrecords=5'
- * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
- * 5. curl -s
+ * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
+ * 5. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&numrecords=5'
- * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after sorting the Job ID list
+ * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after sorting the Job ID list
* lexicographically.
* </p>
* <p>
* Supporting pagination using "jobid" and "numrecords" parameters:
* Step 1: Get the start "jobid" = job_xxx_000, "numrecords" = n
- * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
- * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
+ * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
+ * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
* last record and get the Job Id of the last record as job_yyy_k, else quit.
* Step 4: set "jobid"=job_yyy_k and go to step 2.
- * </p>
+ * </p>
* @param fields If "fields" set to "*", the request will return full details of the job.
* If "fields" is missing, will only return the job ID. Currently the value can only
* be "*", other values are not allowed and will throw exception.
* @param showall If "showall" is set to "true", the request will return all jobs the user
* has permission to view, not only the jobs belonging to the user.
- * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater
- * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001",
- * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
+ * @param jobid If "jobid" is present, only the records whose Job Id is lexicographically
+ * greater than "jobid" are returned. For example, if "jobid" = "job_201312091733_0001",
+ * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
* records returned depends on the value of "numrecords".
- * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
- * records appearing after "jobid" will be returned after sorting the Job Id list
- * lexicographically.
- * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
- * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
+ * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
+ * records appearing after "jobid" will be returned after sorting the Job Id list
+ * lexicographically.
+ * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
+ * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
* and "numrecords" is missing, all the records whose Job Id is greater than "jobid" are returned.
* @return list of job items based on the filter conditions specified by the user.
*/
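[Editor's note] The pagination recipe in steps 1-4 can be scripted; a hedged shell sketch, assuming jq is installed and that each entry of the JSON response exposes the job id under .id (verify against the actual response shape):

  jobid="job_201312091733_0003"; n=5
  while :; do
    page=$(curl -s "http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&jobid=$jobid&numrecords=$n")
    count=$(echo "$page" | jq 'length')
    [ "$count" -lt "$n" ] && break            # short page: nothing more to fetch
    jobid=$(echo "$page" | jq -r '.[-1].id')  # continue after the last id returned
  done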
@@ -950,7 +950,7 @@ public class Server {
@QueryParam("jobid") String jobid,
@QueryParam("numrecords") String numrecords)
throws NotAuthorizedException, BadParam, IOException, InterruptedException {
-
+
verifyUser();
boolean showDetails = false;
@@ -971,9 +971,9 @@ public class Server {
try {
if (numrecords != null) {
numRecords = Integer.parseInt(numrecords);
- if (numRecords <= 0) {
- throw new BadParam("numrecords should be an integer > 0");
- }
+ if (numRecords <= 0) {
+ throw new BadParam("numrecords should be an integer > 0");
+ }
}
else {
numRecords = -1;
@@ -983,18 +983,18 @@ public class Server {
throw new BadParam("Invalid numrecords format: numrecords should be an integer > 0");
}
- // Sort the list lexicographically
+ // Sort the list lexicographically
Collections.sort(list);
for (String job : list) {
// If numRecords = -1, fetch all records.
// Hence skip all the below checks when numRecords = -1.
if (numRecords != -1) {
- // If currRecord >= numRecords, we have already fetched the top #numRecords
+ // If currRecord >= numRecords, we have already fetched the top #numRecords
if (currRecord >= numRecords) {
break;
- }
- // If the current record needs to be returned based on the
+ }
+ // If the current record needs to be returned based on the
// filter conditions specified by the user, increment the counter
else if ((jobid != null && job.compareTo(jobid) > 0) || jobid == null) {
currRecord++;
@@ -1101,7 +1101,7 @@ public class Server {
* value of user.name query param; in kerberos mode it's the kinit'ed user.
*/
private String getRequestingUser() {
- if (theSecurityContext == null) {
+ if (theSecurityContext == null) {
return null;
}
String userName = null;
@@ -1114,7 +1114,7 @@ public class Server {
if(userName == null) {
return null;
}
- //map hue/foo.bar@something.com->hue since user group checks
+ //map hue/foo.bar@something.com->hue since user group checks
// and config files are in terms of short name
return UserGroupInformation.createRemoteUser(userName).getShortUserName();
}
@@ -1161,7 +1161,7 @@ public class Server {
return unkHost;
}
}
-
+
private void checkEnableLogPrerequisite(boolean enablelog, String statusdir) throws BadParam {
if (enablelog && !TempletonUtils.isset(statusdir))
throw new BadParam("enablelog is only applicable when statusdir is set");
Modified: hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java (original)
+++ hive/branches/spark/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java Mon Sep 8 04:38:17 2014
@@ -169,9 +169,9 @@ public class JobState {
String childJobIDs = getField("children");
if (childJobIDs != null) {
for (String jobid : childJobIDs.split(",")) {
- children.add(new JobState(jobid, config));
+ children.add(new JobState(jobid, config));
}
- }
+ }
return children;
}
Modified: hive/branches/spark/itests/hive-unit/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/pom.xml?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/pom.xml (original)
+++ hive/branches/spark/itests/hive-unit/pom.xml Mon Sep 8 04:38:17 2014
@@ -53,6 +53,16 @@
<artifactId>hive-exec</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.hive.hcatalog</groupId>
+ <artifactId>hive-hcatalog-core</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hive.hcatalog</groupId>
+ <artifactId>hive-hcatalog-streaming</artifactId>
+ <version>${project.version}</version>
+ </dependency>
<!-- dependencies are always listed in sorted order by groupId, artifactId -->
<!-- test intra-project -->
Modified: hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java (original)
+++ hive/branches/spark/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java Mon Sep 8 04:38:17 2014
@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
import org.apache.hadoop.hive.shims.HadoopShims.MiniMrShim;
@@ -57,6 +56,7 @@ public class MiniHS2 extends AbstractHiv
private static final AtomicLong hs2Counter = new AtomicLong();
private MiniMrShim mr;
private MiniDFSShim dfs;
+ private FileSystem localFS;
private boolean useMiniMR = false;
private boolean useMiniKdc = false;
private final String serverPrincipal;
@@ -137,6 +137,10 @@ public class MiniHS2 extends AbstractHiv
this.dfs = dfs;
}
+ public FileSystem getLocalFS() {
+ return localFS;
+ }
+
public boolean isUseMiniMR() {
return useMiniMR;
}
@@ -157,7 +161,8 @@ public class MiniHS2 extends AbstractHiv
this.serverPrincipal = serverPrincipal;
this.serverKeytab = serverKeytab;
this.isMetastoreRemote = isMetastoreRemote;
- baseDir = Files.createTempDir();
+ baseDir = Files.createTempDir();
+ localFS = FileSystem.getLocal(hiveConf);
FileSystem fs;
if (useMiniMR) {
dfs = ShimLoader.getHadoopShims().getMiniDfs(hiveConf, 4, true, null);
@@ -371,7 +376,7 @@ public class MiniHS2 extends AbstractHiv
getMiniKdc().loginUser(getMiniKdc().getDefaultUserPrincipal());
sessionConf.put("principal", serverPrincipal);
}
- */
+ */
sessionHandle = hs2Client.openSession("foo", "bar", sessionConf);
} catch (Exception e) {
// service not started yet
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java Mon Sep 8 04:38:17 2014
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore
import java.io.IOException;
import java.net.ServerSocket;
+import java.util.concurrent.TimeUnit;
import junit.framework.TestCase;
@@ -45,7 +46,7 @@ public class TestMetaStoreAuthorization
"true");
conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
- conf.setIntVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60);
+ conf.setTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60, TimeUnit.SECONDS);
}
public void testIsWritable() throws Exception {
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java Mon Sep 8 04:38:17 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.metastore
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
import junit.framework.Assert;
import junit.framework.TestCase;
@@ -58,7 +59,7 @@ public class TestRetryingHMSHandler exte
hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
hiveConf.setIntVar(HiveConf.ConfVars.HMSHANDLERATTEMPTS, 2);
- hiveConf.setIntVar(HiveConf.ConfVars.HMSHANDLERINTERVAL, 0);
+ hiveConf.setTimeVar(HiveConf.ConfVars.HMSHANDLERINTERVAL, 0, TimeUnit.MILLISECONDS);
hiveConf.setBoolVar(HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, false);
msc = new HiveMetaStoreClient(hiveConf, null);
}
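[Editor's note] The two test hunks above migrate integer-valued time settings to setTimeVar with an explicit TimeUnit; a minimal sketch of the pattern (the getTimeVar read-back is an assumption, not shown in this patch):

  import java.util.concurrent.TimeUnit;

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

  public class TimeVarExample {
    public static void main(String[] args) {
      HiveConf conf = new HiveConf();
      // Write with an explicit unit instead of a bare int of implied seconds.
      conf.setTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60, TimeUnit.SECONDS);
      // Read back in whatever unit the caller needs (assumes a matching getTimeVar accessor).
      long delayMs = conf.getTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.MILLISECONDS);
      System.out.println(delayMs); // 60000
    }
  }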
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java Mon Sep 8 04:38:17 2014
@@ -96,6 +96,7 @@ public class TestStorageBasedMetastoreAu
driver = new Driver(clientHiveConf);
setupFakeUser();
+ InjectableDummyAuthenticator.injectMode(false);
}
@@ -159,6 +160,38 @@ public class TestStorageBasedMetastoreAu
assertEquals(expectedRet, resp.getResponseCode());
}
+ /**
+ * Drop view should not be blocked by SBA; a view has no storage location to drop.
+ * @throws Exception
+ */
+ public void testDropView() throws Exception {
+ String dbName = getTestDbName();
+ String tblName = getTestTableName();
+ String viewName = "view" + tblName;
+ setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
+
+ CommandProcessorResponse resp = driver.run("create database " + dbName);
+ assertEquals(0, resp.getResponseCode());
+ Database db = msc.getDatabase(dbName);
+ validateCreateDb(db, dbName);
+
+ setPermissions(db.getLocationUri(), "-rwxrwxrwt");
+
+ String dbDotTable = dbName + "." + tblName;
+ resp = driver.run("create table " + dbDotTable + "(i int)");
+ assertEquals(0, resp.getResponseCode());
+
+ String dbDotView = dbName + "." + viewName;
+ resp = driver.run("create view " + dbDotView + " as select * from " + dbDotTable);
+ assertEquals(0, resp.getResponseCode());
+
+ resp = driver.run("drop view " + dbDotView);
+ assertEquals(0, resp.getResponseCode());
+
+ resp = driver.run("drop table " + dbDotTable);
+ assertEquals(0, resp.getResponseCode());
+ }
+
public void testDropPartition() throws Exception {
dropPartitionByOtherUser("-rwxrwxrwx", 0);
@@ -202,7 +235,6 @@ public class TestStorageBasedMetastoreAu
InjectableDummyAuthenticator.injectUserName(fakeUser);
InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames);
- InjectableDummyAuthenticator.injectMode(true);
}
private String setupUser() {
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java Mon Sep 8 04:38:17 2014
@@ -31,7 +31,9 @@ import java.sql.Statement;
import java.util.HashMap;
import java.util.Map;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hive.jdbc.miniHS2.MiniHS2;
@@ -41,255 +43,357 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
- public class TestJdbcWithMiniHS2 {
- private static MiniHS2 miniHS2 = null;
- private static Path dataFilePath;
-
- private Connection hs2Conn = null;
-
- @BeforeClass
- public static void beforeTest() throws Exception {
- Class.forName(MiniHS2.getJdbcDriverName());
- HiveConf conf = new HiveConf();
- conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
- miniHS2 = new MiniHS2(conf);
- String dataFileDir = conf.get("test.data.files").replace('\\', '/')
- .replace("c:", "");
- dataFilePath = new Path(dataFileDir, "kv1.txt");
- Map<String, String> confOverlay = new HashMap<String, String>();
- miniHS2.start(confOverlay);
- }
+public class TestJdbcWithMiniHS2 {
+ private static MiniHS2 miniHS2 = null;
+ private static Path dataFilePath;
+
+ private Connection hs2Conn = null;
+
+ @BeforeClass
+ public static void beforeTest() throws Exception {
+ Class.forName(MiniHS2.getJdbcDriverName());
+ HiveConf conf = new HiveConf();
+ conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ miniHS2 = new MiniHS2(conf);
+ String dataFileDir = conf.get("test.data.files").replace('\\', '/')
+ .replace("c:", "");
+ dataFilePath = new Path(dataFileDir, "kv1.txt");
+ Map<String, String> confOverlay = new HashMap<String, String>();
+ miniHS2.start(confOverlay);
+ }
- @Before
- public void setUp() throws Exception {
- hs2Conn = getConnection(miniHS2.getJdbcURL(), System.getProperty("user.name"), "bar");
- }
+ @Before
+ public void setUp() throws Exception {
+ hs2Conn = getConnection(miniHS2.getJdbcURL(), System.getProperty("user.name"), "bar");
+ }
- private Connection getConnection(String jdbcURL, String user, String pwd) throws SQLException {
- Connection conn = DriverManager.getConnection(jdbcURL, user, pwd);
- conn.createStatement().execute("set hive.support.concurrency = false");
- return conn;
- }
+ private Connection getConnection(String jdbcURL, String user, String pwd) throws SQLException {
+ Connection conn = DriverManager.getConnection(jdbcURL, user, pwd);
+ conn.createStatement().execute("set hive.support.concurrency = false");
+ return conn;
+ }
- @After
- public void tearDown() throws Exception {
- hs2Conn.close();
+ @After
+ public void tearDown() throws Exception {
+ hs2Conn.close();
+ }
+
+ @AfterClass
+ public static void afterTest() throws Exception {
+ if (miniHS2.isStarted()) {
+ miniHS2.stop();
}
+ }
+
+ @Test
+ public void testConnection() throws Exception {
+ String tableName = "testTab1";
+ Statement stmt = hs2Conn.createStatement();
+
+ // create table
+ stmt.execute("DROP TABLE IF EXISTS " + tableName);
+ stmt.execute("CREATE TABLE " + tableName
+ + " (under_col INT COMMENT 'the under column', value STRING) COMMENT ' test table'");
+
+ // load data
+ stmt.execute("load data local inpath '"
+ + dataFilePath.toString() + "' into table " + tableName);
+
+ ResultSet res = stmt.executeQuery("SELECT * FROM " + tableName);
+ assertTrue(res.next());
+ assertEquals("val_238", res.getString(2));
+ res.close();
+ stmt.close();
+ }
+
- @AfterClass
- public static void afterTest() throws Exception {
- if (miniHS2.isStarted())
- miniHS2.stop();
+ /** This test is to connect to any database without using the command "Use <<DB>>"
+ * 1)connect to default database.
+ * 2) Create a new DB test_default.
+ * 3) Connect to test_default database.
+ * 4) Connect and create table under test_default_test.
+ * 5) Connect and display all tables.
+ * 6) Connect to default database and shouldn't find table test_default_test.
+ * 7) Connect and drop test_default_test.
+ * 8) drop test_default database.
+ */
+
+ @Test
+ public void testURIDatabaseName() throws Exception{
+
+ String jdbcUri = miniHS2.getJdbcURL().substring(0, miniHS2.getJdbcURL().indexOf("default"));
+
+ hs2Conn= getConnection(jdbcUri+"default", System.getProperty("user.name"),"bar");
+ String dbName="test_connection_non_default_db";
+ String tableInNonDefaultSchema="table_in_non_default_schema";
+ Statement stmt = hs2Conn.createStatement();
+ stmt.execute("create database if not exists "+dbName);
+ stmt.close();
+ hs2Conn.close();
+
+ hs2Conn = getConnection(jdbcUri+dbName,System.getProperty("user.name"),"bar");
+ stmt = hs2Conn .createStatement();
+ boolean expected = stmt.execute(" create table "+tableInNonDefaultSchema +" (x int)");
+ stmt.close();
+ hs2Conn .close();
+
+ hs2Conn = getConnection(jdbcUri+dbName,System.getProperty("user.name"),"bar");
+ stmt = hs2Conn .createStatement();
+ ResultSet res = stmt.executeQuery("show tables");
+ boolean testTableExists = false;
+ while (res.next()) {
+ assertNotNull("table name is null in result set", res.getString(1));
+ if (tableInNonDefaultSchema.equalsIgnoreCase(res.getString(1))) {
+ testTableExists = true;
+ }
+ }
+ assertTrue("table name "+tableInNonDefaultSchema
+ + " found in SHOW TABLES result set", testTableExists);
+ stmt.close();
+ hs2Conn .close();
+
+ hs2Conn = getConnection(jdbcUri+"default",System.getProperty("user.name"),"bar");
+ stmt = hs2Conn .createStatement();
+ res = stmt.executeQuery("show tables");
+ testTableExists = false;
+ while (res.next()) {
+ assertNotNull("table name is null in result set", res.getString(1));
+ if (tableInNonDefaultSchema.equalsIgnoreCase(res.getString(1))) {
+ testTableExists = true;
+ }
}
- @Test
- public void testConnection() throws Exception {
- String tableName = "testTab1";
- Statement stmt = hs2Conn.createStatement();
-
- // create table
- stmt.execute("DROP TABLE IF EXISTS " + tableName);
- stmt.execute("CREATE TABLE " + tableName
- + " (under_col INT COMMENT 'the under column', value STRING) COMMENT ' test table'");
-
- // load data
- stmt.execute("load data local inpath '"
- + dataFilePath.toString() + "' into table " + tableName);
-
- ResultSet res = stmt.executeQuery("SELECT * FROM " + tableName);
- assertTrue(res.next());
- assertEquals("val_238", res.getString(2));
- res.close();
- stmt.close();
+ assertFalse("table name "+tableInNonDefaultSchema
+ + " NOT found in SHOW TABLES result set", testTableExists);
+ stmt.close();
+ hs2Conn .close();
+
+ hs2Conn = getConnection(jdbcUri+dbName,System.getProperty("user.name"),"bar");
+ stmt = hs2Conn .createStatement();
+ stmt.execute("set hive.support.concurrency = false");
+ res = stmt.executeQuery("show tables");
+
+ stmt.execute(" drop table if exists table_in_non_default_schema");
+ expected = stmt.execute("DROP DATABASE "+ dbName);
+ stmt.close();
+
+ hs2Conn = getConnection(jdbcUri+"default",System.getProperty("user.name"),"bar");
+ stmt = hs2Conn .createStatement();
+ res = stmt.executeQuery("show tables");
+ testTableExists = false;
+ while (res.next()) {
+ assertNotNull("table name is null in result set", res.getString(1));
+ if (tableInNonDefaultSchema.equalsIgnoreCase(res.getString(1))) {
+ testTableExists = true;
+ }
}
+ // test URI with no dbName
+ hs2Conn = getConnection(jdbcUri, System.getProperty("user.name"),"bar");
+ verifyCurrentDB("default", hs2Conn);
+ hs2Conn.close();
+
+ hs2Conn = getConnection(jdbcUri + ";", System.getProperty("user.name"),"bar");
+ verifyCurrentDB("default", hs2Conn);
+ hs2Conn.close();
+
+ hs2Conn = getConnection(jdbcUri + ";/foo=bar;foo1=bar1", System.getProperty("user.name"),"bar");
+ verifyCurrentDB("default", hs2Conn);
+ hs2Conn.close();
+ }
- /** This test is to connect to any database without using the command "Use <<DB>>"
- * 1)connect to default database.
- * 2) Create a new DB test_default.
- * 3) Connect to test_default database.
- * 4) Connect and create table under test_default_test.
- * 5) Connect and display all tables.
- * 6) Connect to default database and shouldn't find table test_default_test.
- * 7) Connect and drop test_default_test.
- * 8) drop test_default database.
+ @Test
+ public void testConnectionSchemaAPIs() throws Exception {
+ String db1 = "DB1";
+ /**
+ * get/set Schema are new in JDK7 and not available in java.sql.Connection in JDK6.
+ * Hence the test uses the HiveConnection object to call these methods so that the test runs with older JDKs
*/
+ HiveConnection hiveConn = (HiveConnection)hs2Conn;
- @Test
- public void testURIDatabaseName() throws Exception{
+ assertEquals("default", hiveConn.getSchema());
+ Statement stmt = hs2Conn.createStatement();
+ stmt.execute("DROP DATABASE IF EXISTS " + db1 + " CASCADE");
+ stmt.execute("CREATE DATABASE " + db1);
+ assertEquals("default", hiveConn.getSchema());
+
+ stmt.execute("USE " + db1);
+ assertEquals(db1, hiveConn.getSchema());
+
+ stmt.execute("USE default");
+ assertEquals("default", hiveConn.getSchema());
+
+ hiveConn.setSchema(db1);
+ assertEquals(db1, hiveConn.getSchema());
+ hiveConn.setSchema("default");
+ assertEquals("default", hiveConn.getSchema());
+
+ assertTrue(hiveConn.getCatalog().isEmpty());
+ hiveConn.setCatalog("foo");
+ assertTrue(hiveConn.getCatalog().isEmpty());
+ }
- String jdbcUri = miniHS2.getJdbcURL().substring(0, miniHS2.getJdbcURL().indexOf("default"));
+ /**
+ * verify that the current db is the one expected. first create table as <db>.tab and then
+ * describe that table to check if <db> is the current database
+ * @param expectedDbName
+ * @param hs2Conn
+ * @throws Exception
+ */
+ private void verifyCurrentDB(String expectedDbName, Connection hs2Conn) throws Exception {
+ String verifyTab = "miniHS2DbVerificationTable";
+ Statement stmt = hs2Conn.createStatement();
+ stmt.execute("DROP TABLE IF EXISTS " + expectedDbName + "." + verifyTab);
+ stmt.execute("CREATE TABLE " + expectedDbName + "." + verifyTab + "(id INT)");
+ stmt.execute("DESCRIBE " + verifyTab);
+ stmt.execute("DROP TABLE IF EXISTS " + expectedDbName + "." + verifyTab);
+ stmt.close();
+ }
- hs2Conn= getConnection(jdbcUri+"default",System.getProperty("user.name"),"bar");
- String dbName="test_connection_non_default_db";
- String tableInNonDefaultSchema="table_in_non_default_schema";
- Statement stmt = hs2Conn.createStatement();
- stmt.execute("create database if not exists "+dbName);
- stmt.close();
- hs2Conn.close();
-
- hs2Conn = getConnection(jdbcUri+dbName,System.getProperty("user.name"),"bar");
- stmt = hs2Conn .createStatement();
- boolean expected = stmt.execute(" create table "+tableInNonDefaultSchema +" (x int)");
- stmt.close();
- hs2Conn .close();
-
- hs2Conn = getConnection(jdbcUri+dbName,System.getProperty("user.name"),"bar");
- stmt = hs2Conn .createStatement();
- ResultSet res = stmt.executeQuery("show tables");
- boolean testTableExists = false;
- while (res.next()) {
- assertNotNull("table name is null in result set", res.getString(1));
- if (tableInNonDefaultSchema.equalsIgnoreCase(res.getString(1))) {
- testTableExists = true;
- }
- }
- assertTrue("table name "+tableInNonDefaultSchema
- + " found in SHOW TABLES result set", testTableExists);
- stmt.close();
- hs2Conn .close();
-
- hs2Conn = getConnection(jdbcUri+"default",System.getProperty("user.name"),"bar");
- stmt = hs2Conn .createStatement();
- res = stmt.executeQuery("show tables");
- testTableExists = false;
- while (res.next()) {
- assertNotNull("table name is null in result set", res.getString(1));
- if (tableInNonDefaultSchema.equalsIgnoreCase(res.getString(1))) {
- testTableExists = true;
- }
- }
-
- assertFalse("table name "+tableInNonDefaultSchema
- + " NOT found in SHOW TABLES result set", testTableExists);
- stmt.close();
- hs2Conn .close();
-
- hs2Conn = getConnection(jdbcUri+dbName,System.getProperty("user.name"),"bar");
- stmt = hs2Conn .createStatement();
- stmt.execute("set hive.support.concurrency = false");
- res = stmt.executeQuery("show tables");
-
- stmt.execute(" drop table if exists table_in_non_default_schema");
- expected = stmt.execute("DROP DATABASE "+ dbName);
- stmt.close();
-
- hs2Conn = getConnection(jdbcUri+"default",System.getProperty("user.name"),"bar");
- stmt = hs2Conn .createStatement();
- res = stmt.executeQuery("show tables");
- testTableExists = false;
- while (res.next()) {
- assertNotNull("table name is null in result set", res.getString(1));
- if (tableInNonDefaultSchema.equalsIgnoreCase(res.getString(1))) {
- testTableExists = true;
- }
- }
-
- // test URI with no dbName
- hs2Conn = getConnection(jdbcUri, System.getProperty("user.name"),"bar");
- verifyCurrentDB("default", hs2Conn);
- hs2Conn.close();
-
- hs2Conn = getConnection(jdbcUri + ";", System.getProperty("user.name"),"bar");
- verifyCurrentDB("default", hs2Conn);
- hs2Conn.close();
-
- hs2Conn = getConnection(jdbcUri + ";/foo=bar;foo1=bar1", System.getProperty("user.name"),"bar");
- verifyCurrentDB("default", hs2Conn);
- hs2Conn.close();
- }
-
- @Test
- public void testConnectionSchemaAPIs() throws Exception {
- String db1 = "DB1";
- /**
- * get/set Schema are new in JDK7 and not available in java.sql.Connection in JDK6.
- * Hence the test uses HiveConnection object to call these methods so that test will run with older JDKs
- */
- HiveConnection hiveConn = (HiveConnection)hs2Conn;
-
- assertEquals("default", hiveConn.getSchema());
- Statement stmt = hs2Conn.createStatement();
- stmt.execute("DROP DATABASE IF EXISTS " + db1 + " CASCADE");
- stmt.execute("CREATE DATABASE " + db1);
- assertEquals("default", hiveConn.getSchema());
-
- stmt.execute("USE " + db1);
- assertEquals(db1, hiveConn.getSchema());
-
- stmt.execute("USE default");
- assertEquals("default", hiveConn.getSchema());
-
- hiveConn.setSchema(db1);
- assertEquals(db1, hiveConn.getSchema());
- hiveConn.setSchema("default");
- assertEquals("default", hiveConn.getSchema());
-
- assertTrue(hiveConn.getCatalog().isEmpty());
- hiveConn.setCatalog("foo");
- assertTrue(hiveConn.getCatalog().isEmpty());
- }
-
- /**
- * verify that the current db is the one expected. first create table as <db>.tab and then
- * describe that table to check if <db> is the current database
- * @param expectedDbName
- * @param hs2Conn
- * @throws Exception
- */
- private void verifyCurrentDB(String expectedDbName, Connection hs2Conn) throws Exception {
- String verifyTab = "miniHS2DbVerificationTable";
- Statement stmt = hs2Conn.createStatement();
- stmt.execute("DROP TABLE IF EXISTS " + expectedDbName + "." + verifyTab);
- stmt.execute("CREATE TABLE " + expectedDbName + "." + verifyTab + "(id INT)");
- stmt.execute("DESCRIBE " + verifyTab);
- stmt.execute("DROP TABLE IF EXISTS " + expectedDbName + "." + verifyTab);
- stmt.close();
- }
-
- /**
- * This method tests whether while creating a new connection, the config
- * variables specified in the JDBC URI are properly set for the connection.
- * This is a test for HiveConnection#configureConnection.
- *
- * @throws Exception
- */
- @Test
- public void testNewConnectionConfiguration() throws Exception {
-
- // Set some conf parameters
- String hiveConf = "hive.cli.print.header=true;hive.server2.async.exec.shutdown.timeout=20;"
- + "hive.server2.async.exec.threads=30;hive.server2.thrift.http.max.worker.threads=15";
- // Set some conf vars
- String hiveVar = "stab=salesTable;icol=customerID";
- String jdbcUri = miniHS2.getJdbcURL() + "?" + hiveConf + "#" + hiveVar;
-
- // Open a new connection with these conf & vars
- Connection con1 = DriverManager.getConnection(jdbcUri);
-
- // Execute "set" command and retrieve values for the conf & vars specified
- // above
- // Assert values retrieved
- Statement stmt = con1.createStatement();
-
- // Verify that the property has been properly set while creating the
- // connection above
- verifyConfProperty(stmt, "hive.cli.print.header", "true");
- verifyConfProperty(stmt, "hive.server2.async.exec.shutdown.timeout", "20");
- verifyConfProperty(stmt, "hive.server2.async.exec.threads", "30");
- verifyConfProperty(stmt, "hive.server2.thrift.http.max.worker.threads",
- "15");
- verifyConfProperty(stmt, "stab", "salesTable");
- verifyConfProperty(stmt, "icol", "customerID");
- con1.close();
- }
-
- private void verifyConfProperty(Statement stmt, String property,
- String expectedValue) throws Exception {
- ResultSet res = stmt.executeQuery("set " + property);
- while (res.next()) {
- String resultValues[] = res.getString(1).split("=");
- assertEquals(resultValues[1], expectedValue);
- }
- }
+ /**
+ * This method tests whether while creating a new connection, the config
+ * variables specified in the JDBC URI are properly set for the connection.
+ * This is a test for HiveConnection#configureConnection.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testNewConnectionConfiguration() throws Exception {
+
+ // Set some conf parameters
+ String hiveConf = "hive.cli.print.header=true;hive.server2.async.exec.shutdown.timeout=20;"
+ + "hive.server2.async.exec.threads=30;hive.server2.thrift.http.max.worker.threads=15";
+ // Set some conf vars
+ String hiveVar = "stab=salesTable;icol=customerID";
+ String jdbcUri = miniHS2.getJdbcURL() + "?" + hiveConf + "#" + hiveVar;
+
+ // Open a new connection with these conf & vars
+ Connection con1 = DriverManager.getConnection(jdbcUri);
+
+ // Execute "set" command and retrieve values for the conf & vars specified
+ // above
+ // Assert values retrieved
+ Statement stmt = con1.createStatement();
+
+ // Verify that the property has been properly set while creating the
+ // connection above
+ verifyConfProperty(stmt, "hive.cli.print.header", "true");
+ verifyConfProperty(stmt, "hive.server2.async.exec.shutdown.timeout", "20");
+ verifyConfProperty(stmt, "hive.server2.async.exec.threads", "30");
+ verifyConfProperty(stmt, "hive.server2.thrift.http.max.worker.threads",
+ "15");
+ verifyConfProperty(stmt, "stab", "salesTable");
+ verifyConfProperty(stmt, "icol", "customerID");
+ con1.close();
+ }
+
+ private void verifyConfProperty(Statement stmt, String property,
+ String expectedValue) throws Exception {
+ ResultSet res = stmt.executeQuery("set " + property);
+ while (res.next()) {
+ String resultValues[] = res.getString(1).split("=");
+ assertEquals(resultValues[1], expectedValue);
+ }
+ }
+
+ /**
+ * Tests the creation of the 3 scratch dirs: hdfs, local, downloaded resources (which is also local).
+ * 1. Test with doAs=false: open a new JDBC session and verify the presence of directories/permissions
+ * 2. Test with doAs=true: open a new JDBC session and verify the presence of directories/permissions
+ * @throws Exception
+ */
+ @Test
+ public void testScratchDirs() throws Exception {
+ // Stop HiveServer2
+ if (miniHS2.isStarted()) {
+ miniHS2.stop();
+ }
+ HiveConf conf = new HiveConf();
+ String userName;
+ Path scratchDirPath;
+ // 1. Test with doAs=false
+ conf.setBoolean("hive.server2.enable.doAs", false);
+ // Set a custom prefix for hdfs scratch dir path
+ conf.set("hive.exec.scratchdir", "/tmp/hs2");
+ // Set a scratch dir permission
+ String fsPermissionStr = "700";
+ conf.set("hive.scratch.dir.permission", fsPermissionStr);
+ // Start an instance of HiveServer2 which uses miniMR
+ miniHS2 = new MiniHS2(conf);
+ Map<String, String> confOverlay = new HashMap<String, String>();
+ miniHS2.start(confOverlay);
+ userName = System.getProperty("user.name");
+ hs2Conn = getConnection(miniHS2.getJdbcURL(), userName, "password");
+ // FS
+ FileSystem fs = miniHS2.getLocalFS();
+
+ // Verify scratch dir paths and permission
+ // HDFS scratch dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName);
+ verifyScratchDir(conf, fs, scratchDirPath, userName, false);
+
+ // Local scratch dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
+ verifyScratchDir(conf, fs, scratchDirPath, userName, true);
+
+ // Downloaded resources dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
+ verifyScratchDir(conf, fs, scratchDirPath, userName, true);
+
+ // 2. Test with doAs=true
+ // Restart HiveServer2 with doAs=true
+ if (miniHS2.isStarted()) {
+ miniHS2.stop();
+ }
+ conf.setBoolean("hive.server2.enable.doAs", true);
+ // Start HS2
+ miniHS2 = new MiniHS2(conf);
+ miniHS2.start(confOverlay);
+ // Test for user "neo"
+ userName = "neo";
+ hs2Conn = getConnection(miniHS2.getJdbcURL(), userName, "the-one");
+
+ // Verify scratch dir paths and permission
+ // HDFS scratch dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName);
+ verifyScratchDir(conf, fs, scratchDirPath, userName, false);
+
+ // Local scratch dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
+ verifyScratchDir(conf, fs, scratchDirPath, userName, true);
+
+ // Downloaded resources dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
+ verifyScratchDir(conf, fs, scratchDirPath, userName, true);
+
+ // Test for user "trinity"
+ userName = "trinity";
+ hs2Conn = getConnection(miniHS2.getJdbcURL(), userName, "the-one");
+
+ // Verify scratch dir paths and permission
+ // HDFS scratch dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName);
+ verifyScratchDir(conf, fs, scratchDirPath, userName, false);
+
+ // Local scratch dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR));
+ verifyScratchDir(conf, fs, scratchDirPath, userName, true);
+
+ // Downloaded resources dir
+ scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR));
+ verifyScratchDir(conf, fs, scratchDirPath, userName, true);
+ }
+
+ private void verifyScratchDir(HiveConf conf, FileSystem fs, Path scratchDirPath,
+ String userName, boolean isLocal) throws Exception {
+ String dirType = isLocal ? "Local" : "DFS";
+ FsPermission expectedFSPermission = new FsPermission(HiveConf.getVar(conf,
+ HiveConf.ConfVars.SCRATCHDIRPERMISSION));
+ assertTrue("The expected " + dirType + " scratch dir does not exist for the user: " +
+ userName, fs.exists(scratchDirPath));
+ if (fs.exists(scratchDirPath) && !isLocal) {
+ assertEquals("DFS scratch dir permissions don't match", expectedFSPermission,
+ fs.getFileStatus(scratchDirPath).getPermission());
+ }
}
+}
\ No newline at end of file
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniMr.java Mon Sep 8 04:38:17 2014
@@ -19,16 +19,16 @@
package org.apache.hive.jdbc;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
-import java.util.Map;
-import java.util.HashMap;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -48,11 +48,11 @@ public class TestJdbcWithMiniMr {
public static final String TEST_TAG = "miniHS2.miniMr.tag";
public static final String TEST_TAG_VALUE = "miniHS2.miniMr.value";
public static class MiniMrTestSessionHook implements HiveSessionHook {
- @Override
- public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException {
- sessionHookContext.getSessionConf().set(TEST_TAG, TEST_TAG_VALUE);
- }
- }
+ @Override
+ public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException {
+ sessionHookContext.getSessionConf().set(TEST_TAG, TEST_TAG_VALUE);
+ }
+ }
private static MiniHS2 miniHS2 = null;
private static HiveConf conf;
@@ -93,7 +93,7 @@ public class TestJdbcWithMiniMr {
@Before
public void setUp() throws Exception {
hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(dbName),
- System.getProperty("user.name"), "bar");
+ System.getProperty("user.name"), "bar");
stmt = hs2Conn.createStatement();
stmt.execute("USE " + dbName);
}
@@ -225,7 +225,7 @@ public class TestJdbcWithMiniMr {
String queryStr = "SELECT * FROM " + tempTableName +
" where value = '" + resultVal + "'";
verifyResult(queryStr, resultVal, 2);
-
+
// A second connection should not be able to see the table
Connection conn2 = DriverManager.getConnection(miniHS2.getJdbcURL(dbName),
System.getProperty("user.name"), "bar");
Modified: hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java (original)
+++ hive/branches/spark/itests/hive-unit/src/test/java/org/apache/hive/service/auth/TestCustomAuthentication.java Mon Sep 8 04:38:17 2014
@@ -18,7 +18,6 @@
package org.apache.hive.service.auth;
import junit.framework.Assert;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.server.HiveServer2;
import org.junit.AfterClass;
Modified: hive/branches/spark/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/src/test/resources/testconfiguration.properties?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/branches/spark/itests/src/test/resources/testconfiguration.properties Mon Sep 8 04:38:17 2014
@@ -44,6 +44,7 @@ minimr.query.files=auto_sortmerge_join_1
stats_counter_partitioned.q,\
temp_table_external.q,\
truncate_column_buckets.q,\
+ uber_reduce.q,\
udf_using.q
minitez.query.files.shared=alter_merge_2_orc.q,\
@@ -100,6 +101,7 @@ minitez.query.files.shared=alter_merge_2
script_env_var2.q,\
script_pipe.q,\
scriptfile1.q,\
+ select_dummy_source.q,\
stats_counter.q,\
stats_counter_partitioned.q,\
stats_noscan_1.q,\
@@ -138,8 +140,11 @@ minitez.query.files.shared=alter_merge_2
minitez.query.files=bucket_map_join_tez1.q,\
bucket_map_join_tez2.q,\
+ dynamic_partition_pruning.q,\
+ dynamic_partition_pruning_2.q,\
mapjoin_decimal.q,\
mrr.q,\
+ tez_bmj_schema_evolution.q,\
tez_dml.q,\
tez_fsstat.q,\
tez_insert_overwrite_local_directory_1.q,\
Modified: hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/branches/spark/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Mon Sep 8 04:38:17 2014
@@ -43,7 +43,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Deque;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
@@ -64,10 +63,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hive.cli.CliDriver;
import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.common.io.CachingPrintStream;
import org.apache.hadoop.hive.common.io.DigestPrintStream;
import org.apache.hadoop.hive.common.io.SortAndDigestPrintStream;
import org.apache.hadoop.hive.common.io.SortPrintStream;
-import org.apache.hadoop.hive.common.io.CachingPrintStream;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -75,8 +74,6 @@ import org.apache.hadoop.hive.metastore.
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.vector.util.AllVectorTypesRecord;
-import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -87,22 +84,14 @@ import org.apache.hadoop.hive.ql.parse.P
import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer;
-import org.apache.hadoop.hive.serde2.thrift.test.Complex;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.mapred.SequenceFileInputFormat;
-import org.apache.hadoop.mapred.SequenceFileOutputFormat;
-import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.util.Shell;
import org.apache.hive.common.util.StreamPrinter;
-import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.tools.ant.BuildException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
-import org.junit.Assume;
import com.google.common.collect.ImmutableList;
@@ -145,8 +134,8 @@ public class QTestUtil {
private QTestSetup setup = null;
private boolean isSessionStateStarted = false;
- private String initScript;
- private String cleanupScript;
+ private final String initScript;
+ private final String cleanupScript;
static {
for (String srcTable : System.getProperty("test.src.tables", "").trim().split(",")) {
@@ -332,14 +321,6 @@ public class QTestUtil {
HadoopShims shims = ShimLoader.getHadoopShims();
int numberOfDataNodes = 4;
- // can run tez tests only on hadoop 2
- if (clusterType == MiniClusterType.tez) {
- Assume.assumeTrue(ShimLoader.getMajorVersion().equals("0.23"));
- // this is necessary temporarily - there's a problem with multi datanodes on MiniTezCluster
- // will be fixed in 0.3
- numberOfDataNodes = 1;
- }
-
if (clusterType != MiniClusterType.none) {
dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
FileSystem fs = dfs.getFileSystem();
Modified: hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java (original)
+++ hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java Mon Sep 8 04:38:17 2014
@@ -160,7 +160,7 @@ public abstract class HiveBaseResultSet
}
public InputStream getBinaryStream(String columnName) throws SQLException {
- return getBinaryStream(findColumn(columnName));
+ return getBinaryStream(findColumn(columnName));
}
public Blob getBlob(int i) throws SQLException {
Modified: hive/branches/spark/metastore/if/hive_metastore.thrift
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/if/hive_metastore.thrift?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/if/hive_metastore.thrift (original)
+++ hive/branches/spark/metastore/if/hive_metastore.thrift Mon Sep 8 04:38:17 2014
@@ -273,6 +273,32 @@ struct Partition {
8: optional PrincipalPrivilegeSet privileges
}
+struct PartitionWithoutSD {
+ 1: list<string> values // string value is converted to appropriate partition key type
+ 2: i32 createTime,
+ 3: i32 lastAccessTime,
+ 4: string relativePath,
+ 5: map<string, string> parameters,
+ 6: optional PrincipalPrivilegeSet privileges
+}
+
+struct PartitionSpecWithSharedSD {
+ 1: list<PartitionWithoutSD> partitions,
+ 2: StorageDescriptor sd,
+}
+
+struct PartitionListComposingSpec {
+ 1: list<Partition> partitions
+}
+
+struct PartitionSpec {
+ 1: string dbName,
+ 2: string tableName,
+ 3: string rootPath,
+ 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
+ 5: optional PartitionListComposingSpec partitionList
+}
+
struct Index {
1: string indexName, // unique with in the whole database namespace
2: string indexHandlerClass, // reserved
@@ -793,6 +819,8 @@ service ThriftHiveMetastore extends fb30
3:MetaException o3)
i32 add_partitions(1:list<Partition> new_parts)
throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+ i32 add_partitions_pspec(1:list<PartitionSpec> new_parts)
+ throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request)
@@ -838,6 +866,9 @@ service ThriftHiveMetastore extends fb30
list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1)
+ throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
throws(1:MetaException o2)
@@ -862,6 +893,11 @@ service ThriftHiveMetastore extends fb30
3:string filter, 4:i16 max_parts=-1)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ // List partitions as PartitionSpec instances.
+ list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name
+ 3:string filter, 4:i32 max_parts=-1)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
// get the partitions matching the given partition filter
// unlike get_partitions_by_filter, takes serialized hive expression, and with that can work
// with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL.
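[Editor's note] To make the new structs concrete, a minimal sketch (not part of the patch) of assembling a PartitionSpec that shares one StorageDescriptor across partitions, assuming the standard Thrift-generated javabean setters for the structs declared above; database, table, and path names are placeholders:

  import java.util.Arrays;
  import java.util.HashMap;

  import org.apache.hadoop.hive.metastore.api.PartitionSpec;
  import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD;
  import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;
  import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

  public class PartitionSpecExample {
    public static PartitionSpec sharedSdSpec(StorageDescriptor sd) {
      PartitionWithoutSD p = new PartitionWithoutSD();
      p.setValues(Arrays.asList("2014-09-08"));    // partition key values as strings
      p.setRelativePath("ds=2014-09-08");          // path relative to rootPath
      p.setParameters(new HashMap<String, String>());

      PartitionSpecWithSharedSD shared = new PartitionSpecWithSharedSD();
      shared.setPartitions(Arrays.asList(p));
      shared.setSd(sd);                            // one SD shared by all partitions

      PartitionSpec spec = new PartitionSpec();
      spec.setDbName("default");
      spec.setTableName("web_logs");               // hypothetical table
      spec.setRootPath("/warehouse/web_logs");     // hypothetical warehouse path
      spec.setSharedSDPartitionSpec(shared);
      return spec;
    }
  }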
Modified: hive/branches/spark/metastore/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/pom.xml?rev=1623263&r1=1623262&r2=1623263&view=diff
==============================================================================
--- hive/branches/spark/metastore/pom.xml (original)
+++ hive/branches/spark/metastore/pom.xml Mon Sep 8 04:38:17 2014
@@ -165,6 +165,39 @@
</dependency>
</dependencies>
</profile>
+ <profile>
+ <id>thriftif</id>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.google.code.maven-replacer-plugin</groupId>
+ <artifactId>replacer</artifactId>
+ <version>1.5.3</version>
+ <executions>
+ <execution>
+ <id>process-thrift-sources</id>
+ <phase>process-sources</phase>
+ <goals>
+ <goal>replace</goal>
+ </goals>
+ </execution>
+ </executions>
+ <configuration>
+ <basedir>${basedir}/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/</basedir>
+ <includes>
+ <include>FieldSchema.java</include>
+ <include>Partition.java</include>
+ <include>SerDeInfo.java</include>
+ <include>StorageDescriptor.java</include>
+ </includes>
+ <tokenValueMap>${basedir}/src/main/resources/thrift-replacements.txt</tokenValueMap>
+ <regex>true</regex>
+ <quiet>false</quiet>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
</profiles>
<build>
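[Editor's note] As a usage note on the thriftif profile added above: the replacer execution is bound to the process-sources phase, so an invocation along the lines of

  mvn process-sources -Pthriftif

run from the metastore module would trigger it; the exact command used alongside Thrift code regeneration may differ.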