You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by gu...@apache.org on 2014/01/10 03:42:40 UTC
svn commit: r1557013 [2/2] - in /hive/trunk: ./
ant/src/org/apache/hadoop/hive/ant/
common/src/java/org/apache/hadoop/hive/common/
common/src/java/org/apache/hadoop/hive/conf/ conf/ data/conf/tez/
hbase-handler/src/test/templates/ hcatalog/webhcat/svr/...
Modified: hive/trunk/ql/src/test/templates/TestParseNegative.vm
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/templates/TestParseNegative.vm?rev=1557013&r1=1557012&r2=1557013&view=diff
==============================================================================
--- hive/trunk/ql/src/test/templates/TestParseNegative.vm (original)
+++ hive/trunk/ql/src/test/templates/TestParseNegative.vm Fri Jan 10 02:42:39 2014
@@ -25,6 +25,7 @@ import java.io.*;
import java.util.*;
import org.apache.hadoop.hive.ql.QTestUtil;
+import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
import org.apache.hadoop.hive.ql.exec.Task;
public class $className extends TestCase {
@@ -33,8 +34,10 @@ public class $className extends TestCase
private static QTestUtil qt;
static {
+
+ MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+
try {
- boolean miniMR = "$clusterMode".equals("miniMR");
String hadoopVer = "$hadoopVersion";
qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer);
} catch (Exception e) {
Modified: hive/trunk/shims/0.23/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/shims/0.23/pom.xml?rev=1557013&r1=1557012&r2=1557013&view=diff
==============================================================================
--- hive/trunk/shims/0.23/pom.xml (original)
+++ hive/trunk/shims/0.23/pom.xml Fri Jan 10 02:42:39 2014
@@ -87,6 +87,81 @@
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<version>${jetty.version}</version>
+ <optional>true</optional>
</dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-api</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-dag</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-common</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-runtime-library</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-mapreduce</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-runtime-internals</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-hdfs</artifactId>
+ <version>${hadoop-23.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-api</artifactId>
+ <version>${hadoop-23.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-common</artifactId>
+ <version>${hadoop-23.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-client</artifactId>
+ <version>${hadoop-23.version}</version>
+ <optional>true</optional>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.tez</groupId>
+ <artifactId>tez-tests</artifactId>
+ <version>${tez.version}</version>
+ <optional>true</optional>
+ <type>test-jar</type>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-yarn-server-tests</artifactId>
+ <version>${hadoop-23.version}</version>
+ <optional>true</optional>
+ <type>test-jar</type>
+ </dependency>
</dependencies>
</project>
Modified: hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
URL: http://svn.apache.org/viewvc/hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java?rev=1557013&r1=1557012&r2=1557013&view=diff
==============================================================================
--- hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (original)
+++ hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java Fri Jan 10 02:42:39 2014
@@ -61,13 +61,15 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
import org.apache.hadoop.security.UserGroupInformation;
-
+import org.apache.tez.test.MiniTezCluster;
/**
* Implemention of shims against Hadoop 0.23.0.
*/
public class Hadoop23Shims extends HadoopShimsSecure {
+ HadoopShims.MiniDFSShim cluster = null;
+
@Override
public String getTaskAttemptLogUrl(JobConf conf,
String taskTrackerHttpAddress, String taskAttemptId)
@@ -194,6 +196,11 @@ public class Hadoop23Shims extends Hadoo
private final MiniMRCluster mr;
private final Configuration conf;
+ public MiniMrShim() {
+ mr = null;
+ conf = null;
+ }
+
public MiniMrShim(Configuration conf, int numberOfTaskTrackers,
String nameNode, int numDir) throws IOException {
this.conf = conf;
@@ -231,6 +238,73 @@ public class Hadoop23Shims extends Hadoo
}
}
+ /**
+ * Returns a shim to wrap MiniMrTez
+ */
+ public MiniMrShim getMiniTezCluster(Configuration conf, int numberOfTaskTrackers,
+ String nameNode, int numDir) throws IOException {
+ return new MiniTezShim(conf, numberOfTaskTrackers, nameNode, numDir);
+ }
+
+ /**
+ * Shim for MiniTezCluster
+ */
+ public class MiniTezShim extends Hadoop23Shims.MiniMrShim {
+
+ private final MiniTezCluster mr;
+ private final Configuration conf;
+
+ public MiniTezShim(Configuration conf, int numberOfTaskTrackers,
+ String nameNode, int numDir) throws IOException {
+
+ mr = new MiniTezCluster("hive", numberOfTaskTrackers);
+ conf.set("fs.defaultFS", nameNode);
+ conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir");
+ mr.init(conf);
+ mr.start();
+ this.conf = mr.getConfig();
+ }
+
+ @Override
+ public int getJobTrackerPort() throws UnsupportedOperationException {
+ String address = conf.get("yarn.resourcemanager.address");
+ address = StringUtils.substringAfterLast(address, ":");
+
+ if (StringUtils.isBlank(address)) {
+ throw new IllegalArgumentException("Invalid YARN resource manager port.");
+ }
+
+ return Integer.parseInt(address);
+ }
+
+ @Override
+ public void shutdown() throws IOException {
+ mr.stop();
+ }
+
+ @Override
+ public void setupConfiguration(Configuration conf) {
+ Configuration config = mr.getConfig();
+ for (Map.Entry<String, String> pair: config) {
+ conf.set(pair.getKey(), pair.getValue());
+ }
+
+ Path jarPath = new Path("hdfs:///user/hive");
+ Path hdfsPath = new Path("hdfs:///user/");
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ jarPath = fs.makeQualified(jarPath);
+ conf.set("hive.jar.directory", jarPath.toString());
+ fs.mkdirs(jarPath);
+ hdfsPath = fs.makeQualified(hdfsPath);
+ conf.set("hive.user.install.directory", hdfsPath.toString());
+ fs.mkdirs(hdfsPath);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
// Don't move this code to the parent class. There's a binary
// incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
// need to have two different shim classes even though they are
@@ -239,7 +313,8 @@ public class Hadoop23Shims extends Hadoo
int numDataNodes,
boolean format,
String[] racks) throws IOException {
- return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
+ cluster = new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
+ return cluster;
}
/**