Posted to commits@hive.apache.org by om...@apache.org on 2015/11/20 22:46:51 UTC
[11/12] hive git commit: HIVE-12319 : Remove HadoopShims::getHadoopConfNames() (Aleksei Statkevich via Ashutosh Chauhan)
HIVE-12319 : Remove HadoopShims::getHadoopConfNames() (Aleksei Statkevich via Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b9650be8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b9650be8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b9650be8
Branch: refs/heads/master-fixed
Commit: b9650be8e48da18b357d502c4d4be2700d956469
Parents: 82705f0
Author: Aleksei Statkevich <me...@gmail.com>
Authored: Sun Nov 15 20:55:00 2015 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Fri Nov 20 13:46:23 2015 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 21 ++++++-----------
.../metrics/metrics2/TestCodahaleMetrics.java | 6 ++---
.../rcfile/RCFileMapReduceInputFormat.java | 8 +++----
.../rcfile/TestRCFileMapReduceInputFormat.java | 4 ++--
...estDDLWithRemoteMetastoreSecondNamenode.java | 3 ++-
.../org/apache/hadoop/hive/ql/QTestUtil.java | 3 ++-
.../java/org/apache/hadoop/hive/ql/Driver.java | 7 +++---
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 5 ++--
.../hadoop/hive/ql/exec/FetchOperator.java | 3 ++-
.../apache/hadoop/hive/ql/exec/Utilities.java | 6 ++---
.../hadoop/hive/ql/exec/mr/ExecDriver.java | 8 +++----
.../hive/ql/exec/persistence/RowContainer.java | 8 ++++---
.../hadoop/hive/ql/exec/tez/DagUtils.java | 2 +-
.../hive/ql/exec/tez/HiveSplitGenerator.java | 15 ++----------
.../hive/ql/io/CombineHiveInputFormat.java | 7 +++---
.../hadoop/hive/ql/io/HiveFileFormatUtils.java | 6 ++---
.../hadoop/hive/ql/io/HiveInputFormat.java | 5 +---
.../hadoop/hive/ql/io/merge/MergeFileTask.java | 6 ++---
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 8 ++-----
.../ql/io/rcfile/stats/PartialScanTask.java | 6 ++---
.../rcfile/truncate/ColumnTruncateMapper.java | 4 ++--
.../io/rcfile/truncate/ColumnTruncateTask.java | 7 +++---
.../hadoop/hive/ql/exec/TestOperators.java | 5 ++--
.../apache/hadoop/hive/ql/io/TestRCFile.java | 9 ++------
.../hive/ql/io/orc/TestInputOutputFormat.java | 20 ++++++++--------
.../hive/ql/io/orc/TestOrcSplitElimination.java | 14 ++++++------
.../apache/hadoop/hive/shims/Hadoop23Shims.java | 24 --------------------
.../apache/hadoop/hive/shims/HadoopShims.java | 3 ---
.../hadoop/hive/shims/HadoopShimsSecure.java | 15 ++++++++----
29 files changed, 95 insertions(+), 143 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 0afb964..4f8209a 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -28,9 +28,10 @@ import org.apache.hadoop.hive.conf.Validator.RangeValidator;
import org.apache.hadoop.hive.conf.Validator.RatioValidator;
import org.apache.hadoop.hive.conf.Validator.StringSet;
import org.apache.hadoop.hive.conf.Validator.TimeValidator;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
import org.apache.hive.common.HiveCompat;
@@ -386,22 +387,14 @@ public class HiveConf extends Configuration {
HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
"The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
- HADOOPFS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS"), null, "", true),
- HADOOPMAPFILENAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPFILENAME"), null, "", true),
- HADOOPMAPREDINPUTDIR(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIR"), null, "", true),
- HADOOPMAPREDINPUTDIRRECURSIVE(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIRRECURSIVE"), false, "", true),
- MAPREDMAXSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 256000000L, "", true),
- MAPREDMINSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 1L, "", true),
- MAPREDMINSPLITSIZEPERNODE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 1L, "", true),
- MAPREDMINSPLITSIZEPERRACK(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 1L, "", true),
+ MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true),
+ MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true),
+ MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true),
+ MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true),
// The number of reduce tasks per job. Hadoop sets this value to 1 by default
// By setting this property to -1, Hive will automatically determine the correct
// number of reducers.
- HADOOPNUMREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPNUMREDUCERS"), -1, "", true),
- HADOOPJOBNAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPJOBNAME"), null, "", true),
- HADOOPSPECULATIVEEXECREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSPECULATIVEEXECREDUCERS"), true, "", true),
- MAPREDSETUPCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false, "", true),
- MAPREDTASKCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false, "", true),
+ HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true),
// Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
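The net effect of this hunk: the split-size ConfVars are now declared directly
against the MapReduce constants, and the remaining HADOOP*/MAPRED* entries that
used to be resolved through getHadoopConfNames() are dropped in favor of the
plain Hadoop keys at each call site. A minimal sketch of the two call patterns
the rest of the patch converges on (variable names are illustrative, not from
the commit):

    Configuration conf = new Configuration();
    // Through the ConfVars enum, which now wraps the Hadoop key directly ...
    HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 256000000L);
    long maxSplit = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE);
    // ... or via the constant the ConfVar is declared against
    // (org.apache.hadoop.mapreduce.lib.input.FileInputFormat).
    conf.setLong(FileInputFormat.SPLIT_MAXSIZE, 256000000L);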
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
index 27825b1..55bb2c2 100644
--- a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
+++ b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleMetrics.java
@@ -22,11 +22,10 @@ import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
import org.apache.hadoop.hive.common.metrics.common.MetricsVariable;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -57,8 +56,7 @@ public class TestCodahaleMetrics {
jsonReportFile = new File(workDir, "json_reporting");
jsonReportFile.delete();
- String defaultFsName = ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS");
- conf.set(defaultFsName, "local");
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local");
conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName());
conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name());
conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString());
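For the default filesystem, the test now uses the public Hadoop constant
instead of the shim lookup; the key is the same string ("fs.defaultFS"), only
the way it is obtained changes. Sketch, assuming a Configuration named conf:

    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    // replaces getHadoopConfNames().get("HADOOPFS")
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local");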
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java
index 0c3d707..d5fa93c 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java
@@ -21,8 +21,8 @@ package org.apache.hive.hcatalog.rcfile;
import java.io.IOException;
import java.util.List;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.mapreduce.InputSplit;
@@ -44,10 +44,8 @@ public class RCFileMapReduceInputFormat<K extends LongWritable, V extends BytesR
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
-
- job.getConfiguration().setLong(
- ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"),
- SequenceFile.SYNC_INTERVAL);
+ HiveConf.setLongVar(job.getConfiguration(),
+ HiveConf.ConfVars.MAPREDMINSPLITSIZE, SequenceFile.SYNC_INTERVAL);
return super.getSplits(job);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
index 02a13b3..bb6c582 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java
@@ -230,8 +230,8 @@ public class TestRCFileMapReduceInputFormat extends TestCase {
Configuration jonconf = new Configuration(cloneConf);
jonconf.set("mapred.input.dir", testDir.toString());
JobContext context = new Job(jonconf);
- context.getConfiguration().setLong(
- ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), maxSplitSize);
+ HiveConf.setLongVar(context.getConfiguration(),
+ HiveConf.ConfVars.MAPREDMAXSPLITSIZE, maxSplitSize);
List<InputSplit> splits = inputFormat.getSplits(context);
assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
int readCount = 0;
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index 7ed6436..bfb25aa 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -23,6 +23,7 @@ import java.util.HashMap;
import junit.framework.JUnit4TestAdapter;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -97,7 +98,7 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
}
fs2.mkdirs(tmppathFs2);
fs2Uri = fs2.getUri().toString();
- jobConf.setVar(HiveConf.ConfVars.HADOOPFS, fs2Uri);
+ jobConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fs2Uri);
driver = new Driver(jobConf);
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index bf6bea7..ac1f15e 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -56,6 +56,7 @@ import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -299,7 +300,7 @@ public class QTestUtil {
// set fs.default.name to the uri of mini-dfs
String dfsUriString = WindowsPathUtil.getHdfsUriString(dfs.getFileSystem().getUri().toString());
- conf.setVar(HiveConf.ConfVars.HADOOPFS, dfsUriString);
+ conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsUriString);
// hive.metastore.warehouse.dir needs to be set relative to the mini-dfs
conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
(new Path(dfsUriString,
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index fbde9eb..8fafd61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -36,6 +36,7 @@ import java.util.Set;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -1459,7 +1460,7 @@ public class Driver implements CommandProcessor {
public int execute() throws CommandNeedRetryException {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
- boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME));
+ boolean noName = StringUtils.isEmpty(conf.get(MRJobConfig.JOB_NAME));
int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
String queryId = plan.getQueryId();
@@ -1703,7 +1704,7 @@ public class Driver implements CommandProcessor {
SessionState.get().getHiveHistory().endQuery(queryId);
}
if (noName) {
- conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, "");
+ conf.set(MRJobConfig.JOB_NAME, "");
}
dumpMetaCallTimingWithoutEx("execution");
perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
@@ -1786,7 +1787,7 @@ public class Driver implements CommandProcessor {
}
if (tsk.isMapRedTask() && !(tsk instanceof ConditionalTask)) {
if (noName) {
- conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname + "(" + tsk.getId() + ")");
+ conf.set(MRJobConfig.JOB_NAME, jobname + "(" + tsk.getId() + ")");
}
conf.set("mapreduce.workflow.node.name", tsk.getId());
Utilities.setWorkflowAdjacencies(conf, plan);
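With the HADOOPJOBNAME ConfVar gone, the driver reads and writes the job name
through MRJobConfig.JOB_NAME ("mapreduce.job.name"). A condensed sketch of the
pattern around task launch; jobname and tsk are placeholders taken from the
surrounding code:

    // before: conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME) / conf.setVar(...)
    boolean noName = StringUtils.isEmpty(conf.get(MRJobConfig.JOB_NAME));
    if (noName) {
      conf.set(MRJobConfig.JOB_NAME, jobname + "(" + tsk.getId() + ")");
    }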
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 9ab3e98..a210b95 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.exec;
import com.google.common.collect.Iterables;
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -1368,7 +1369,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
String jobname = String.format("Archiving %s@%s",
tbl.getTableName(), partSpecInfo.getName());
jobname = Utilities.abbreviate(jobname, maxJobNameLen - 6);
- conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname);
+ conf.set(MRJobConfig.JOB_NAME, jobname);
HadoopArchives har = new HadoopArchives(conf);
List<String> args = new ArrayList<String>();
@@ -1378,7 +1379,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
args.add(originalDir.toString());
args.add(tmpPath.toString());
- ret = ToolRunner.run(har, args.toArray(new String[0]));;
+ ret = ToolRunner.run(har, args.toArray(new String[0]));
} catch (Exception e) {
throw new HiveException(e);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index ad36093..3aafe89 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hive.common.util.AnnotationUtils;
import org.apache.hive.common.util.ReflectionUtil;
@@ -648,7 +649,7 @@ public class FetchOperator implements Serializable {
* @return list of file status entries
*/
private FileStatus[] listStatusUnderPath(FileSystem fs, Path p) throws IOException {
- boolean recursive = HiveConf.getBoolVar(job, HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE);
+ boolean recursive = job.getBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, false);
// If this is in acid format always read it recursively regardless of what the jobconf says.
if (!recursive && !AcidUtils.isAcid(p, job)) {
return fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER);
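The recursive-listing flag is likewise read straight off the MapReduce
constant; the explicit false default matches the old ConfVar default. Sketch,
assuming a JobConf named job:

    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    // "mapreduce.input.fileinputformat.input.dir.recursive"
    boolean recursive = job.getBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, false);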
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 0700e2f..9dbb45a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -75,7 +75,6 @@ import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.zip.Deflater;
@@ -92,6 +91,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -207,7 +207,6 @@ import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Shell;
import org.apache.hive.common.util.ReflectionUtil;
-import org.slf4j.Logger;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;
@@ -3952,7 +3951,8 @@ public final class Utilities {
* @return True/False
*/
public static boolean isDefaultNameNode(HiveConf conf) {
- return !conf.getChangedProperties().containsKey(HiveConf.ConfVars.HADOOPFS.varname);
+ return !conf.getChangedProperties().containsKey(
+ CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
}
/**
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 380cf08..df96d8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -31,6 +31,7 @@ import java.util.List;
import java.util.Properties;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -275,8 +276,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
// Turn on speculative execution for reducers
boolean useSpeculativeExecReducers = HiveConf.getBoolVar(job,
HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS);
- HiveConf.setBoolVar(job, HiveConf.ConfVars.HADOOPSPECULATIVEEXECREDUCERS,
- useSpeculativeExecReducers);
+ job.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, useSpeculativeExecReducers);
String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
@@ -316,11 +316,11 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
initializeFiles("tmpfiles", addedFiles);
}
int returnVal = 0;
- boolean noName = StringUtils.isEmpty(HiveConf.getVar(job, HiveConf.ConfVars.HADOOPJOBNAME));
+ boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
if (noName) {
// This is for a special case to ensure unit tests pass
- HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME, "JOB" + Utilities.randGen.nextInt());
+ job.set(MRJobConfig.JOB_NAME, "JOB" + Utilities.randGen.nextInt());
}
String addedArchives = HiveConf.getVar(job, HiveConf.ConfVars.HIVEADDEDARCHIVES);
// Transfer HIVEADDEDARCHIVES to "tmparchives" so hadoop understands it
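Reducer speculation follows the same pattern: the Hive-level toggle is still
read through HiveConf, but it is propagated to MapReduce via
MRJobConfig.REDUCE_SPECULATIVE rather than the removed
HADOOPSPECULATIVEEXECREDUCERS ConfVar. Sketch, with job a JobConf as in the
surrounding code:

    boolean useSpeculativeExecReducers =
        HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS);
    job.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, useSpeculativeExecReducers);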
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
index c2d0d68..358f692 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
@@ -23,13 +23,14 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -147,7 +148,8 @@ public class RowContainer<ROW extends List<Object>>
private JobConf getLocalFSJobConfClone(Configuration jc) {
if (this.jobCloneUsingLocalFs == null) {
this.jobCloneUsingLocalFs = new JobConf(jc);
- HiveConf.setVar(jobCloneUsingLocalFs, HiveConf.ConfVars.HADOOPFS, Utilities.HADOOP_LOCAL_FS);
+ jobCloneUsingLocalFs.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+ Utilities.HADOOP_LOCAL_FS);
}
return this.jobCloneUsingLocalFs;
}
@@ -217,7 +219,7 @@ public class RowContainer<ROW extends List<Object>>
tblDesc.getInputFileFormatClass(), localJc);
}
- HiveConf.setVar(localJc, HiveConf.ConfVars.HADOOPMAPREDINPUTDIR,
+ localJc.set(FileInputFormat.INPUT_DIR,
org.apache.hadoop.util.StringUtils.escapeString(parentFile.getAbsolutePath()));
inputSplits = inputFormat.getSplits(localJc, 1);
actualSplitNum = inputSplits.length;
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 6dcfe8d..db4d73d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -665,7 +665,7 @@ public class DagUtils {
boolean useSpeculativeExecReducers = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVESPECULATIVEEXECREDUCERS);
- HiveConf.setBoolVar(conf, HiveConf.ConfVars.HADOOPSPECULATIVEEXECREDUCERS,
+ conf.setBoolean(org.apache.hadoop.mapreduce.MRJobConfig.REDUCE_SPECULATIVE,
useSpeculativeExecReducers);
return conf;
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
index 8902acb..532d242 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
@@ -81,16 +80,6 @@ public class HiveSplitGenerator extends InputInitializer {
private final MapWork work;
private final SplitGrouper splitGrouper = new SplitGrouper();
- private static final String MIN_SPLIT_SIZE;
- @SuppressWarnings("unused")
- private static final String MAX_SPLIT_SIZE;
-
- static {
- final HadoopShims SHIMS = ShimLoader.getHadoopShims();
- MIN_SPLIT_SIZE = SHIMS.getHadoopConfNames().get("MAPREDMINSPLITSIZE");
- MAX_SPLIT_SIZE = SHIMS.getHadoopConfNames().get("MAPREDMAXSPLITSIZE");
- }
-
public HiveSplitGenerator(InputInitializerContext initializerContext) throws IOException,
SerDeException {
super(initializerContext);
@@ -143,7 +132,7 @@ public class HiveSplitGenerator extends InputInitializer {
int taskResource = getContext().getVertexTaskResource().getMemory();
int availableSlots = totalResource / taskResource;
- if (conf.getLong(MIN_SPLIT_SIZE, 1) <= 1) {
+ if (HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1) <= 1) {
// broken configuration from mapred-default.xml
final long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
@@ -151,7 +140,7 @@ public class HiveSplitGenerator extends InputInitializer {
TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE,
TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT);
final long preferredSplitSize = Math.min(blockSize / 2, minGrouping);
- jobConf.setLong(MIN_SPLIT_SIZE, preferredSplitSize);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, preferredSplitSize);
LOG.info("The preferred split size is " + preferredSplitSize);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index 323ac43..a598ccc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.log.PerfLogger;
@@ -507,11 +506,11 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
}
// Store the previous value for the path specification
- String oldPaths = job.get(HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname);
+ String oldPaths = job.get(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR);
if (LOG.isDebugEnabled()) {
LOG.debug("The received input paths are: [" + oldPaths +
"] against the property "
- + HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname);
+ + org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR);
}
// Process the normal splits
@@ -540,7 +539,7 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
// This is just to prevent incompatibilities with previous versions Hive
// if some application depends on the original value being set.
if (oldPaths != null) {
- job.set(HiveConf.ConfVars.HADOOPMAPREDINPUTDIR.varname, oldPaths);
+ job.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, oldPaths);
}
// clear work from ThreadLocal after splits generated in case of thread is reused in pool.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
index bc13862..8b229af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.ql.plan.OperatorDesc;
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -63,6 +62,7 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.util.Shell;
import org.apache.hive.common.util.ReflectionUtil;
import org.slf4j.Logger;
@@ -567,10 +567,10 @@ public final class HiveFileFormatUtils {
// option to bypass job setup and cleanup was introduced in hadoop-21 (MAPREDUCE-463)
// but can be backported. So we disable setup/cleanup in all versions >= 0.19
- conf.setBoolean(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false);
+ conf.setBoolean(MRJobConfig.SETUP_CLEANUP_NEEDED, false);
// option to bypass task cleanup task was introduced in hadoop-23 (MAPREDUCE-2206)
// but can be backported. So we disable setup/cleanup in all versions >= 0.19
- conf.setBoolean(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false);
+ conf.setBoolean(MRJobConfig.TASK_CLEANUP_NEEDED, false);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index b63deb2..3feab1a 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.TableScanDesc;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
@@ -360,9 +359,7 @@ public class HiveInputFormat<K extends WritableComparable, V extends Writable>
footerCount = Utilities.getFooterCount(table, conf);
if (headerCount != 0 || footerCount != 0) {
// Input file has header or footer, cannot be splitted.
- conf.setLong(
- ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"),
- Long.MAX_VALUE);
+ HiveConf.setLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, Long.MAX_VALUE);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java
index 2f09014..7453145 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import java.io.IOException;
import java.io.Serializable;
@@ -106,8 +107,7 @@ public class MergeFileTask extends Task<MergeFileWork> implements Serializable,
}
// set job name
- boolean noName = StringUtils.isEmpty(HiveConf.getVar(job,
- HiveConf.ConfVars.HADOOPJOBNAME));
+ boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
String jobName = null;
if (noName && this.getQueryPlan() != null) {
@@ -118,7 +118,7 @@ public class MergeFileTask extends Task<MergeFileWork> implements Serializable,
if (noName) {
// This is for a special case to ensure unit tests pass
- HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME,
+ job.set(MRJobConfig.JOB_NAME,
jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 714af23..0567cbe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -129,10 +129,6 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
private static final Logger LOG = LoggerFactory.getLogger(OrcInputFormat.class);
private static boolean isDebugEnabled = LOG.isDebugEnabled();
static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
- static final String MIN_SPLIT_SIZE =
- SHIMS.getHadoopConfNames().get("MAPREDMINSPLITSIZE");
- static final String MAX_SPLIT_SIZE =
- SHIMS.getHadoopConfNames().get("MAPREDMAXSPLITSIZE");
private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024;
private static final long DEFAULT_MAX_SPLIT_SIZE = 256 * 1024 * 1024;
@@ -497,8 +493,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
Context(Configuration conf, final int minSplits) {
this.conf = conf;
this.sarg = ConvertAstToSearchArg.createFromConf(conf);
- minSize = conf.getLong(MIN_SPLIT_SIZE, DEFAULT_MIN_SPLIT_SIZE);
- maxSize = conf.getLong(MAX_SPLIT_SIZE, DEFAULT_MAX_SPLIT_SIZE);
+ minSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, DEFAULT_MIN_SPLIT_SIZE);
+ maxSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMAXSPLITSIZE, DEFAULT_MAX_SPLIT_SIZE);
String ss = conf.get(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname);
if (ss == null || ss.equals(SplitStrategyKind.HYBRID.name())) {
splitStrategyKind = SplitStrategyKind.HYBRID;
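With the MIN_SPLIT_SIZE/MAX_SPLIT_SIZE shim lookups removed, ORC reads its
split bounds through HiveConf with explicit defaults (16 MB / 256 MB, per the
DEFAULT_* constants above). Sketch, assuming a Configuration named conf:

    long minSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, 16L * 1024 * 1024);
    long maxSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMAXSPLITSIZE, 256L * 1024 * 1024);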
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
index fd04fb5..188e9a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -157,8 +158,7 @@ public class PartialScanTask extends Task<PartialScanWork> implements
int returnVal = 0;
RunningJob rj = null;
- boolean noName = StringUtils.isEmpty(HiveConf.getVar(job,
- HiveConf.ConfVars.HADOOPJOBNAME));
+ boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
String jobName = null;
if (noName && this.getQueryPlan() != null) {
@@ -169,7 +169,7 @@ public class PartialScanTask extends Task<PartialScanWork> implements
if (noName) {
// This is for a special case to ensure unit tests pass
- HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME,
+ job.set(MRJobConfig.JOB_NAME,
jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java
index 34a18cb..bd537cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.hive.ql.io.rcfile.truncate;
import java.io.IOException;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.RCFile;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
@@ -95,7 +95,7 @@ public class ColumnTruncateMapper extends MapReduceBase implements
String taskId = Utilities.getTaskId(jc);
this.tmpPath = tmpPath;
this.taskTmpPath = taskTmpPath;
- String inputFile = HiveConf.getVar(jc, HiveConf.ConfVars.HADOOPMAPFILENAME);
+ String inputFile = jc.get(MRJobConfig.MAP_INPUT_FILE);
int lastSeparator = inputFile.lastIndexOf(Path.SEPARATOR) + 1;
finalPath = new Path(tmpPath, inputFile.substring(lastSeparator));
outPath = new Path(taskTmpPath, Utilities.toTempPath(taskId));
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
index 79b3cfa..08e3d80 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;
+import org.apache.hadoop.mapreduce.MRJobConfig;
@SuppressWarnings( { "deprecation", "unchecked" })
public class ColumnTruncateTask extends Task<ColumnTruncateWork> implements Serializable,
@@ -142,8 +143,8 @@ public class ColumnTruncateTask extends Task<ColumnTruncateWork> implements Seri
int returnVal = 0;
RunningJob rj = null;
- boolean noName = StringUtils.isEmpty(HiveConf.getVar(job,
- HiveConf.ConfVars.HADOOPJOBNAME));
+
+ boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
String jobName = null;
if (noName && this.getQueryPlan() != null) {
@@ -154,7 +155,7 @@ public class ColumnTruncateTask extends Task<ColumnTruncateWork> implements Seri
if (noName) {
// This is for a special case to ensure unit tests pass
- HiveConf.setVar(job, HiveConf.ConfVars.HADOOPJOBNAME,
+ job.set(MRJobConfig.JOB_NAME,
jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index 21dcf86..e18117a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.io.IOContext;
import org.apache.hadoop.hive.ql.io.IOContextMap;
import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
import org.apache.hadoop.hive.ql.plan.CollectDesc;
@@ -58,6 +57,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
@@ -287,8 +287,7 @@ public class TestOperators extends TestCase {
System.out.println("Testing Map Operator");
// initialize configuration
JobConf hconf = new JobConf(TestOperators.class);
- HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
- "hdfs:///testDir/testFile");
+ hconf.set(MRJobConfig.MAP_INPUT_FILE, "hdfs:///testDir/testFile");
IOContextMap.get(hconf).setInputPath(
new Path("hdfs:///testDir/testFile"));
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
index a68049f..2914194 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
@@ -642,9 +641,7 @@ public class TestRCFile {
RCFileInputFormat inputFormat = new RCFileInputFormat();
JobConf jobconf = new JobConf(cloneConf);
jobconf.set("mapred.input.dir", testDir.toString());
- jobconf.setLong(
- ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"),
- fileLen);
+ HiveConf.setLongVar(jobconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, fileLen);
InputSplit[] splits = inputFormat.getSplits(jobconf, 1);
RCFileRecordReader rr = new RCFileRecordReader(jobconf, (FileSplit)splits[0]);
long lastSync = 0;
@@ -711,9 +708,7 @@ public class TestRCFile {
RCFileInputFormat inputFormat = new RCFileInputFormat();
JobConf jonconf = new JobConf(cloneConf);
jonconf.set("mapred.input.dir", testDir.toString());
- jonconf.setLong(
- ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"),
- minSplitSize);
+ HiveConf.setLongVar(jonconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, minSplitSize);
InputSplit[] splits = inputFormat.getSplits(jonconf, splitNumber);
assertEquals("splits length should be " + splitNumber, splits.length, splitNumber);
int readCount = 0;
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index b8d39d2..9f616ab 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -1080,8 +1080,8 @@ public class TestInputOutputFormat {
new MockBlock("host0", "host3-2", "host3-3"),
new MockBlock("host4-1", "host4-2", "host4-3"),
new MockBlock("host5-1", "host5-2", "host5-3")));
- conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 300);
- conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 200);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 300);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 200);
OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
OrcInputFormat.SplitGenerator splitter =
new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
@@ -1104,8 +1104,8 @@ public class TestInputOutputFormat {
assertEquals(1800, result.getStart());
assertEquals(200, result.getLength());
// test min = 0, max = 0 generates each stripe
- conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 0);
- conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 0);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 0);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0);
context = new OrcInputFormat.Context(conf);
splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
@@ -1129,8 +1129,8 @@ public class TestInputOutputFormat {
new MockBlock("host0", "host3-2", "host3-3"),
new MockBlock("host4-1", "host4-2", "host4-3"),
new MockBlock("host5-1", "host5-2", "host5-3")));
- conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 300);
- conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 200);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 300);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 200);
conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
@@ -1154,8 +1154,8 @@ public class TestInputOutputFormat {
assertEquals(41867, result.getProjectedColumnsUncompressedSize());
// test min = 0, max = 0 generates each stripe
- conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 0);
- conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 0);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 0);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0);
context = new OrcInputFormat.Context(conf);
splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
fs.getFileStatus(new Path("/a/file")), null, true,
@@ -1174,8 +1174,8 @@ public class TestInputOutputFormat {
}
// single split
- conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 100000);
- conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 1000);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 1000);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 100000);
context = new OrcInputFormat.Context(conf);
splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
fs.getFileStatus(new Path("/a/file")), null, true,
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
index 02f015c..3560c43 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
@@ -28,6 +28,7 @@ import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -40,7 +41,6 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.InputFormat;
@@ -109,8 +109,8 @@ public class TestOrcSplitElimination {
100000, CompressionKind.NONE, 10000, 10000);
writeData(writer);
writer.close();
- conf.set(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), "1000");
- conf.set(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), "5000");
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 5000);
InputFormat<?, ?> in = new OrcInputFormat();
FileInputFormat.setInputPaths(conf, testFilePath.toString());
@@ -187,8 +187,8 @@ public class TestOrcSplitElimination {
100000, CompressionKind.NONE, 10000, 10000);
writeData(writer);
writer.close();
- conf.set(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), "1000");
- conf.set(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), "150000");
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 150000);
InputFormat<?, ?> in = new OrcInputFormat();
FileInputFormat.setInputPaths(conf, testFilePath.toString());
@@ -276,8 +276,8 @@ public class TestOrcSplitElimination {
100000, CompressionKind.NONE, 10000, 10000);
writeData(writer);
writer.close();
- conf.set(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), "1000");
- conf.set(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), "150000");
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000);
+ HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 150000);
InputFormat<?, ?> in = new OrcInputFormat();
FileInputFormat.setInputPaths(conf, testFilePath.toString());
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 4da98e4..0b5c092 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -21,21 +21,16 @@ import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
-import java.net.URL;
import java.nio.ByteBuffer;
import java.security.AccessControlException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -1041,25 +1036,6 @@ public class Hadoop23Shims extends HadoopShimsSecure {
}
@Override
- public Map<String, String> getHadoopConfNames() {
- Map<String, String> ret = new HashMap<String, String>();
- ret.put("HADOOPFS", "fs.defaultFS");
- ret.put("HADOOPMAPFILENAME", "mapreduce.map.input.file");
- ret.put("HADOOPMAPREDINPUTDIR", "mapreduce.input.fileinputformat.inputdir");
- ret.put("HADOOPMAPREDINPUTDIRRECURSIVE", "mapreduce.input.fileinputformat.input.dir.recursive");
- ret.put("MAPREDMAXSPLITSIZE", "mapreduce.input.fileinputformat.split.maxsize");
- ret.put("MAPREDMINSPLITSIZE", "mapreduce.input.fileinputformat.split.minsize");
- ret.put("MAPREDMINSPLITSIZEPERNODE", "mapreduce.input.fileinputformat.split.minsize.per.node");
- ret.put("MAPREDMINSPLITSIZEPERRACK", "mapreduce.input.fileinputformat.split.minsize.per.rack");
- ret.put("HADOOPNUMREDUCERS", "mapreduce.job.reduces");
- ret.put("HADOOPJOBNAME", "mapreduce.job.name");
- ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapreduce.reduce.speculative");
- ret.put("MAPREDSETUPCLEANUPNEEDED", "mapreduce.job.committer.setup.cleanup.needed");
- ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
- return ret;
- }
-
- @Override
public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in, ByteBufferPoolShim pool) throws IOException {
if(zeroCopy) {
return ZeroCopyShims.getZeroCopyReader(in, pool);
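For reference, every key in the removed getHadoopConfNames() map is already
exposed as a public Hadoop 2.x constant; the constant names below summarize
that mapping and are not themselves part of this commit:

    CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY        -> fs.defaultFS
    MRJobConfig.MAP_INPUT_FILE                               -> mapreduce.map.input.file
    FileInputFormat.INPUT_DIR / INPUT_DIR_RECURSIVE          -> ...inputdir / ...input.dir.recursive
    FileInputFormat.SPLIT_MAXSIZE / SPLIT_MINSIZE            -> ...split.maxsize / ...split.minsize
    CombineFileInputFormat.SPLIT_MINSIZE_PERNODE / _PERRACK  -> ...minsize.per.node / ...per.rack
    MRJobConfig.NUM_REDUCES / JOB_NAME / REDUCE_SPECULATIVE  -> mapreduce.job.reduces / .job.name / .reduce.speculative
    MRJobConfig.SETUP_CLEANUP_NEEDED / TASK_CLEANUP_NEEDED   -> mapreduce.job.committer.setup.cleanup.needed / ...task.cleanup.needed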
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 47b3caa..f9785ec 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -27,7 +27,6 @@ import java.security.AccessControlException;
import java.security.NoSuchAlgorithmException;
import java.util.Comparator;
import java.util.List;
-import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
@@ -411,8 +410,6 @@ public interface HadoopShims {
*/
public FileSystem createProxyFileSystem(FileSystem fs, URI uri);
- public Map<String, String> getHadoopConfNames();
-
/**
* Create a shim for DFS storage policy.
*/
http://git-wip-us.apache.org/repos/asf/hive/blob/b9650be8/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
index 0a0f52d..63d48a5 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
@@ -294,18 +294,25 @@ public abstract class HadoopShimsSecure implements HadoopShims {
@Override
public CombineFileSplit[] getSplits(JobConf job, int numSplits) throws IOException {
- long minSize = job.getLong(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 0);
+
+ long minSize =
+ job.getLong(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.SPLIT_MINSIZE, 0);
// For backward compatibility, let the above parameter be used
- if (job.getLong(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 0) == 0) {
+ if (job.getLong(
+ org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.SPLIT_MINSIZE_PERNODE,
+ 0) == 0) {
super.setMinSplitSizeNode(minSize);
}
- if (job.getLong(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 0) == 0) {
+ if (job.getLong(
+ org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.SPLIT_MINSIZE_PERRACK,
+ 0) == 0) {
super.setMinSplitSizeRack(minSize);
}
- if (job.getLong(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 0) == 0) {
+ if (job.getLong(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.SPLIT_MAXSIZE,
+ 0) == 0) {
super.setMaxSplitSize(minSize);
}