Posted to commits@hive.apache.org by se...@apache.org on 2015/08/18 00:22:06 UTC
[1/7] hive git commit: HIVE-11556: HiveFilter.copy should take the condition given as a parameter (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Repository: hive
Updated Branches:
refs/heads/hbase-metastore 2fe60861d -> 9d9dd72a0
HIVE-11556: HiveFilter.copy should take the condition given as a parameter (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/147347a3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/147347a3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/147347a3
Branch: refs/heads/hbase-metastore
Commit: 147347a31dd021ac3a6f956c7c02a77b505bce7b
Parents: 3071ce9
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Sat Aug 15 10:49:55 2015 +0300
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Sat Aug 15 10:49:55 2015 +0300
----------------------------------------------------------------------
.../hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/147347a3/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
index de61e48..eb97bec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
@@ -39,7 +39,7 @@ public class HiveFilter extends Filter implements HiveRelNode {
@Override
public Filter copy(RelTraitSet traitSet, RelNode input, RexNode condition) {
assert traitSet.containsIfApplicable(HiveRelNode.CONVENTION);
- return new HiveFilter(getCluster(), traitSet, input, getCondition());
+ return new HiveFilter(getCluster(), traitSet, input, condition);
}
@Override
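For context: Calcite planner rules rewrite a filter by handing copy() a new predicate, so a copy() that ignores its condition argument silently discards the rewrite. A minimal caller-side sketch of the contract (the class and the simplified-condition parameter are illustrative, not part of the patch):

import org.apache.calcite.rel.core.Filter;
import org.apache.calcite.rex.RexNode;

final class FilterRewriteSketch {
  // copy() must build the new filter from the condition it is given; before
  // HIVE-11556, HiveFilter.copy() returned getCondition() instead, so a rule
  // that simplified the predicate appeared to be a no-op.
  static Filter rewrite(Filter filter, RexNode simplifiedCondition) {
    return filter.copy(filter.getTraitSet(), filter.getInput(), simplifiedCondition);
  }
}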
[6/7] hive git commit: HIVE-11542 : port fileId support on shims and splits from llap branch (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Posted by se...@apache.org.
HIVE-11542 : port fileId support on shims and splits from llap branch (Sergey Shelukhin, reviewed by Prasanth Jayachandran)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3b6825b5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3b6825b5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3b6825b5
Branch: refs/heads/hbase-metastore
Commit: 3b6825b5b61e943e8e41743f5cbf6d640e0ebdf5
Parents: e059409
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Aug 17 15:16:57 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Aug 17 15:16:57 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 2 +
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 183 ++++++++++++++-----
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 97 +++++++---
.../apache/hadoop/hive/ql/io/orc/OrcSplit.java | 25 ++-
.../hive/ql/txn/compactor/CompactorMR.java | 13 +-
.../hadoop/hive/ql/txn/compactor/Initiator.java | 9 +-
.../apache/hadoop/hive/ql/io/TestAcidUtils.java | 27 +--
.../hive/ql/io/orc/TestInputOutputFormat.java | 6 +-
.../hadoop/hive/shims/Hadoop20SShims.java | 11 ++
.../apache/hadoop/hive/shims/Hadoop23Shims.java | 66 +++++++
.../apache/hadoop/hive/shims/HadoopShims.java | 15 ++
11 files changed, 348 insertions(+), 106 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9a6781b..da171b1 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1024,6 +1024,8 @@ public class HiveConf extends Configuration {
"data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
"Max cache size for keeping meta info about orc splits cached in the client."),
+ HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
+ "Include file ID in splits on file systems thaty support it."),
HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
"How many threads orc should use to create splits in parallel."),
HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false,
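The new flag gates the file-ID code paths added below. A minimal sketch of how it is read (mirroring the getSplits() change in OrcInputFormat later in this patch; the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

final class SplitFlagSketch {
  // Read once per split-generation pass, then threaded through the listers.
  static boolean useFileIds(Configuration conf) {
    return HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS);
  }
}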
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index c7e0780..30db513 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -28,6 +28,9 @@ import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
+
+import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.util.ArrayList;
@@ -221,7 +224,7 @@ public class AcidUtils {
* Get the list of original files.
* @return the list of original files (eg. 000000_0)
*/
- List<FileStatus> getOriginalFiles();
+ List<HdfsFileStatusWithId> getOriginalFiles();
/**
* Get the list of base and delta directories that are valid and not
@@ -423,6 +426,20 @@ public class AcidUtils {
return false;
}
+ @VisibleForTesting
+ public static Directory getAcidState(Path directory,
+ Configuration conf,
+ ValidTxnList txnList
+ ) throws IOException {
+ return getAcidState(directory, conf, txnList, false);
+ }
+
+ /** State holder for getChildState; a method cannot return two values, so they are bundled here. */
+ private static class TxnBase {
+ private FileStatus status;
+ private long txn;
+ }
+
/**
* Get the ACID state of the given directory. It finds the minimal set of
* base and diff directories. Note that because major compactions don't
@@ -436,51 +453,40 @@ public class AcidUtils {
*/
public static Directory getAcidState(Path directory,
Configuration conf,
- ValidTxnList txnList
+ ValidTxnList txnList,
+ boolean useFileIds
) throws IOException {
FileSystem fs = directory.getFileSystem(conf);
- FileStatus bestBase = null;
- long bestBaseTxn = 0;
final List<ParsedDelta> deltas = new ArrayList<ParsedDelta>();
List<ParsedDelta> working = new ArrayList<ParsedDelta>();
List<FileStatus> originalDirectories = new ArrayList<FileStatus>();
final List<FileStatus> obsolete = new ArrayList<FileStatus>();
- List<FileStatus> children = SHIMS.listLocatedStatus(fs, directory,
- hiddenFileFilter);
- for(FileStatus child: children) {
- Path p = child.getPath();
- String fn = p.getName();
- if (fn.startsWith(BASE_PREFIX) && child.isDir()) {
- long txn = parseBase(p);
- if (bestBase == null) {
- bestBase = child;
- bestBaseTxn = txn;
- } else if (bestBaseTxn < txn) {
- obsolete.add(bestBase);
- bestBase = child;
- bestBaseTxn = txn;
- } else {
- obsolete.add(child);
- }
- } else if (fn.startsWith(DELTA_PREFIX) && child.isDir()) {
- ParsedDelta delta = parseDelta(child);
- if (txnList.isTxnRangeValid(delta.minTransaction,
- delta.maxTransaction) !=
- ValidTxnList.RangeResponse.NONE) {
- working.add(delta);
- }
- } else {
- // This is just the directory. We need to recurse and find the actual files. But don't
- // do this until we have determined there is no base. This saves time. Plus,
- // it is possible that the cleaner is running and removing these original files,
- // in which case recursing through them could cause us to get an error.
- originalDirectories.add(child);
+ List<HdfsFileStatusWithId> childrenWithId = null;
+ if (useFileIds) {
+ try {
+ childrenWithId = SHIMS.listLocatedHdfsStatus(fs, directory, hiddenFileFilter);
+ } catch (Throwable t) {
+ LOG.error("Failed to get files with ID; using regular API", t);
+ useFileIds = false;
+ }
+ }
+ TxnBase bestBase = new TxnBase();
+ final List<HdfsFileStatusWithId> original = new ArrayList<>();
+ if (childrenWithId != null) {
+ for (HdfsFileStatusWithId child : childrenWithId) {
+ getChildState(child.getFileStatus(), child, txnList, working,
+ originalDirectories, original, obsolete, bestBase);
+ }
+ } else {
+ List<FileStatus> children = SHIMS.listLocatedStatus(fs, directory, hiddenFileFilter);
+ for (FileStatus child : children) {
+ getChildState(
+ child, null, txnList, working, originalDirectories, original, obsolete, bestBase);
}
}
- final List<FileStatus> original = new ArrayList<FileStatus>();
- // if we have a base, the original files are obsolete.
- if (bestBase != null) {
+ // If we have a base, the original files are obsolete.
+ if (bestBase.status != null) {
// remove the entries so we don't get confused later and think we should
// use them.
original.clear();
@@ -488,12 +494,12 @@ public class AcidUtils {
// Okay, we're going to need these originals. Recurse through them and figure out what we
// really need.
for (FileStatus origDir : originalDirectories) {
- findOriginals(fs, origDir, original);
+ findOriginals(fs, origDir, original, useFileIds);
}
}
Collections.sort(working);
- long current = bestBaseTxn;
+ long current = bestBase.txn;
int lastStmtId = -1;
for(ParsedDelta next: working) {
if (next.maxTransaction > current) {
@@ -516,7 +522,7 @@ public class AcidUtils {
}
}
- final Path base = bestBase == null ? null : bestBase.getPath();
+ final Path base = bestBase.status == null ? null : bestBase.status.getPath();
LOG.debug("in directory " + directory.toUri().toString() + " base = " + base + " deltas = " +
deltas.size());
@@ -528,7 +534,7 @@ public class AcidUtils {
}
@Override
- public List<FileStatus> getOriginalFiles() {
+ public List<HdfsFileStatusWithId> getOriginalFiles() {
return original;
}
@@ -544,23 +550,100 @@ public class AcidUtils {
};
}
+ private static void getChildState(FileStatus child, HdfsFileStatusWithId childWithId,
+ ValidTxnList txnList, List<ParsedDelta> working, List<FileStatus> originalDirectories,
+ List<HdfsFileStatusWithId> original, List<FileStatus> obsolete, TxnBase bestBase) {
+ Path p = child.getPath();
+ String fn = p.getName();
+ if (fn.startsWith(BASE_PREFIX) && child.isDir()) {
+ long txn = parseBase(p);
+ if (bestBase.status == null) {
+ bestBase.status = child;
+ bestBase.txn = txn;
+ } else if (bestBase.txn < txn) {
+ obsolete.add(bestBase.status);
+ bestBase.status = child;
+ bestBase.txn = txn;
+ } else {
+ obsolete.add(child);
+ }
+ } else if (fn.startsWith(DELTA_PREFIX) && child.isDir()) {
+ ParsedDelta delta = parseDelta(child);
+ if (txnList.isTxnRangeValid(delta.minTransaction,
+ delta.maxTransaction) !=
+ ValidTxnList.RangeResponse.NONE) {
+ working.add(delta);
+ }
+ } else if (child.isDir()) {
+ // This is just the directory. We need to recurse and find the actual files. But don't
+ // do this until we have determined there is no base. This saves time. Plus,
+ // it is possible that the cleaner is running and removing these original files,
+ // in which case recursing through them could cause us to get an error.
+ originalDirectories.add(child);
+ } else {
+ original.add(createOriginalObj(childWithId, child));
+ }
+ }
+
+ public static HdfsFileStatusWithId createOriginalObj(
+ HdfsFileStatusWithId childWithId, FileStatus child) {
+ return childWithId != null ? childWithId : new HdfsFileStatusWithoutId(child);
+ }
+
+ private static class HdfsFileStatusWithoutId implements HdfsFileStatusWithId {
+ private FileStatus fs;
+
+ public HdfsFileStatusWithoutId(FileStatus fs) {
+ this.fs = fs;
+ }
+
+ @Override
+ public FileStatus getFileStatus() {
+ return fs;
+ }
+
+ @Override
+ public Long getFileId() {
+ return null;
+ }
+ }
+
/**
- * Find the original files (non-ACID layout) recursively under the partition
- * directory.
+ * Find the original files (non-ACID layout) recursively under the partition directory.
* @param fs the file system
- * @param stat the file/directory to add
+ * @param stat the directory to add
* @param original the list of original files
* @throws IOException
*/
private static void findOriginals(FileSystem fs, FileStatus stat,
- List<FileStatus> original
- ) throws IOException {
- if (stat.isDir()) {
- for(FileStatus child: SHIMS.listLocatedStatus(fs, stat.getPath(), hiddenFileFilter)) {
- findOriginals(fs, child, original);
+ List<HdfsFileStatusWithId> original, boolean useFileIds) throws IOException {
+ assert stat.isDir();
+ List<HdfsFileStatusWithId> childrenWithId = null;
+ if (useFileIds) {
+ try {
+ childrenWithId = SHIMS.listLocatedHdfsStatus(fs, stat.getPath(), hiddenFileFilter);
+ } catch (Throwable t) {
+ LOG.error("Failed to get files with ID; using regular API", t);
+ useFileIds = false;
+ }
+ }
+ if (childrenWithId != null) {
+ for (HdfsFileStatusWithId child : childrenWithId) {
+ if (child.getFileStatus().isDir()) {
+ findOriginals(fs, child.getFileStatus(), original, useFileIds);
+ } else {
+ original.add(child);
+ }
}
} else {
- original.add(stat);
+ List<FileStatus> children = SHIMS.listLocatedStatus(fs, stat.getPath(), hiddenFileFilter);
+ for (FileStatus child : children) {
+ if (child.isDir()) {
+ findOriginals(fs, child, original, useFileIds);
+ } else {
+ original.add(createOriginalObj(null, child));
+ }
+ }
}
}
}
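Both getAcidState() and findOriginals() now share the same listing pattern: try the file-ID-aware shim call, and on any failure log and fall back to the plain listing, wrapping each FileStatus so callers always see HdfsFileStatusWithId. A standalone sketch of that pattern (the class is illustrative; the patch itself wraps via AcidUtils.createOriginalObj):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.hive.shims.ShimLoader;

final class ListingFallbackSketch {
  private static final HadoopShims SHIMS = ShimLoader.getHadoopShims();

  static List<HdfsFileStatusWithId> list(FileSystem fs, Path dir,
      PathFilter filter, boolean useFileIds) throws IOException {
    if (useFileIds) {
      try {
        return SHIMS.listLocatedHdfsStatus(fs, dir, filter);
      } catch (Throwable t) {
        // Fall through to the regular API, as the patch does.
      }
    }
    List<FileStatus> children = SHIMS.listLocatedStatus(fs, dir, filter);
    List<HdfsFileStatusWithId> result = new ArrayList<>(children.size());
    for (final FileStatus child : children) {
      result.add(new HdfsFileStatusWithId() { // no ID available: report null
        @Override public FileStatus getFileStatus() { return child; }
        @Override public Long getFileId() { return null; }
      });
    }
    return result;
  }
}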
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 6ed7872..fd6d2ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -48,12 +48,14 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
import org.apache.hadoop.hive.ql.io.AcidInputFormat;
+import org.apache.hadoop.hive.ql.io.AcidInputFormat.DeltaMetaData;
import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
import org.apache.hadoop.hive.ql.io.InputFormatChecker;
import org.apache.hadoop.hive.ql.io.RecordIdentifier;
import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context;
import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
@@ -63,6 +65,7 @@ import org.apache.hadoop.hive.serde2.SerDeStats;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
@@ -73,6 +76,7 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
@@ -436,26 +440,34 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
static final class SplitInfo extends ACIDSplitStrategy {
private final Context context;
private final FileSystem fs;
- private final FileStatus file;
+ private final HdfsFileStatusWithId fileWithId;
private final FileInfo fileInfo;
private final boolean isOriginal;
private final List<DeltaMetaData> deltas;
private final boolean hasBase;
SplitInfo(Context context, FileSystem fs,
- FileStatus file, FileInfo fileInfo,
+ HdfsFileStatusWithId fileWithId, FileInfo fileInfo,
boolean isOriginal,
List<DeltaMetaData> deltas,
boolean hasBase, Path dir, boolean[] covered) throws IOException {
super(dir, context.numBuckets, deltas, covered);
this.context = context;
this.fs = fs;
- this.file = file;
+ this.fileWithId = fileWithId;
this.fileInfo = fileInfo;
this.isOriginal = isOriginal;
this.deltas = deltas;
this.hasBase = hasBase;
}
+
+ @VisibleForTesting
+ public SplitInfo(Context context, FileSystem fs, FileStatus fileStatus, FileInfo fileInfo,
+ boolean isOriginal, ArrayList<DeltaMetaData> deltas, boolean hasBase, Path dir,
+ boolean[] covered) throws IOException {
+ this(context, fs, AcidUtils.createOriginalObj(null, fileStatus),
+ fileInfo, isOriginal, deltas, hasBase, dir, covered);
+ }
}
/**
@@ -465,14 +477,15 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
static final class ETLSplitStrategy implements SplitStrategy<SplitInfo> {
Context context;
FileSystem fs;
- List<FileStatus> files;
+ List<HdfsFileStatusWithId> files;
boolean isOriginal;
List<DeltaMetaData> deltas;
Path dir;
boolean[] covered;
- public ETLSplitStrategy(Context context, FileSystem fs, Path dir, List<FileStatus> children,
- boolean isOriginal, List<DeltaMetaData> deltas, boolean[] covered) {
+ public ETLSplitStrategy(Context context, FileSystem fs, Path dir,
+ List<HdfsFileStatusWithId> children, boolean isOriginal, List<DeltaMetaData> deltas,
+ boolean[] covered) {
this.context = context;
this.dir = dir;
this.fs = fs;
@@ -516,14 +529,15 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
@Override
public List<SplitInfo> getSplits() throws IOException {
List<SplitInfo> result = Lists.newArrayList();
- for (FileStatus file : files) {
+ for (HdfsFileStatusWithId file : files) {
FileInfo info = null;
if (context.cacheStripeDetails) {
- info = verifyCachedFileInfo(file);
+ info = verifyCachedFileInfo(file.getFileStatus());
}
// ignore files of 0 length
- if (file.getLen() > 0) {
- result.add(new SplitInfo(context, fs, file, info, isOriginal, deltas, true, dir, covered));
+ if (file.getFileStatus().getLen() > 0) {
+ result.add(new SplitInfo(
+ context, fs, file, info, isOriginal, deltas, true, dir, covered));
}
}
return result;
@@ -540,7 +554,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
* as opposed to query execution (split generation does not read or cache file footers).
*/
static final class BISplitStrategy extends ACIDSplitStrategy {
- List<FileStatus> fileStatuses;
+ List<HdfsFileStatusWithId> fileStatuses;
boolean isOriginal;
List<DeltaMetaData> deltas;
FileSystem fs;
@@ -548,7 +562,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
Path dir;
public BISplitStrategy(Context context, FileSystem fs,
- Path dir, List<FileStatus> fileStatuses, boolean isOriginal,
+ Path dir, List<HdfsFileStatusWithId> fileStatuses, boolean isOriginal,
List<DeltaMetaData> deltas, boolean[] covered) {
super(dir, context.numBuckets, deltas, covered);
this.context = context;
@@ -562,11 +576,12 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
@Override
public List<OrcSplit> getSplits() throws IOException {
List<OrcSplit> splits = Lists.newArrayList();
- for (FileStatus fileStatus : fileStatuses) {
+ for (HdfsFileStatusWithId file : fileStatuses) {
+ FileStatus fileStatus = file.getFileStatus();
String[] hosts = SHIMS.getLocationsWithOffset(fs, fileStatus).firstEntry().getValue()
.getHosts();
- OrcSplit orcSplit = new OrcSplit(fileStatus.getPath(), 0, fileStatus.getLen(), hosts,
- null, isOriginal, true, deltas, -1);
+ OrcSplit orcSplit = new OrcSplit(fileStatus.getPath(), file.getFileId(), 0,
+ fileStatus.getLen(), hosts, null, isOriginal, true, deltas, -1);
splits.add(orcSplit);
}
@@ -606,7 +621,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
if (!deltas.isEmpty()) {
for (int b = 0; b < numBuckets; ++b) {
if (!covered[b]) {
- splits.add(new OrcSplit(dir, b, 0, new String[0], null, false, false, deltas, -1));
+ splits.add(new OrcSplit(dir, null, b, 0, new String[0], null, false, false, deltas, -1));
}
}
}
@@ -627,21 +642,23 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
private final Context context;
private final FileSystem fs;
private final Path dir;
+ private final boolean useFileIds;
- FileGenerator(Context context, FileSystem fs, Path dir) {
+ FileGenerator(Context context, FileSystem fs, Path dir, boolean useFileIds) {
this.context = context;
this.fs = fs;
this.dir = dir;
+ this.useFileIds = useFileIds;
}
@Override
public SplitStrategy call() throws IOException {
final SplitStrategy splitStrategy;
AcidUtils.Directory dirInfo = AcidUtils.getAcidState(dir,
- context.conf, context.transactionList);
+ context.conf, context.transactionList, useFileIds);
List<DeltaMetaData> deltas = AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
Path base = dirInfo.getBaseDirectory();
- List<FileStatus> original = dirInfo.getOriginalFiles();
+ List<HdfsFileStatusWithId> original = dirInfo.getOriginalFiles();
boolean[] covered = new boolean[context.numBuckets];
boolean isOriginal = base == null;
@@ -649,17 +666,16 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
if (base != null || !original.isEmpty()) {
// find the base files (original or new style)
- List<FileStatus> children = original;
+ List<HdfsFileStatusWithId> children = original;
if (base != null) {
- children = SHIMS.listLocatedStatus(fs, base,
- AcidUtils.hiddenFileFilter);
+ children = findBaseFiles(base, useFileIds);
}
long totalFileSize = 0;
- for (FileStatus child : children) {
- totalFileSize += child.getLen();
+ for (HdfsFileStatusWithId child : children) {
+ totalFileSize += child.getFileStatus().getLen();
AcidOutputFormat.Options opts = AcidUtils.parseBaseBucketFilename
- (child.getPath(), context.conf);
+ (child.getFileStatus().getPath(), context.conf);
int b = opts.getBucket();
// If the bucket is in the valid range, mark it as covered.
// I wish Hive actually enforced bucketing all of the time.
@@ -700,6 +716,24 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
return splitStrategy;
}
+
+ private List<HdfsFileStatusWithId> findBaseFiles(
+ Path base, boolean useFileIds) throws IOException {
+ if (useFileIds) {
+ try {
+ return SHIMS.listLocatedHdfsStatus(fs, base, AcidUtils.hiddenFileFilter);
+ } catch (Throwable t) {
+ LOG.error("Failed to get files with ID; using regular API", t);
+ }
+ }
+ // Fall back to regular API and create states without ID.
+ List<FileStatus> children = SHIMS.listLocatedStatus(fs, base, AcidUtils.hiddenFileFilter);
+ List<HdfsFileStatusWithId> result = new ArrayList<>(children.size());
+ for (FileStatus child : children) {
+ result.add(AcidUtils.createOriginalObj(null, child));
+ }
+ return result;
+ }
}
/**
@@ -709,6 +743,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
static final class SplitGenerator implements Callable<List<OrcSplit>> {
private final Context context;
private final FileSystem fs;
+ private final HdfsFileStatusWithId fileWithId;
private final FileStatus file;
private final long blockSize;
private final TreeMap<Long, BlockLocation> locations;
@@ -728,8 +763,9 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
public SplitGenerator(SplitInfo splitInfo) throws IOException {
this.context = splitInfo.context;
this.fs = splitInfo.fs;
- this.file = splitInfo.file;
- this.blockSize = file.getBlockSize();
+ this.fileWithId = splitInfo.fileWithId;
+ this.file = this.fileWithId.getFileStatus();
+ this.blockSize = this.file.getBlockSize();
this.fileInfo = splitInfo.fileInfo;
locations = SHIMS.getLocationsWithOffset(fs, file);
this.isOriginal = splitInfo.isOriginal;
@@ -837,8 +873,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
final double splitRatio = (double) length / (double) fileLen;
final long scaledProjSize = projColsUncompressedSize > 0 ?
(long) (splitRatio * projColsUncompressedSize) : fileLen;
- return new OrcSplit(file.getPath(), offset, length, hosts, fileMetaInfo,
- isOriginal, hasBase, deltas, scaledProjSize);
+ return new OrcSplit(file.getPath(), fileWithId.getFileId(), offset, length, hosts,
+ fileMetaInfo, isOriginal, hasBase, deltas, scaledProjSize);
}
/**
@@ -1020,9 +1056,10 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
List<Future<?>> splitFutures = Lists.newArrayList();
// multi-threaded file statuses and split strategy
+ boolean useFileIds = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS);
for (Path dir : getInputPaths(conf)) {
FileSystem fs = dir.getFileSystem(conf);
- FileGenerator fileGenerator = new FileGenerator(context, fs, dir);
+ FileGenerator fileGenerator = new FileGenerator(context, fs, dir, useFileIds);
pathFutures.add(context.threadPool.submit(fileGenerator));
}
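The overall split-generation shape after this change: one FileGenerator Callable per input directory decides on a SplitStrategy, and each strategy is then expanded into OrcSplits that now carry the file ID. A simplified, self-contained sketch of that pipeline shape (types are stand-ins, not Hive's):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

final class SplitPipelineSketch {
  interface SplitStrategy<T> { List<T> getSplits() throws Exception; }

  static <T> List<T> run(List<Callable<SplitStrategy<T>>> generators) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(10);
    try {
      List<Future<SplitStrategy<T>>> strategies = new ArrayList<>();
      for (Callable<SplitStrategy<T>> g : generators) {
        strategies.add(pool.submit(g));     // mirrors context.threadPool.submit(fileGenerator)
      }
      List<T> splits = new ArrayList<>();
      for (Future<SplitStrategy<T>> f : strategies) {
        splits.addAll(f.get().getSplits()); // each strategy expands into splits
      }
      return splits;
    } finally {
      pool.shutdown();
    }
  }
}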
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
index 8cf4cc0..cc03df7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
@@ -25,6 +25,8 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidInputFormat;
import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -39,6 +41,8 @@ import org.apache.hadoop.mapred.FileSplit;
*
*/
public class OrcSplit extends FileSplit {
+ private static final Log LOG = LogFactory.getLog(OrcSplit.class);
+
private ReaderImpl.FileMetaInfo fileMetaInfo;
private boolean hasFooter;
private boolean isOriginal;
@@ -46,7 +50,9 @@ public class OrcSplit extends FileSplit {
private final List<AcidInputFormat.DeltaMetaData> deltas = new ArrayList<>();
private OrcFile.WriterVersion writerVersion;
private long projColsUncompressedSize;
+ private transient Long fileId;
+ static final int HAS_FILEID_FLAG = 8;
static final int BASE_FLAG = 4;
static final int ORIGINAL_FLAG = 2;
static final int FOOTER_FLAG = 1;
@@ -58,10 +64,13 @@ public class OrcSplit extends FileSplit {
super(null, 0, 0, (String[]) null);
}
- public OrcSplit(Path path, long offset, long length, String[] hosts,
+ public OrcSplit(Path path, Long fileId, long offset, long length, String[] hosts,
ReaderImpl.FileMetaInfo fileMetaInfo, boolean isOriginal, boolean hasBase,
List<AcidInputFormat.DeltaMetaData> deltas, long projectedDataSize) {
super(path, offset, length, hosts);
+ // We could avoid serializing the file ID and just replace the path with an inode-based path.
+ // However, that breaks many things because Hive later looks up splits by path.
+ this.fileId = fileId;
this.fileMetaInfo = fileMetaInfo;
hasFooter = this.fileMetaInfo != null;
this.isOriginal = isOriginal;
@@ -77,7 +86,8 @@ public class OrcSplit extends FileSplit {
int flags = (hasBase ? BASE_FLAG : 0) |
(isOriginal ? ORIGINAL_FLAG : 0) |
- (hasFooter ? FOOTER_FLAG : 0);
+ (hasFooter ? FOOTER_FLAG : 0) |
+ (fileId != null ? HAS_FILEID_FLAG : 0);
out.writeByte(flags);
out.writeInt(deltas.size());
for(AcidInputFormat.DeltaMetaData delta: deltas) {
@@ -99,6 +109,9 @@ public class OrcSplit extends FileSplit {
footerBuff.limit() - footerBuff.position());
WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId());
}
+ if (fileId != null) {
+ out.writeLong(fileId.longValue());
+ }
}
@Override
@@ -110,6 +123,7 @@ public class OrcSplit extends FileSplit {
hasFooter = (FOOTER_FLAG & flags) != 0;
isOriginal = (ORIGINAL_FLAG & flags) != 0;
hasBase = (BASE_FLAG & flags) != 0;
+ boolean hasFileId = (HAS_FILEID_FLAG & flags) != 0;
deltas.clear();
int numDeltas = in.readInt();
@@ -134,6 +148,9 @@ public class OrcSplit extends FileSplit {
fileMetaInfo = new ReaderImpl.FileMetaInfo(compressionType, bufferSize,
metadataSize, footerBuff, writerVersion);
}
+ if (hasFileId) {
+ fileId = in.readLong();
+ }
}
ReaderImpl.FileMetaInfo getFileMetaInfo(){
@@ -159,4 +176,8 @@ public class OrcSplit extends FileSplit {
public long getProjectedColumnsUncompressedSize() {
return projColsUncompressedSize;
}
+
+ public Long getFileId() {
+ return fileId;
+ }
}
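Serialization stays backward compatible: a present file ID is announced by a new bit in the existing flags byte, and its eight bytes are appended after the current payload, so splits written without an ID deserialize exactly as before. A standalone sketch of the flag layout (mirroring the constants above; illustrative only):

final class SplitFlagsSketch {
  static final int FOOTER_FLAG = 1, ORIGINAL_FLAG = 2, BASE_FLAG = 4, HAS_FILEID_FLAG = 8;

  static int encode(boolean hasBase, boolean isOriginal, boolean hasFooter, Long fileId) {
    return (hasBase ? BASE_FLAG : 0) | (isOriginal ? ORIGINAL_FLAG : 0)
        | (hasFooter ? FOOTER_FLAG : 0) | (fileId != null ? HAS_FILEID_FLAG : 0);
  }

  static boolean hasFileId(int flags) {
    return (flags & HAS_FILEID_FLAG) != 0; // readFields() reads the long only in this case
  }
}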
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 8e431b2..02fa725 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.RecordIdentifier;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
@@ -133,7 +134,8 @@ public class CompactorMR {
// and discovering that in getSplits is too late as we then have no way to pass it to our
// mapper.
- AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, txns);
+ AcidUtils.Directory dir = AcidUtils.getAcidState(
+ new Path(sd.getLocation()), conf, txns, false);
StringableList dirsToSearch = new StringableList();
Path baseDir = null;
if (isMajor) {
@@ -141,12 +143,13 @@ public class CompactorMR {
// partition is just now being converted to ACID.
baseDir = dir.getBaseDirectory();
if (baseDir == null) {
- List<FileStatus> originalFiles = dir.getOriginalFiles();
+ List<HdfsFileStatusWithId> originalFiles = dir.getOriginalFiles();
if (!(originalFiles == null) && !(originalFiles.size() == 0)) {
// There are original format files
- for (FileStatus stat : originalFiles) {
- dirsToSearch.add(stat.getPath());
- LOG.debug("Adding original file " + stat.getPath().toString() + " to dirs to search");
+ for (HdfsFileStatusWithId stat : originalFiles) {
+ Path path = stat.getFileStatus().getPath();
+ dirsToSearch.add(path);
+ LOG.debug("Adding original file " + path + " to dirs to search");
}
// Set base to the location so that the input format reads the original files.
baseDir = new Path(sd.getLocation());
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index 73715c6..9bf725d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;
import org.apache.hadoop.hive.metastore.txn.TxnHandler;
import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
@@ -223,7 +224,7 @@ public class Initiator extends CompactorThread {
boolean noBase = false;
Path location = new Path(sd.getLocation());
FileSystem fs = location.getFileSystem(conf);
- AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, txns);
+ AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, txns, false);
Path base = dir.getBaseDirectory();
long baseSize = 0;
FileStatus stat = null;
@@ -236,9 +237,9 @@ public class Initiator extends CompactorThread {
baseSize = sumDirSize(fs, base);
}
- List<FileStatus> originals = dir.getOriginalFiles();
- for (FileStatus origStat : originals) {
- baseSize += origStat.getLen();
+ List<HdfsFileStatusWithId> originals = dir.getOriginalFiles();
+ for (HdfsFileStatusWithId origStat : originals) {
+ baseSize += origStat.getFileStatus().getLen();
}
long deltaSize = 0;
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
index f8ded12..b6ba862 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFile;
import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFileSystem;
import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockPath;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.junit.Test;
import java.util.List;
@@ -102,13 +103,14 @@ public class TestAcidUtils {
assertEquals(null, dir.getBaseDirectory());
assertEquals(0, dir.getCurrentDirectories().size());
assertEquals(0, dir.getObsolete().size());
- List<FileStatus> result = dir.getOriginalFiles();
+ List<HdfsFileStatusWithId> result = dir.getOriginalFiles();
assertEquals(5, result.size());
- assertEquals("mock:/tbl/part1/000000_0", result.get(0).getPath().toString());
- assertEquals("mock:/tbl/part1/000001_1", result.get(1).getPath().toString());
- assertEquals("mock:/tbl/part1/000002_0", result.get(2).getPath().toString());
- assertEquals("mock:/tbl/part1/random", result.get(3).getPath().toString());
- assertEquals("mock:/tbl/part1/subdir/000000_0", result.get(4).getPath().toString());
+ assertEquals("mock:/tbl/part1/000000_0", result.get(0).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/000001_1", result.get(1).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/000002_0", result.get(2).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/random", result.get(3).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/subdir/000000_0",
+ result.get(4).getFileStatus().getPath().toString());
}
@Test
@@ -136,13 +138,14 @@ public class TestAcidUtils {
obsolete.get(0).getPath().toString());
assertEquals("mock:/tbl/part1/delta_029_029",
obsolete.get(1).getPath().toString());
- List<FileStatus> result = dir.getOriginalFiles();
+ List<HdfsFileStatusWithId> result = dir.getOriginalFiles();
assertEquals(5, result.size());
- assertEquals("mock:/tbl/part1/000000_0", result.get(0).getPath().toString());
- assertEquals("mock:/tbl/part1/000001_1", result.get(1).getPath().toString());
- assertEquals("mock:/tbl/part1/000002_0", result.get(2).getPath().toString());
- assertEquals("mock:/tbl/part1/random", result.get(3).getPath().toString());
- assertEquals("mock:/tbl/part1/subdir/000000_0", result.get(4).getPath().toString());
+ assertEquals("mock:/tbl/part1/000000_0", result.get(0).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/000001_1", result.get(1).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/000002_0", result.get(2).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/random", result.get(3).getFileStatus().getPath().toString());
+ assertEquals("mock:/tbl/part1/subdir/000000_0",
+ result.get(4).getFileStatus().getPath().toString());
List<AcidUtils.ParsedDelta> deltas = dir.getCurrentDirectories();
assertEquals(2, deltas.size());
AcidUtils.ParsedDelta delt = deltas.get(0);
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 0c12c89..547e799 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -483,7 +483,7 @@ public class TestInputOutputFormat {
final OrcInputFormat.Context context = new OrcInputFormat.Context(
conf, n);
OrcInputFormat.FileGenerator gen = new OrcInputFormat.FileGenerator(
- context, fs, new MockPath(fs, "mock:/a/b"));
+ context, fs, new MockPath(fs, "mock:/a/b"), false);
final SplitStrategy splitStrategy = gen.call();
assertTrue(
String.format(
@@ -507,7 +507,7 @@ public class TestInputOutputFormat {
new MockFile("mock:/a/b/part-04", 1000, new byte[0]));
OrcInputFormat.FileGenerator gen =
new OrcInputFormat.FileGenerator(context, fs,
- new MockPath(fs, "mock:/a/b"));
+ new MockPath(fs, "mock:/a/b"), false);
SplitStrategy splitStrategy = gen.call();
assertEquals(true, splitStrategy instanceof OrcInputFormat.BISplitStrategy);
@@ -520,7 +520,7 @@ public class TestInputOutputFormat {
new MockFile("mock:/a/b/.part-03", 1000, new byte[1000]),
new MockFile("mock:/a/b/part-04", 1000, new byte[1000]));
gen = new OrcInputFormat.FileGenerator(context, fs,
- new MockPath(fs, "mock:/a/b"));
+ new MockPath(fs, "mock:/a/b"), false);
splitStrategy = gen.call();
assertEquals(true, splitStrategy instanceof OrcInputFormat.ETLSplitStrategy);
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
----------------------------------------------------------------------
diff --git a/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java b/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
index ffffcb7..a56309f 100644
--- a/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
+++ b/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
@@ -722,4 +722,15 @@ public class Hadoop20SShims extends HadoopShimsSecure {
Token<?> fsToken = fs.getDelegationToken(uname);
cred.addToken(fsToken.getService(), fsToken);
}
+
+ @Override
+ public List<HdfsFileStatusWithId> listLocatedHdfsStatus(
+ FileSystem fs, Path path, PathFilter filter) throws IOException {
+ throw new UnsupportedOperationException("Not supported on old versions");
+ }
+
+ @Override
+ public long getFileId(FileSystem fs, String path) throws IOException {
+ throw new UnsupportedOperationException("Not supported on old versions");
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 9eae0ac..e5be8d6 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -61,10 +61,13 @@ import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.io.LongWritable;
@@ -662,6 +665,64 @@ public class Hadoop23Shims extends HadoopShimsSecure {
return result;
}
+ private static final class HdfsFileStatusWithIdImpl implements HdfsFileStatusWithId {
+ private final LocatedFileStatus lfs;
+ private final long fileId;
+
+ public HdfsFileStatusWithIdImpl(LocatedFileStatus lfs, long fileId) {
+ this.lfs = lfs;
+ this.fileId = fileId;
+ }
+
+ @Override
+ public FileStatus getFileStatus() {
+ return lfs;
+ }
+
+ @Override
+ public Long getFileId() {
+ return fileId;
+ }
+ }
+
+ @Override
+ public List<HdfsFileStatusWithId> listLocatedHdfsStatus(
+ FileSystem fs, Path p, PathFilter filter) throws IOException {
+ DistributedFileSystem dfs = ensureDfs(fs);
+ DFSClient dfsc = dfs.getClient();
+ final String src = p.toUri().getPath();
+ DirectoryListing current = dfsc.listPaths(src,
+ org.apache.hadoop.hdfs.protocol.HdfsFileStatus.EMPTY_NAME, true);
+ if (current == null) { // the directory does not exist
+ throw new FileNotFoundException("File " + p + " does not exist.");
+ }
+ final URI fsUri = fs.getUri();
+ List<HdfsFileStatusWithId> result = new ArrayList<HdfsFileStatusWithId>(
+ current.getPartialListing().length);
+ while (current != null) {
+ org.apache.hadoop.hdfs.protocol.HdfsFileStatus[] hfss = current.getPartialListing();
+ for (int i = 0; i < hfss.length; ++i) {
+ HdfsLocatedFileStatus next = (HdfsLocatedFileStatus)(hfss[i]);
+ if (filter != null) {
+ Path filterPath = next.getFullPath(p).makeQualified(fsUri, null);
+ if (!filter.accept(filterPath)) continue;
+ }
+ LocatedFileStatus lfs = next.makeQualifiedLocated(fsUri, p);
+ result.add(new HdfsFileStatusWithIdImpl(lfs, next.getFileId()));
+ }
+ current = current.hasMore() ? dfsc.listPaths(src, current.getLastName(), true) : null;
+ }
+ return result;
+ }
+
+ private DistributedFileSystem ensureDfs(FileSystem fs) {
+ if (!(fs instanceof DistributedFileSystem)) {
+ throw new UnsupportedOperationException("Only supported for DFS; got " + fs.getClass());
+ }
+ DistributedFileSystem dfs = (DistributedFileSystem)fs;
+ return dfs;
+ }
+
@Override
public BlockLocation[] getLocations(FileSystem fs,
FileStatus status) throws IOException {
@@ -1352,4 +1413,9 @@ public class Hadoop23Shims extends HadoopShimsSecure {
// Use method addDelegationTokens instead of getDelegationToken to get all the tokens including KMS.
fs.addDelegationTokens(uname, cred);
}
+
+ @Override
+ public long getFileId(FileSystem fs, String path) throws IOException {
+ return ensureDfs(fs).getClient().getFileInfo(path).getFileId();
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/3b6825b5/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 74785e5..2b6f322 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -256,6 +256,10 @@ public interface HadoopShims {
List<FileStatus> listLocatedStatus(FileSystem fs, Path path,
PathFilter filter) throws IOException;
+
+ List<HdfsFileStatusWithId> listLocatedHdfsStatus(
+ FileSystem fs, Path path, PathFilter filter) throws IOException;
+
/**
* For file status returned by listLocatedStatus, convert them into a list
* of block locations.
@@ -316,6 +320,11 @@ public interface HadoopShims {
public void debugLog();
}
+ public interface HdfsFileStatusWithId {
+ public FileStatus getFileStatus();
+ public Long getFileId();
+ }
+
public HCatHadoopShims getHCatShim();
public interface HCatHadoopShims {
@@ -731,4 +740,10 @@ public interface HadoopShims {
* @throws IOException If an error occurred on adding the token.
*/
public void addDelegationTokens(FileSystem fs, Credentials cred, String uname) throws IOException;
+
+ /**
+ * Gets file ID. Only supported on hadoop-2.
+ * @return inode ID of the file.
+ */
+ long getFileId(FileSystem fs, String path) throws IOException;
}
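A caller-side sketch of the new shim surface (illustrative): on Hadoop 2 the listing carries HDFS inode IDs, while the Hadoop 0.20S shim throws UnsupportedOperationException, which is why AcidUtils and OrcInputFormat above guard the call and fall back.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
import org.apache.hadoop.hive.shims.ShimLoader;

final class ShimUsageSketch {
  static void printIds(FileSystem fs, Path dir) throws IOException {
    List<HdfsFileStatusWithId> files =
        ShimLoader.getHadoopShims().listLocatedHdfsStatus(fs, dir, null);
    for (HdfsFileStatusWithId f : files) {
      // getFileId() is Long, not long: null means "no ID available".
      System.out.println(f.getFileStatus().getPath() + " -> " + f.getFileId());
    }
  }
}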
[3/7] hive git commit: HIVE-11317 - ACID: Improve transaction Abort logic due to timeout (Eugene Koifman, reviewed by Alan Gates)
Posted by se...@apache.org.
HIVE-11317 - ACID: Improve transaction Abort logic due to timeout (Eugene Koifman, reviewed by Alan Gates)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/97a6cd35
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/97a6cd35
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/97a6cd35
Branch: refs/heads/hbase-metastore
Commit: 97a6cd35a444315885008f11c20c7c28249bd42c
Parents: e8329ee
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Sat Aug 15 10:22:55 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Sat Aug 15 10:22:55 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 4 +
.../deployers/config/hive/hive-site.mysql.xml | 22 +++
.../hive/hcatalog/streaming/TestStreaming.java | 54 ++++++-
.../hadoop/hive/metastore/HiveMetaStore.java | 18 +++
.../hive/metastore/HouseKeeperService.java | 39 +++++
.../hadoop/hive/metastore/txn/TxnHandler.java | 153 +++++++++++++------
.../hive/metastore/txn/TestTxnHandler.java | 7 +-
.../java/org/apache/hadoop/hive/ql/Driver.java | 15 +-
.../hive/ql/txn/AcidHouseKeeperService.java | 104 +++++++++++++
.../hive/ql/txn/compactor/CompactorMR.java | 6 +-
.../hadoop/hive/ql/txn/compactor/Initiator.java | 1 +
.../hadoop/hive/ql/txn/compactor/Worker.java | 2 +-
.../apache/hadoop/hive/ql/TestTxnCommands.java | 21 +++
.../apache/hadoop/hive/ql/TestTxnCommands2.java | 1 +
.../hive/ql/lockmgr/TestDbTxnManager.java | 35 +++--
15 files changed, 421 insertions(+), 61 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 730f5be..9a6781b 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1507,6 +1507,10 @@ public class HiveConf extends Configuration {
HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms",
new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"),
+ HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s",
+ new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"),
+ HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s",
+ new TimeValidator(TimeUnit.MILLISECONDS), "Time interval describing how often the reaper runs"),
// For HBase storage handler
HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true,
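A minimal sketch of how a reaper consumes the two new settings (the real consumer is the AcidHouseKeeperService added elsewhere in this commit; the class below is illustrative):

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

final class ReaperScheduleSketch {
  static void readSchedule(HiveConf conf) {
    long startDelayMs =
        conf.getTimeVar(ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, TimeUnit.MILLISECONDS);
    long intervalMs =
        conf.getTimeVar(ConfVars.HIVE_TIMEDOUT_TXN_REAPER_INTERVAL, TimeUnit.MILLISECONDS);
    // startDelayMs delays the first run after metastore start; intervalMs
    // spaces the runs thereafter (hand both to a scheduled executor).
  }
}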
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml
index 70ccc31..b6f1ab7 100644
--- a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml
+++ b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mysql.xml
@@ -62,6 +62,28 @@
<name>hive.exec.dynamic.partition.mode</name>
<value>nonstrict</value>
</property>
+ <property>
+ <name>hive.compactor.initiator.on</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>hive.compactor.worker.threads</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>hive.timedout.txn.reaper.start</name>
+ <value>2s</value>
+ </property>
+<!-- <property>
+ <name>hive.txn.timeout</name>
+ <value>60s</value>
+ </property>
+ -->
+ <property>
+ <name>hive.timedout.txn.reaper.interval</name>
+ <value>30s</value>
+ </property>
+
<!--end ACID related properties-->
<!--
<property>
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
index c0af533..c28d4aa 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java
@@ -18,7 +18,6 @@
package org.apache.hive.hcatalog.streaming;
-import junit.framework.Assert;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
@@ -36,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
+import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.InputFormat;
@@ -51,6 +52,7 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.thrift.TException;
import org.junit.After;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -67,6 +69,7 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.TimeUnit;
public class TestStreaming {
@@ -301,6 +304,55 @@ public class TestStreaming {
connection.close();
}
+ /**
+ * Check that transactions that have not heartbeated and have timed out get properly aborted.
+ * @throws Exception
+ */
+ @Test
+ public void testTimeOutReaper() throws Exception {
+ HiveEndPoint endPt = new HiveEndPoint(metaStoreURI, dbName2, tblName2, null);
+ DelimitedInputWriter writer = new DelimitedInputWriter(fieldNames2,",", endPt);
+ StreamingConnection connection = endPt.newConnection(false, null);
+
+ TransactionBatch txnBatch = connection.fetchTransactionBatch(5, writer);
+ txnBatch.beginNextTransaction();
+ conf.setTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, 0, TimeUnit.SECONDS);
+ //ensure txn times out
+ conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.MILLISECONDS);
+ AcidHouseKeeperService houseKeeperService = new AcidHouseKeeperService();
+ houseKeeperService.start(conf);
+ while(houseKeeperService.getIsAliveCounter() <= Integer.MIN_VALUE) {
+ Thread.sleep(100);//make sure it has run at least once
+ }
+ houseKeeperService.stop();
+ try {
+ //should fail because the TransactionBatch timed out
+ txnBatch.commit();
+ }
+ catch(TransactionError e) {
+ Assert.assertTrue("Expected aborted transaction", e.getCause() instanceof TxnAbortedException);
+ }
+ txnBatch.close();
+ txnBatch = connection.fetchTransactionBatch(10, writer);
+ txnBatch.beginNextTransaction();
+ txnBatch.commit();
+ txnBatch.beginNextTransaction();
+ int lastCount = houseKeeperService.getIsAliveCounter();
+ houseKeeperService.start(conf);
+ while(houseKeeperService.getIsAliveCounter() <= lastCount) {
+ Thread.sleep(100);//make sure it has run at least once
+ }
+ houseKeeperService.stop();
+ try {
+ //should fail because the TransactionBatch timed out
+ txnBatch.commit();
+ }
+ catch(TransactionError e) {
+ Assert.assertTrue("Expected aborted transaction", e.getCause() instanceof TxnAbortedException);
+ }
+ txnBatch.close();
+ connection.close();
+ }
@Test
public void testTransactionBatchEmptyAbort() throws Exception {
// 1) to partitioned table
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 65117c4..ae500bf 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -6225,6 +6225,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
startCompactorInitiator(conf);
startCompactorWorkers(conf);
startCompactorCleaner(conf);
+ startHouseKeeperService(conf);
} catch (Throwable e) {
LOG.error("Failure when starting the compactor, compactions may not happen, " +
StringUtils.stringifyException(e));
@@ -6284,4 +6285,21 @@ public class HiveMetaStore extends ThriftHiveMetastore {
thread.init(new AtomicBoolean(), new AtomicBoolean());
thread.start();
}
+ private static void startHouseKeeperService(HiveConf conf) throws Exception {
+ if(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) {
+ return;
+ }
+ Class c = Class.forName("org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService");
+ //todo: when metastore adds orderly-shutdown logic, houseKeeper.stop()
+ //should be called from it
+ HouseKeeperService houseKeeper = (HouseKeeperService)c.newInstance();
+ try {
+ houseKeeper.start(conf);
+ }
+ catch (Exception ex) {
+ LOG.fatal("Failed to start " + houseKeeper.getClass() +
+ ". The system will not handle " + houseKeeper.getServiceDescription() +
+ ". Root Cause: ", ex);
+ }
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java
new file mode 100644
index 0000000..eb4ea93
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HouseKeeperService.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.conf.HiveConf;
+
+/**
+ * Runs arbitrary background logic inside the metastore service.
+ */
+@InterfaceAudience.LimitedPrivate({"Hive"})
+@InterfaceStability.Evolving
+public interface HouseKeeperService {
+ public void start(HiveConf hiveConf) throws Exception;
+ /**
+ * Should perform an orderly shutdown.
+ */
+ public void stop();
+ /**
+ * Returns a short description of the services this module provides.
+ */
+ public String getServiceDescription();
+}
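A minimal sketch of what an implementation of this interface could look like (the class below is hypothetical and for illustration only; the real implementation added by this commit is AcidHouseKeeperService further down). Note that HiveMetaStore.startHouseKeeperService() above loads the concrete class by name via reflection, so the metastore module needs no compile-time dependency on the ql module where the implementation lives.

package org.apache.hadoop.hive.metastore;

import org.apache.hadoop.hive.conf.HiveConf;

// Hypothetical no-op implementation, for illustration only.
public class NoopHouseKeeperService implements HouseKeeperService {
  @Override
  public void start(HiveConf hiveConf) throws Exception {
    // schedule background work here; this sketch does nothing
  }
  @Override
  public void stop() {
    // cancel any work scheduled by start()
  }
  @Override
  public String getServiceDescription() {
    return "No-op housekeeping (example only)";
  }
}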
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 88e007c..795b2d9 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -46,6 +46,11 @@ import java.util.concurrent.TimeUnit;
/**
* A handler to answer transaction related calls that come into the metastore
* server.
+ *
+ * Note on log messages: Please include txnid:X and lockid info
+ * {@link org.apache.hadoop.hive.common.JavaUtils#lockIdToString(long)} in all messages.
+ * The txnid:X and lockid:Y formats match how Thrift object toString() methods are generated,
+ * so keeping the format consistent makes grepping the logs much easier.
*/
public class TxnHandler {
// Compactor states
@@ -212,7 +217,6 @@ public class TxnHandler {
Statement stmt = null;
try {
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
- timeOutTxns(dbConn);
stmt = dbConn.createStatement();
String s = "select ntxn_next - 1 from NEXT_TXN_ID";
LOG.debug("Going to execute query <" + s + ">");
@@ -463,8 +467,6 @@ public class TxnHandler {
try {
dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
long extLockId = rqst.getLockid();
- // Clean up timed out locks
- timeOutLocks(dbConn);
// Heartbeat on the lockid first, to assure that our lock is still valid.
// Then look up the lock info (hopefully in the cache). If these locks
@@ -1361,8 +1363,6 @@ public class TxnHandler {
// and prevent other operations (such as committing transactions, showing locks,
// etc.) that should not interfere with this one.
synchronized (lockLock) {
- // Clean up timed out locks before we attempt to acquire any.
- timeOutLocks(dbConn);
Statement stmt = null;
try {
stmt = dbConn.createStatement();
@@ -1732,15 +1732,22 @@ public class TxnHandler {
LOG.debug("Going to execute query <" + s + ">");
ResultSet rs = stmt.executeQuery(s);
if (!rs.next()) {
+ s = "select count(*) from COMPLETED_TXN_COMPONENTS where CTC_TXNID = " + txnid;
+ ResultSet rs2 = stmt.executeQuery(s);
+ boolean alreadyCommitted = rs2.next() && rs2.getInt(1) > 0;
LOG.debug("Going to rollback");
dbConn.rollback();
+ if(alreadyCommitted) {
+ //makes the message more informative - helps to find bugs in client code
+ throw new NoSuchTxnException("Transaction " + JavaUtils.txnIdToString(txnid) + " is already committed.");
+ }
throw new NoSuchTxnException("No such transaction " + JavaUtils.txnIdToString(txnid));
}
if (rs.getString(1).charAt(0) == TXN_ABORTED) {
LOG.debug("Going to rollback");
dbConn.rollback();
throw new TxnAbortedException("Transaction " + JavaUtils.txnIdToString(txnid) +
- " already aborted");
+ " already aborted");//todo: add time of abort, which is not currently tracked
}
s = "update TXNS set txn_last_heartbeat = " + now +
" where txn_id = " + txnid;
@@ -1802,61 +1809,121 @@ public class TxnHandler {
}
}
- // Clean time out locks from the database. This does a commit,
+ // Clean timed-out locks from the database that are not associated with a transaction, i.e. locks
+ // for read-only autoCommit=true statements. This does a commit,
// and thus should be done before any calls to heartbeat that will leave
// open transactions.
- private void timeOutLocks(Connection dbConn) throws SQLException, MetaException {
- long now = getDbTime(dbConn);
+ private void timeOutLocks(Connection dbConn) {
Statement stmt = null;
try {
+ long now = getDbTime(dbConn);
stmt = dbConn.createStatement();
// Remove any timed out locks from the table.
String s = "delete from HIVE_LOCKS where hl_last_heartbeat < " +
- (now - timeout);
+ (now - timeout) + " and (hl_txnid = 0 or hl_txnid is NULL)";//when txnid is > 0, the lock is
+ //associated with a txn and is handled by performTimeOuts()
+ //want to avoid expiring locks for a txn w/o expiring the txn itself
LOG.debug("Going to execute update <" + s + ">");
- stmt.executeUpdate(s);
+ int deletedLocks = stmt.executeUpdate(s);
+ if(deletedLocks > 0) {
+ LOG.info("Deleted " + deletedLocks + " locks from HIVE_LOCKS due to timeout");
+ }
LOG.debug("Going to commit");
dbConn.commit();
+ }
+ catch(SQLException ex) {
+ LOG.error("Failed to purge timedout locks due to: " + getMessage(ex), ex);
+ }
+ catch(Exception ex) {
+ LOG.error("Failed to purge timedout locks due to: " + ex.getMessage(), ex);
} finally {
closeStmt(stmt);
}
}
- // Abort timed out transactions. This does a commit,
- // and thus should be done before any calls to heartbeat that will leave
- // open transactions on the underlying database.
- private void timeOutTxns(Connection dbConn) throws SQLException, MetaException, RetryException {
- long now = getDbTime(dbConn);
+ /**
+ * Suppose you have a query "select a,b from T" and you want to limit the result set
+ * to the first 5 rows. The mechanism for doing that differs across databases.
+ * Pass {@code noSelectsqlQuery} as "a,b from T" and this method will return the
+ * appropriately modified row-limiting query.
+ */
+ private String addLimitClause(Connection dbConn, int numRows, String noSelectsqlQuery) throws MetaException {
+ DatabaseProduct prod = determineDatabaseProduct(dbConn);
+ switch (prod) {
+ case DERBY:
+ //http://db.apache.org/derby/docs/10.7/ref/rrefsqljoffsetfetch.html
+ return "select " + noSelectsqlQuery + " fetch first " + numRows + " rows only";
+ case MYSQL:
+ //https://dev.mysql.com/doc/refman/5.0/en/select.html
+ case POSTGRES:
+ //http://www.postgresql.org/docs/7.3/static/queries-limit.html
+ return "select " + noSelectsqlQuery + " limit " + numRows;
+ case ORACLE:
+ //newer versions (12c and later) support OFFSET/FETCH
+ return "select * from (select " + noSelectsqlQuery + ") where rownum <= " + numRows;
+ case SQLSERVER:
+ //newer versions (2012 and later) support OFFSET/FETCH
+ //https://msdn.microsoft.com/en-us/library/ms189463.aspx
+ return "select TOP(" + numRows + ") " + noSelectsqlQuery;
+ default:
+ String msg = "Unrecognized database product name <" + prod + ">";
+ LOG.error(msg);
+ throw new MetaException(msg);
+ }
+ }
+ /**
+ * This will find transactions that have timed out and abort them.
+ * It will also delete locks which are not associated with a transaction and have timed out.
+ * Tries to keep transactions (against the metastore db) small to reduce lock contention.
+ */
+ public void performTimeOuts() {
+ Connection dbConn = null;
Statement stmt = null;
+ ResultSet rs = null;
try {
- stmt = dbConn.createStatement();
- // Abort any timed out locks from the table.
- String s = "select txn_id from TXNS where txn_state = '" + TXN_OPEN +
- "' and txn_last_heartbeat < " + (now - timeout);
- LOG.debug("Going to execute query <" + s + ">");
- ResultSet rs = stmt.executeQuery(s);
- List<Long> deadTxns = new ArrayList<Long>();
- // Limit the number of timed out transactions we do in one pass to keep from generating a
- // huge delete statement
- do {
- deadTxns.clear();
- for (int i = 0; i < TIMED_OUT_TXN_ABORT_BATCH_SIZE && rs.next(); i++) {
- deadTxns.add(rs.getLong(1));
+ dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
+ long now = getDbTime(dbConn);
+ timeOutLocks(dbConn);
+ while(true) {
+ stmt = dbConn.createStatement();
+ String s = " txn_id from TXNS where txn_state = '" + TXN_OPEN +
+ "' and txn_last_heartbeat < " + (now - timeout);
+ s = addLimitClause(dbConn, 2500, s);
+ LOG.debug("Going to execute query <" + s + ">");
+ rs = stmt.executeQuery(s);
+ if(!rs.next()) {
+ return;//no more timed-out txns
}
- // We don't care whether all of the transactions get deleted or not,
- // if some didn't it most likely means someone else deleted them in the interum
- if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
- } while (deadTxns.size() > 0);
- LOG.debug("Going to commit");
- dbConn.commit();
- } catch (SQLException e) {
- LOG.debug("Going to rollback");
- rollbackDBConn(dbConn);
- checkRetryable(dbConn, e, "abortTxn");
- throw new MetaException("Unable to update transaction database "
- + StringUtils.stringifyException(e));
- } finally {
- closeStmt(stmt);
+ List<List<Long>> timedOutTxns = new ArrayList<>();
+ List<Long> currentBatch = new ArrayList<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE);
+ timedOutTxns.add(currentBatch);
+ do {
+ currentBatch.add(rs.getLong(1));
+ if(currentBatch.size() == TIMED_OUT_TXN_ABORT_BATCH_SIZE) {
+ currentBatch = new ArrayList<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE);
+ timedOutTxns.add(currentBatch);
+ }
+ } while(rs.next());
+ close(rs, stmt, null);
+ dbConn.commit();
+ for(List<Long> batchToAbort : timedOutTxns) {
+ abortTxns(dbConn, batchToAbort);
+ dbConn.commit();
+ //todo: add TXNS.COMMENT field and set it to 'aborted by system due to timeout'
+ LOG.info("Aborted the following transactions due to timeout: " + batchToAbort.toString());
+ }
+ int numTxnsAborted = (timedOutTxns.size() - 1) * TIMED_OUT_TXN_ABORT_BATCH_SIZE +
+ timedOutTxns.get(timedOutTxns.size() - 1).size();
+ LOG.info("Aborted " + numTxnsAborted + " transactions due to timeout");
+ }
+ } catch (SQLException ex) {
+ LOG.warn("Aborting timedout transactions failed due to " + getMessage(ex), ex);
+ }
+ catch(MetaException e) {
+ LOG.warn("Aborting timedout transactions failed due to " + e.getMessage(), e);
+ }
+ finally {
+ close(rs, stmt, dbConn);
}
}
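To make addLimitClause concrete, here is a self-contained paraphrase of its switch, with the SQL it produces for noSelectsqlQuery = "a,b from T" and numRows = 5 (DatabaseProduct below is a local stand-in for the real metastore enum, so this sketch compiles on its own):

public class LimitClauseExample {
  enum DatabaseProduct { DERBY, MYSQL, POSTGRES, ORACLE, SQLSERVER }

  static String addLimit(DatabaseProduct prod, int numRows, String noSelectSql) {
    switch (prod) {
      case DERBY:     // select a,b from T fetch first 5 rows only
        return "select " + noSelectSql + " fetch first " + numRows + " rows only";
      case MYSQL:     // select a,b from T limit 5
      case POSTGRES:
        return "select " + noSelectSql + " limit " + numRows;
      case ORACLE:    // select * from (select a,b from T) where rownum <= 5
        return "select * from (select " + noSelectSql + ") where rownum <= " + numRows;
      case SQLSERVER: // select TOP(5) a,b from T
        return "select TOP(" + numRows + ") " + noSelectSql;
      default:
        throw new IllegalArgumentException("Unrecognized database product " + prod);
    }
  }

  public static void main(String[] args) {
    for (DatabaseProduct p : DatabaseProduct.values()) {
      System.out.println(p + ": " + addLimit(p, 5, "a,b from T"));
    }
  }
}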
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index 6dc0bd3..59114fe 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -986,7 +986,8 @@ public class TestTxnHandler {
LockRequest req = new LockRequest(components, "me", "localhost");
LockResponse res = txnHandler.lock(req);
assertTrue(res.getState() == LockState.ACQUIRED);
- Thread.currentThread().sleep(10);
+ Thread.sleep(10);
+ txnHandler.performTimeOuts();
txnHandler.checkLock(new CheckLockRequest(res.getLockid()));
fail("Told there was a lock, when it should have timed out.");
} catch (NoSuchLockException e) {
@@ -1000,8 +1001,8 @@ public class TestTxnHandler {
long timeout = txnHandler.setTimeout(1);
try {
txnHandler.openTxns(new OpenTxnRequest(503, "me", "localhost"));
- Thread.currentThread().sleep(10);
- txnHandler.getOpenTxns();
+ Thread.sleep(10);
+ txnHandler.performTimeOuts();
GetOpenTxnsInfoResponse rsp = txnHandler.getOpenTxnsInfo();
int numAborted = 0;
for (TxnInfo txnInfo : rsp.getOpen_txns()) {
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index c0c1b2e..4030075 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -977,11 +977,19 @@ public class Driver implements CommandProcessor {
return 10;
}
- boolean existingTxn = txnMgr.isTxnOpen();
+ boolean initiatingTransaction = false;
+ boolean readOnlyQueryInAutoCommit = false;
if((txnMgr.getAutoCommit() && haveAcidWrite()) || plan.getOperation() == HiveOperation.START_TRANSACTION ||
(!txnMgr.getAutoCommit() && startTxnImplicitly)) {
+ if(txnMgr.isTxnOpen()) {
+ throw new RuntimeException("Already have an open transaction txnid:" + txnMgr.getCurrentTxnId());
+ }
// We are writing to tables in an ACID compliant way, so we need to open a transaction
txnMgr.openTxn(userFromUGI);
+ initiatingTransaction = true;
+ }
+ else {
+ readOnlyQueryInAutoCommit = txnMgr.getAutoCommit() && plan.getOperation() == HiveOperation.QUERY && !haveAcidWrite();
}
// Set the transaction id in all of the acid file sinks
if (haveAcidWrite()) {
@@ -997,9 +1005,9 @@ public class Driver implements CommandProcessor {
For multi-stmt txns this is not sufficient and will be managed via WriteSet tracking
in the lock manager.*/
txnMgr.acquireLocks(plan, ctx, userFromUGI);
- if(!existingTxn) {
+ if(initiatingTransaction || readOnlyQueryInAutoCommit) {
//For multi-stmt txns we should record the snapshot when txn starts but
- // don't update it after that until txn completes. Thus the check for {@code existingTxn}
+ // don't update it after that until txn completes. Thus the check for {@code initiatingTransaction}
//For autoCommit=true, Read-only statements, txn is implicit, i.e. lock in the snapshot
//for each statement.
recordValidTxns();
@@ -1294,6 +1302,7 @@ public class Driver implements CommandProcessor {
}
private CommandProcessorResponse rollback(CommandProcessorResponse cpr) {
+ //console.printError(cpr.toString());
try {
releaseLocksAndCommitOrRollback(ctx.getHiveLocks(), false);
}
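The net effect of the Driver change is that recordValidTxns() now runs exactly once per transaction, or once per read-only autocommit statement. A paraphrase of that decision as a standalone predicate (every parameter stands in for Driver/txn-manager state; this is an illustrative sketch, not a real Hive API):

public class SnapshotDecision {
  // Mirrors the initiatingTransaction / readOnlyQueryInAutoCommit logic above.
  static boolean shouldRecordSnapshot(boolean autoCommit, boolean acidWrite,
      boolean startTransaction, boolean startTxnImplicitly, boolean isQuery) {
    boolean initiatingTransaction =
        (autoCommit && acidWrite) || startTransaction || (!autoCommit && startTxnImplicitly);
    boolean readOnlyQueryInAutoCommit = autoCommit && isQuery && !acidWrite;
    return initiatingTransaction || readOnlyQueryInAutoCommit;
  }

  public static void main(String[] args) {
    // read-only query with autocommit on: snapshot is locked in per statement
    System.out.println(shouldRecordSnapshot(true, false, false, false, true));  // true
    // second statement of an already-open multi-stmt txn: snapshot is not re-recorded
    System.out.println(shouldRecordSnapshot(false, true, false, false, false)); // false
  }
}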
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidHouseKeeperService.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidHouseKeeperService.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidHouseKeeperService.java
new file mode 100644
index 0000000..d22ca8d
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidHouseKeeperService.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.txn;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HouseKeeperService;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
+import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Performs background tasks for transaction management in Hive.
+ * Runs inside the Hive Metastore Service.
+ */
+public class AcidHouseKeeperService implements HouseKeeperService {
+ private static final Log LOG = LogFactory.getLog(AcidHouseKeeperService.class);
+ private ScheduledExecutorService pool = null;
+ private AtomicInteger isAliveCounter = new AtomicInteger(Integer.MIN_VALUE);
+ @Override
+ public void start(HiveConf hiveConf) throws Exception {
+ HiveTxnManager mgr = TxnManagerFactory.getTxnManagerFactory().getTxnManager(hiveConf);
+ if(!mgr.supportsAcid()) {
+ LOG.info(AcidHouseKeeperService.class.getName() + " not started since " +
+ mgr.getClass().getName() + " does not support Acid.");
+ return;//there are no transactions in this case
+ }
+ pool = Executors.newScheduledThreadPool(1, new ThreadFactory() {
+ private final AtomicInteger threadCounter = new AtomicInteger();
+ @Override
+ public Thread newThread(Runnable r) {
+ return new Thread(r, "DeadTxnReaper-" + threadCounter.getAndIncrement());
+ }
+ });
+ TimeUnit tu = TimeUnit.MILLISECONDS;
+ pool.scheduleAtFixedRate(new TimedoutTxnReaper(hiveConf, this),
+ hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, tu),
+ hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_INTERVAL, tu),
+ TimeUnit.MILLISECONDS);
+ LOG.info("Started " + this.getClass().getName() + " with delay/interval = " +
+ hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, tu) + "/" +
+ hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_INTERVAL, tu) + " " + tu);
+ }
+ @Override
+ public void stop() {
+ if(pool != null && !pool.isShutdown()) {
+ pool.shutdown();
+ }
+ pool = null;
+ }
+ @Override
+ public String getServiceDescription() {
+ return "Abort expired transactions";
+ }
+ private static final class TimedoutTxnReaper implements Runnable {
+ private final TxnHandler txnHandler;
+ private final AcidHouseKeeperService owner;
+ private TimedoutTxnReaper(HiveConf hiveConf, AcidHouseKeeperService owner) {
+ txnHandler = new TxnHandler(hiveConf);
+ this.owner = owner;
+ }
+ @Override
+ public void run() {
+ try {
+ txnHandler.performTimeOuts();
+ owner.isAliveCounter.incrementAndGet();
+ LOG.info("timeout reaper ran");
+ }
+ catch(Throwable t) {
+ LOG.fatal("Serious error in " + Thread.currentThread().getName() + ": " + t.getMessage(), t);
+ }
+ }
+ }
+
+ /**
+ * This is used for testing only. Each time the housekeeper runs, the counter is incremented by 1.
+ * It starts at {@link java.lang.Integer#MIN_VALUE}.
+ */
+ public int getIsAliveCounter() {
+ return isAliveCounter.get();
+ }
+}
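Usage follows the pattern the tests below rely on: start the service, wait for getIsAliveCounter() to tick past its previous value (which proves the reaper ran at least once), then stop it. A condensed sketch, assuming a HiveConf pointing at a prepared transaction database:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;

public class ReaperExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // run the reaper immediately and treat any txn idle longer than 1 ms as timed out
    conf.setTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, 0, TimeUnit.SECONDS);
    conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.MILLISECONDS);

    AcidHouseKeeperService houseKeeper = new AcidHouseKeeperService();
    int before = houseKeeper.getIsAliveCounter();
    houseKeeper.start(conf);
    while (houseKeeper.getIsAliveCounter() <= before) {
      Thread.sleep(100); // wait until the reaper has run at least once
    }
    houseKeeper.stop();
  }
}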
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index 6c77ba4..8e431b2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
@@ -52,6 +53,7 @@ import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.OutputCommitter;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskAttemptContext;
import org.apache.hadoop.mapred.lib.NullOutputFormat;
import org.apache.hadoop.util.StringUtils;
@@ -183,7 +185,9 @@ public class CompactorMR {
LOG.debug("Setting minimum transaction to " + minTxn);
LOG.debug("Setting maximume transaction to " + maxTxn);
- JobClient.runJob(job).waitForCompletion();
+ RunningJob rj = JobClient.runJob(job);
+ LOG.info("Submitted " + (isMajor ? CompactionType.MAJOR : CompactionType.MINOR) + " compaction job '" + jobName + "' with jobID=" + rj.getID());
+ rj.waitForCompletion();
su.gatherStats();
}
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index 32a9ef8..73715c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -52,6 +52,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
/**
* A class to initiate compactions. This will run in a separate thread.
+ * It's critical that there is exactly one of these in a given warehouse.
*/
public class Initiator extends CompactorThread {
static final private String CLASS_NAME = Initiator.class.getName();
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index e164661..0548117 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -264,7 +264,7 @@ public class Worker extends CompactorThread {
sb.append(colName).append(",");
}
sb.setLength(sb.length() - 1);//remove trailing ,
- LOG.debug("running '" + sb.toString() + "'");
+ LOG.info("running '" + sb.toString() + "'");
Driver d = new Driver(conf, userName);
SessionState localSession = null;
if(SessionState.get() == null) {
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index c73621f..e13e6eb 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -9,6 +9,7 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.io.orc.FileDump;
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -24,6 +25,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
+import java.util.concurrent.TimeUnit;
/**
* The LockManager is not ready, but for no-concurrency straight-line path we can
@@ -388,6 +390,25 @@ public class TestTxnCommands {
int[][] updatedData = {{1,7},{3,7},{5,2},{5,3}};
Assert.assertEquals("Bulk update failed", stringifyValues(updatedData), rs);
}
+ @Test
+ public void testTimeOutReaper() throws Exception {
+ runStatementOnDriver("set autocommit false");
+ runStatementOnDriver("start transaction");
+ runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 5");
+ //make sure currently running txn is considered aborted by housekeeper
+ hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, 0, TimeUnit.SECONDS);
+ hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.MILLISECONDS);
+ AcidHouseKeeperService houseKeeperService = new AcidHouseKeeperService();
+ //this will abort the txn
+ houseKeeperService.start(hiveConf);
+ while(houseKeeperService.getIsAliveCounter() <= Integer.MIN_VALUE) {
+ Thread.sleep(100);//make sure it has run at least once
+ }
+ houseKeeperService.stop();
+ //this should fail because txn aborted due to timeout
+ CommandProcessorResponse cpr = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5");
+ Assert.assertTrue("Actual: " + cpr.getErrorMessage(), cpr.getErrorMessage().contains("Transaction manager has aborted the transaction txnid:1"));
+ }
/**
* takes raw data and turns it into a string as if from Driver.getResults()
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 57e4fb9..58c2fca 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -131,6 +131,7 @@ public class TestTxnCommands2 {
hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, enablePPD);//enables ORC PPD
int[][] tableData = {{1,2},{3,4}};
runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData));
+ List<String> rs2 = runStatementOnDriver("select a,b from " + Table.ACIDTBL + " where a > 1 order by a,b");
runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'");
Worker t = new Worker();
t.setThreadId((int) t.getId());
http://git-wip-us.apache.org/repos/asf/hive/blob/97a6cd35/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
index f57350d..db119e1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.metadata.DummyPartition;import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -49,6 +49,7 @@ public class TestDbTxnManager {
private HiveConf conf = new HiveConf();
private HiveTxnManager txnMgr;
+ private AcidHouseKeeperService houseKeeperService = null;
private Context ctx;
private int nextInput;
private int nextOutput;
@@ -56,7 +57,6 @@ public class TestDbTxnManager {
HashSet<WriteEntity> writeEntities;
public TestDbTxnManager() throws Exception {
- conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 500, TimeUnit.MILLISECONDS);
TxnDbUtil.setConfValues(conf);
SessionState.start(conf);
ctx = new Context(conf);
@@ -179,14 +179,30 @@ public class TestDbTxnManager {
locks = txnMgr.getLockManager().getLocks(false, false);
Assert.assertEquals(0, locks.size());
}
+
+ /**
+ * Aborts timed-out transactions.
+ */
+ private void runReaper() throws Exception {
+ int lastCount = houseKeeperService.getIsAliveCounter();
+ houseKeeperService.start(conf);
+ while(houseKeeperService.getIsAliveCounter() <= lastCount) {
+ try {
+ Thread.sleep(100);//make sure it has run at least once
+ }
+ catch(InterruptedException ex) {
+ //...
+ }
+ }
+ houseKeeperService.stop();
+ }
@Test
public void testExceptions() throws Exception {
WriteEntity we = addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
QueryPlan qp = new MockQueryPlan(this);
txnMgr.acquireLocks(qp, ctx, "PeterI");
txnMgr.openTxn("NicholasII");
- Thread.sleep(1000);//let txn timeout
- txnMgr.getValidTxns();
+ runReaper();
LockException exception = null;
try {
txnMgr.commitTxn();
@@ -198,8 +214,7 @@ public class TestDbTxnManager {
Assert.assertEquals("Wrong Exception1", ErrorMsg.TXN_ABORTED, exception.getCanonicalErrorMsg());
exception = null;
txnMgr.openTxn("AlexanderIII");
- Thread.sleep(1000);
- txnMgr.getValidTxns();
+ runReaper();
try {
txnMgr.rollbackTxn();
}
@@ -213,8 +228,7 @@ public class TestDbTxnManager {
txnMgr.acquireLocks(qp, ctx, "PeterI");
List<HiveLock> locks = ctx.getHiveLocks();
Assert.assertThat("Unexpected lock count", locks.size(), is(1));
- Thread.sleep(1000);
- txnMgr.getValidTxns();
+ runReaper();
try {
txnMgr.heartbeat();
}
@@ -341,7 +355,6 @@ public class TestDbTxnManager {
Assert.assertTrue(sawException);
}
-
@Before
public void setUp() throws Exception {
TxnDbUtil.prepDb();
@@ -351,10 +364,14 @@ public class TestDbTxnManager {
nextOutput = 1;
readEntities = new HashSet<ReadEntity>();
writeEntities = new HashSet<WriteEntity>();
+ conf.setTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, 0, TimeUnit.SECONDS);
+ conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.MILLISECONDS);
+ houseKeeperService = new AcidHouseKeeperService();
}
@After
public void tearDown() throws Exception {
+ if(houseKeeperService != null) houseKeeperService.stop();
if (txnMgr != null) txnMgr.closeTxnManager();
TxnDbUtil.cleanDb();
}
[7/7] hive git commit: HIVE-11588 : merge master into branch (Sergey Shelukhin)
Posted by se...@apache.org.
HIVE-11588 : merge master into branch (Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9d9dd72a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9d9dd72a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9d9dd72a
Branch: refs/heads/hbase-metastore
Commit: 9d9dd72a06ee2db379dbbae3561d172223d7c96d
Parents: 2fe6086 3b6825b
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Aug 17 15:20:25 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Aug 17 15:20:25 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/hive/conf/HiveConf.java | 6 +
data/conf/hive-log4j2.xml | 5 +-
data/conf/hive-site.xml | 6 -
.../deployers/config/hive/hive-site.mysql.xml | 22 +++
.../hive/hcatalog/streaming/TestStreaming.java | 54 +++++-
.../TestOperationLoggingAPIWithMr.java | 2 -
.../TestOperationLoggingAPIWithTez.java | 2 -
.../operation/TestOperationLoggingLayout.java | 2 -
.../hadoop/hive/metastore/HiveMetaStore.java | 18 ++
.../hive/metastore/HouseKeeperService.java | 39 ++++
.../hadoop/hive/metastore/txn/TxnHandler.java | 153 +++++++++++-----
.../hive/metastore/txn/TestTxnHandler.java | 7 +-
.../java/org/apache/hadoop/hive/ql/Driver.java | 15 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 183 ++++++++++++++-----
.../hadoop/hive/ql/io/orc/OrcInputFormat.java | 97 +++++++---
.../apache/hadoop/hive/ql/io/orc/OrcSplit.java | 25 ++-
.../hadoop/hive/ql/lib/DefaultGraphWalker.java | 2 +-
.../calcite/reloperators/HiveFilter.java | 2 +-
.../calcite/translator/ExprNodeConverter.java | 26 ---
.../apache/hadoop/hive/ql/parse/ASTNode.java | 18 +-
.../hive/ql/txn/AcidHouseKeeperService.java | 104 +++++++++++
.../hive/ql/txn/compactor/CompactorMR.java | 19 +-
.../hadoop/hive/ql/txn/compactor/Initiator.java | 10 +-
.../hadoop/hive/ql/txn/compactor/Worker.java | 2 +-
.../apache/hadoop/hive/ql/TestTxnCommands.java | 21 +++
.../apache/hadoop/hive/ql/TestTxnCommands2.java | 1 +
.../apache/hadoop/hive/ql/io/TestAcidUtils.java | 27 +--
.../hive/ql/io/orc/TestInputOutputFormat.java | 6 +-
.../hive/ql/lockmgr/TestDbTxnManager.java | 35 +++-
.../hadoop/hive/shims/Hadoop20SShims.java | 11 ++
.../apache/hadoop/hive/shims/Hadoop23Shims.java | 66 +++++++
.../apache/hadoop/hive/shims/HadoopShims.java | 15 ++
32 files changed, 782 insertions(+), 219 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/9d9dd72a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/9d9dd72a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/9d9dd72a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
[5/7] hive git commit: HIVE-11575: Fix test failures in master due to log4j changes (Prasanth Jayachandran reviewed by Sergey Shelukhin)
Posted by se...@apache.org.
HIVE-11575: Fix test failures in master due to log4j changes (Prasanth Jayachandran reviewed by Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e0594099
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e0594099
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e0594099
Branch: refs/heads/hbase-metastore
Commit: e0594099c2ea1652e49ff041c6dfb569f47a5912
Parents: 9d8515d
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon Aug 17 13:47:09 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Mon Aug 17 13:47:09 2015 -0700
----------------------------------------------------------------------
data/conf/hive-log4j2.xml | 5 ++---
data/conf/hive-site.xml | 6 ------
.../service/cli/operation/TestOperationLoggingAPIWithMr.java | 2 --
.../service/cli/operation/TestOperationLoggingAPIWithTez.java | 2 --
.../hive/service/cli/operation/TestOperationLoggingLayout.java | 2 --
5 files changed, 2 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/e0594099/data/conf/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
index 51173a0..11c8e79 100644
--- a/data/conf/hive-log4j2.xml
+++ b/data/conf/hive-log4j2.xml
@@ -25,7 +25,6 @@
<Property name="hive.root.logger">DRFA</Property>
<Property name="hive.log.dir">${sys:test.tmp.dir}/log</Property>
<Property name="hive.log.file">hive.log</Property>
- <Property name="hive.ql.log.PerfLogger.level">INFO</Property>
</Properties>
<Appenders>
@@ -95,8 +94,8 @@
<Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
<AppenderRef ref="${sys:hive.root.logger}"/>
</Logger>
- <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}" additivity="false">
- <AppenderRef ref="${sys:hive.root.logger}"/>
+ <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}">
+ <AppenderRef ref="${sys:hive.ql.log.PerfLogger.logger}"/>
</Logger>
<Logger name="org.apache.hadoop.hive.ql.exec.Operator" level="INFO">
<AppenderRef ref="${sys:hive.root.logger}"/>
http://git-wip-us.apache.org/repos/asf/hive/blob/e0594099/data/conf/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index 8f7fb28..a58017e 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -241,12 +241,6 @@
</property>
<property>
- <name>hive.ql.log.PerfLogger.level</name>
- <value>WARN,DRFA</value>
- <description>Used to change the perflogger level</description>
-</property>
-
-<property>
<name>hive.fetch.task.conversion</name>
<value>minimal</value>
</property>
http://git-wip-us.apache.org/repos/asf/hive/blob/e0594099/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java
index 920b563..0155b75 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java
@@ -62,8 +62,6 @@ public class TestOperationLoggingAPIWithMr extends OperationLoggingAPITestBase{
};
hiveConf = new HiveConf();
hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
- // We need to set the below parameter to test performance level logging
- hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
miniHS2 = new MiniHS2(hiveConf);
confOverlay = new HashMap<String, String>();
confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
http://git-wip-us.apache.org/repos/asf/hive/blob/e0594099/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
index 31f34b2..ab29861 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
@@ -40,8 +40,6 @@ public class TestOperationLoggingAPIWithTez extends OperationLoggingAPITestBase
};
hiveConf = new HiveConf();
hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");
- // We need to set the below parameter to test performance level logging
- hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
// Change the engine to tez
hiveConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez");
// Set tez execution summary to false.
http://git-wip-us.apache.org/repos/asf/hive/blob/e0594099/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
index 93c16de..56f6a31 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
@@ -39,8 +39,6 @@ public class TestOperationLoggingLayout {
tableName = "TestOperationLoggingLayout_table";
hiveConf = new HiveConf();
hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "execution");
- // We need to set the below parameter to test performance level logging
- hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
miniHS2 = new MiniHS2(hiveConf);
confOverlay = new HashMap<String, String>();
confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
[2/7] hive git commit: HIVE-11557: CBO (Calcite Return Path): Convert to flat AND/OR (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Posted by se...@apache.org.
HIVE-11557: CBO (Calcite Return Path): Convert to flat AND/OR (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e8329ee0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e8329ee0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e8329ee0
Branch: refs/heads/hbase-metastore
Commit: e8329ee0068dbfe2ca92399d0109f308d6cf7541
Parents: 147347a
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Sat Aug 15 11:11:42 2015 +0300
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Sat Aug 15 11:11:42 2015 +0300
----------------------------------------------------------------------
.../calcite/translator/ExprNodeConverter.java | 26 --------------------
1 file changed, 26 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/e8329ee0/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index 00bf009..ec22f1a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Timestamp;
-import java.util.ArrayList;
import java.util.Calendar;
import java.util.LinkedList;
import java.util.List;
@@ -43,11 +42,9 @@ import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.SqlTypeUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
-import org.apache.hadoop.hive.common.type.HiveVarchar;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema;
@@ -139,29 +136,6 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
&& SqlTypeUtil.equalSansNullability(dTFactory, call.getType(),
call.operands.get(0).getType())) {
return args.get(0);
- } else if (ASTConverter.isFlat(call)) {
- // If Expr is flat (and[p,q,r,s] or[p,q,r,s]) then recursively build the
- // exprnode
- GenericUDF hiveUdf = SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType(), 2);
- ArrayList<ExprNodeDesc> tmpExprArgs = new ArrayList<ExprNodeDesc>();
- tmpExprArgs.addAll(args.subList(0, 2));
- try {
- gfDesc = ExprNodeGenericFuncDesc.newInstance(hiveUdf, tmpExprArgs);
- } catch (UDFArgumentException e) {
- LOG.error(e);
- throw new RuntimeException(e);
- }
- for (int i = 2; i < call.operands.size(); i++) {
- tmpExprArgs = new ArrayList<ExprNodeDesc>();
- tmpExprArgs.add(gfDesc);
- tmpExprArgs.add(args.get(i));
- try {
- gfDesc = ExprNodeGenericFuncDesc.newInstance(hiveUdf, tmpExprArgs);
- } catch (UDFArgumentException e) {
- LOG.error(e);
- throw new RuntimeException(e);
- }
- }
} else {
GenericUDF hiveUdf = SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType(),
args.size());
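For context, the removed branch above took a flat Calcite call such as AND(p, q, r, s) and folded it into nested binary UDF nodes, and(and(and(p, q), r), s); after this change the remaining else branch builds a single n-ary GenericUDF over all operands instead. A schematic of the two tree shapes (Expr here is a hypothetical stand-in for ExprNodeDesc, not Hive API):

import java.util.Arrays;
import java.util.List;

public class FlatVsNested {
  static class Expr {
    final String op;
    final List<Expr> children;
    Expr(String op, Expr... children) { this.op = op; this.children = Arrays.asList(children); }
  }

  public static void main(String[] args) {
    Expr p = new Expr("p"), q = new Expr("q"), r = new Expr("r"), s = new Expr("s");
    // What the removed branch built: pairwise-nested binary ANDs.
    Expr nested = new Expr("and", new Expr("and", new Expr("and", p, q), r), s);
    // What is built after this change: one flat n-ary AND, matching the Calcite call.
    Expr flat = new Expr("and", p, q, r, s);
    System.out.println(nested.children.size() + " vs " + flat.children.size()); // 2 vs 4
  }
}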
[4/7] hive git commit: HIVE-11490 : Lazily call ASTNode::toStringTree() after tree modification (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Posted by se...@apache.org.
HIVE-11490 : Lazily call ASTNode::toStringTree() after tree modification (Hari Subramaniyan, reviewed by Ashutosh Chauhan)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9d8515df
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9d8515df
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9d8515df
Branch: refs/heads/hbase-metastore
Commit: 9d8515df6ff722cd81b9b42a582c422adeac8849
Parents: 97a6cd3
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Mon Aug 17 11:32:06 2015 -0700
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Mon Aug 17 11:32:06 2015 -0700
----------------------------------------------------------------------
.../hadoop/hive/ql/lib/DefaultGraphWalker.java | 2 +-
.../org/apache/hadoop/hive/ql/parse/ASTNode.java | 18 +++++++++---------
2 files changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/9d8515df/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
index cf9131d..583c113 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
@@ -108,7 +108,7 @@ public class DefaultGraphWalker implements GraphWalker {
while (toWalk.size() > 0) {
Node nd = toWalk.remove(0);
walk(nd);
- if (nodeOutput != null) {
+ if (nodeOutput != null && getDispatchedList().contains(nd)) {
nodeOutput.put(nd, retMap.get(nd));
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/9d8515df/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index 136d481..b96e2eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -143,9 +143,11 @@ public class ASTNode extends CommonTree implements Node,Serializable {
retNode = (ASTNode) retNode.parent;
}
rootNode=retNode;
- rootNode.astStr = new StringBuffer();
- rootNode.toStringTree(rootNode);
- rootNode.isValidASTStr = true;
+ if (!rootNode.isValidASTStr) {
+ rootNode.astStr = new StringBuffer();
+ rootNode.toStringTree(rootNode);
+ rootNode.isValidASTStr = true;
+ }
return retNode;
}
@@ -159,9 +161,6 @@ public class ASTNode extends CommonTree implements Node,Serializable {
rootNode.astStr = null;
rootNode.isValidASTStr = false;
}
- // The root might have changed because of tree modifications.
- // Compute the new root for this tree and set the astStr.
- getRootNodeWithValidASTStr(false);
}
private int getMemoizedStringLen() {
@@ -225,9 +224,10 @@ public class ASTNode extends CommonTree implements Node,Serializable {
@Override
public String toStringTree() {
- // The tree modifier functions invalidate the old astStr, rootNode, etc.
- // Hence, we can use the memoized root node and string values here.
- ASTNode rootNode = (ASTNode)this.getRootNodeWithValidASTStr(true);
+
+ // The root might have changed because of tree modifications.
+ // Compute the new root for this tree and set the astStr.
+ getRootNodeWithValidASTStr(true);
// If rootNotModified is false, then startIndx and endIndx will be stale.
if (startIndx >= 0 && endIndx <= rootNode.getMemoizedStringLen()) {