Posted to commits@phoenix.apache.org by an...@apache.org on 2017/11/16 10:23:30 UTC
phoenix git commit: PHOENIX-4368 Fix MapReduce related classes
Repository: phoenix
Updated Branches:
refs/heads/5.x-HBase-2.0 6a45c40fc -> eba5459c5
PHOENIX-4368 Fix MapReduce related classes
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/eba5459c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/eba5459c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/eba5459c
Branch: refs/heads/5.x-HBase-2.0
Commit: eba5459c537bde41e32c4d096681d674a9c21c1a
Parents: 6a45c40
Author: Ankit Singhal <an...@gmail.com>
Authored: Thu Nov 16 15:53:22 2017 +0530
Committer: Ankit Singhal <an...@gmail.com>
Committed: Thu Nov 16 15:53:22 2017 +0530
----------------------------------------------------------------------
.../phoenix/mapreduce/AbstractBulkLoadTool.java | 12 +--
.../mapreduce/FormatToBytesWritableMapper.java | 2 -
.../mapreduce/FormatToKeyValueReducer.java | 5 +-
.../ImportPreUpsertKeyValueProcessor.java | 5 +-
.../mapreduce/MultiHfileOutputFormat.java | 77 ++++++++++----------
.../phoenix/mapreduce/PhoenixInputFormat.java | 4 +-
.../mapreduce/index/IndexScrutinyTool.java | 5 +-
.../phoenix/mapreduce/index/IndexTool.java | 6 +-
8 files changed, 54 insertions(+), 62 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index 90a69af..13c7ab6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -17,7 +17,6 @@
*/
package org.apache.phoenix.mapreduce;
-import java.io.IOException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
@@ -30,14 +29,13 @@ import java.util.UUID;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
@@ -46,14 +44,12 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
-import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.jdbc.PhoenixDriver;
@@ -65,7 +61,6 @@ import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.IndexUtil;
-import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.SchemaUtil;
@@ -73,7 +68,6 @@ import org.apache.phoenix.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
@@ -131,7 +125,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
Options options = getOptions();
- CommandLineParser parser = new PosixParser();
+ CommandLineParser parser = new DefaultParser();
CommandLine cmdLine = null;
try {
cmdLine = parser.parse(options, args);
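The substantive changes in AbstractBulkLoadTool are swapping the commons-cli PosixParser (deprecated since commons-cli 1.3) for DefaultParser and importing LoadIncrementalHFiles from its new org.apache.hadoop.hbase.tool package. A minimal, self-contained sketch of the DefaultParser pattern used above; the "input" option and the ParserSketch class are hypothetical, added only for illustration:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.CommandLineParser;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    // Sketch only: "input" is a made-up option, not one of the tool's real options.
    public class ParserSketch {
        public static void main(String[] args) throws ParseException {
            Options options = new Options();
            options.addOption("i", "input", true, "input path to load");
            CommandLineParser parser = new DefaultParser();   // was: new PosixParser()
            CommandLine cmdLine = parser.parse(options, args);
            System.out.println("input = " + cmdLine.getOptionValue("input"));
        }
    }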
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index 3925bdb..b44d333 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -33,7 +33,6 @@ import javax.annotation.Nullable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -151,7 +150,6 @@ public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWri
preUpdateProcessor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf);
}
- @SuppressWarnings("deprecation")
@Override
protected void map(LongWritable key, Text value, Context context) throws IOException,
InterruptedException {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
index 72af1a7..eb417a7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
@@ -29,9 +29,9 @@ import java.util.TreeMap;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.WritableUtils;
@@ -55,7 +55,6 @@ import org.slf4j.LoggerFactory;
/**
* Reducer class for the bulkload jobs.
- * Performs similar functionality to {@link KeyValueSortReducer}
*/
public class FormatToKeyValueReducer
extends Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue> {
@@ -139,7 +138,7 @@ public class FormatToKeyValueReducer
protected void reduce(TableRowkeyPair key, Iterable<ImmutableBytesWritable> values,
Reducer<TableRowkeyPair, ImmutableBytesWritable, TableRowkeyPair, KeyValue>.Context context)
throws IOException, InterruptedException {
- TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
+ TreeSet<KeyValue> map = new TreeSet<KeyValue>(CellComparatorImpl.COMPARATOR);
for (ImmutableBytesWritable aggregatedArray : values) {
DataInputStream input = new DataInputStream(new ByteArrayInputStream(aggregatedArray.get()));
while (input.available() != 0) {
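Against the HBase 2.0 APIs this branch builds on, KeyValue.COMPARATOR and the KeyValueSortReducer javadoc target are no longer available, so the reducer sorts through CellComparatorImpl.COMPARATOR instead. A minimal sketch of that sort; the row, family, and qualifier values are made up for illustration:

    import java.util.TreeSet;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: order KeyValues the way the reducer does, via CellComparatorImpl.COMPARATOR.
    public class ComparatorSketch {
        public static void main(String[] args) {
            TreeSet<KeyValue> sorted = new TreeSet<>(CellComparatorImpl.COMPARATOR);
            byte[] row = Bytes.toBytes("row1");          // hypothetical values
            byte[] family = Bytes.toBytes("0");
            sorted.add(new KeyValue(row, family, Bytes.toBytes("b"), Bytes.toBytes("v2")));
            sorted.add(new KeyValue(row, family, Bytes.toBytes("a"), Bytes.toBytes("v1")));
            System.out.println(sorted.first());          // qualifier "a" sorts first
        }
    }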
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
index 22d40d4..aa76572 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/ImportPreUpsertKeyValueProcessor.java
@@ -17,11 +17,10 @@
*/
package org.apache.phoenix.mapreduce;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.KeyValue;
-
import java.util.List;
+import org.apache.hadoop.hbase.Cell;
+
/**
* A listener hook to process KeyValues that are being written to HFiles for bulk import.
* Implementing this interface and configuring it via the {@link
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index bb38923..e9891df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -17,11 +17,15 @@
*/
package org.apache.phoenix.mapreduce;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TASK_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
+
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
-import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -34,31 +38,31 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
import org.apache.hadoop.hbase.mapreduce.KeyValueSerialization;
import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
import org.apache.hadoop.hbase.mapreduce.ResultSerialization;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
@@ -72,7 +76,6 @@ import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
import org.apache.phoenix.mapreduce.bulkload.TargetTableRef;
import org.apache.phoenix.mapreduce.bulkload.TargetTableRefFunctions;
import org.apache.phoenix.util.EnvironmentEdgeManager;
-import org.apache.phoenix.util.PhoenixKeyValueUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -129,7 +132,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
// Invented config. Add to hbase-*.xml if other than default compression.
final String defaultCompressionStr = conf.get("hfile.compression",
Compression.Algorithm.NONE.getName());
- final Algorithm defaultCompression = AbstractHFileWriter
+ final Algorithm defaultCompression = HFileWriterImpl
.compressionByName(defaultCompressionStr);
final boolean compactionExclude = conf.getBoolean(
"hbase.mapreduce.hfileoutputformat.compaction.exclude", false);
@@ -139,13 +142,13 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
private final Map<byte [], WriterLength> writers =
new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
- private final byte [] now = Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis());
+ private final long now = EnvironmentEdgeManager.currentTimeMillis();
private boolean rollRequested = false;
@Override
public void write(TableRowkeyPair row, V cell)
throws IOException {
- KeyValue kv = KeyValueUtil.maybeCopyCell(cell);
+ Cell kv = cell;
// null input == user explicitly wants to flush
if (row == null && kv == null) {
rollWriters();
@@ -155,7 +158,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
// phoenix-2216: start : extract table name from the rowkey
String tableName = row.getTableName();
byte [] rowKey = row.getRowkey().get();
- long length = kv.getLength();
+ int length = (CellUtil.estimatedSerializedSizeOf(kv)) - Bytes.SIZEOF_INT;
byte [] family = CellUtil.cloneFamily(kv);
byte[] tableAndFamily = join(tableName, Bytes.toString(family));
WriterLength wl = this.writers.get(tableAndFamily);
@@ -188,7 +191,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
}
// we now have the proper WAL writer. full steam ahead
- kv.updateLatestStamp(this.now);
+ CellUtil.setTimestamp(cell,this.now);
wl.writer.append(kv);
wl.written += length;
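With KeyValueUtil.maybeCopyCell and KeyValue.updateLatestStamp out of the picture, the record writer now stays on the Cell interface, stamping cells via CellUtil.setTimestamp and sizing them via CellUtil.estimatedSerializedSizeOf. A small sketch of that pattern, assuming the HBase 2.0 CellUtil API; the cell contents are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: stamp and size a Cell the way the patched write() path does.
    public class CellTimestampSketch {
        public static void main(String[] args) throws IOException {
            Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("0"),
                    Bytes.toBytes("q"), Bytes.toBytes("v"));    // hypothetical cell
            long now = System.currentTimeMillis();
            CellUtil.setTimestamp(cell, now);                   // replaces kv.updateLatestStamp(nowBytes)
            int length = CellUtil.estimatedSerializedSizeOf(cell) - Bytes.SIZEOF_INT;
            System.out.println("ts=" + cell.getTimestamp() + " length=" + length);
        }
    }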
@@ -258,9 +261,9 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
contextBuilder.withDataBlockEncoding(encoding);
HFileContext hFileContext = contextBuilder.build();
- wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
+ wl.writer = new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs)
.withOutputDir(familydir).withBloomType(bloomType)
- .withComparator(KeyValue.COMPARATOR)
+ .withComparator(CellComparatorImpl.COMPARATOR)
.withFileContext(hFileContext).build();
// join and put it in the writers map .
@@ -272,15 +275,15 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
return wl;
}
- private void close(final StoreFile.Writer w) throws IOException {
+ private void close(final StoreFileWriter w) throws IOException {
if (w != null) {
- w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
+ w.appendFileInfo(BULKLOAD_TIME_KEY,
Bytes.toBytes(EnvironmentEdgeManager.currentTimeMillis()));
- w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
+ w.appendFileInfo(BULKLOAD_TASK_KEY,
Bytes.toBytes(context.getTaskAttemptID().toString()));
- w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,
+ w.appendFileInfo(MAJOR_COMPACTION_KEY,
Bytes.toBytes(true));
- w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,
+ w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY,
Bytes.toBytes(compactionExclude));
w.appendTrackedTimestampsToMetadata();
w.close();
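StoreFile.Writer and its WriterBuilder become StoreFileWriter and StoreFileWriter.Builder in HBase 2.0, and the bulk-load file-info keys now live on HStoreFile, hence the static imports added at the top of the file. A condensed sketch of creating, annotating, and closing such a writer, assuming the same HBase 2.0 APIs used in this patch; the output path and cell contents are hypothetical:

    import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TASK_KEY;
    import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.CellComparatorImpl;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: build a StoreFileWriter the way getNewWriter() does, write one cell,
    // tag it with the relocated HStoreFile bulk-load keys, and close it.
    public class StoreFileWriterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            FileSystem fs = FileSystem.get(conf);
            HFileContext context = new HFileContextBuilder().build();
            StoreFileWriter writer = new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs)
                    .withOutputDir(new Path("/tmp/hfile-sketch/0"))   // hypothetical family dir
                    .withBloomType(BloomType.NONE)
                    .withComparator(CellComparatorImpl.COMPARATOR)
                    .withFileContext(context)
                    .build();
            writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("0"),
                    Bytes.toBytes("q"), Bytes.toBytes("v")));
            writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
            writer.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes("sketch-task"));
            writer.close();
        }
    }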
@@ -301,7 +304,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
*/
static class WriterLength {
long written = 0;
- StoreFile.Writer writer = null;
+ StoreFileWriter writer = null;
}
/**
@@ -330,7 +333,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
}
Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,COMPRESSION_FAMILIES_CONF_KEY);
for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
- Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
+ Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue());
compressionMap.put(e.getKey(), algorithm);
}
return compressionMap;
@@ -520,7 +523,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
@VisibleForTesting
- static String configureCompression(HTableDescriptor tableDescriptor)
+ static String configureCompression(TableDescriptor tableDescriptor)
throws UnsupportedEncodingException {
StringBuilder compressionConfigValue = new StringBuilder();
@@ -528,9 +531,9 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
// could happen with mock table instance
return compressionConfigValue.toString();
}
- Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+ ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies();
int i = 0;
- for (HColumnDescriptor familyDescriptor : families) {
+ for (ColumnFamilyDescriptor familyDescriptor : families) {
if (i++ > 0) {
compressionConfigValue.append('&');
}
@@ -538,7 +541,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
familyDescriptor.getNameAsString(), "UTF-8"));
compressionConfigValue.append('=');
compressionConfigValue.append(URLEncoder.encode(
- familyDescriptor.getCompression().getName(), "UTF-8"));
+ familyDescriptor.getCompressionType().getName(), "UTF-8"));
}
return compressionConfigValue.toString();
}
@@ -553,16 +556,16 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
* on failure to read column family descriptors
*/
@VisibleForTesting
- static String configureBlockSize(HTableDescriptor tableDescriptor)
+ static String configureBlockSize(TableDescriptor tableDescriptor)
throws UnsupportedEncodingException {
StringBuilder blockSizeConfigValue = new StringBuilder();
if (tableDescriptor == null) {
// could happen with mock table instance
return blockSizeConfigValue.toString();
}
- Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+ ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies();
int i = 0;
- for (HColumnDescriptor familyDescriptor : families) {
+ for (ColumnFamilyDescriptor familyDescriptor : families) {
if (i++ > 0) {
blockSizeConfigValue.append('&');
}
@@ -584,7 +587,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
* @throws IOException
* on failure to read column family descriptors
*/
- static String configureBloomType(HTableDescriptor tableDescriptor)
+ static String configureBloomType(TableDescriptor tableDescriptor)
throws UnsupportedEncodingException {
StringBuilder bloomTypeConfigValue = new StringBuilder();
@@ -593,9 +596,9 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
// could happen with mock table instance
return bloomTypeConfigValue.toString();
}
- Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+ ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies();
int i = 0;
- for (HColumnDescriptor familyDescriptor : families) {
+ for (ColumnFamilyDescriptor familyDescriptor : families) {
if (i++ > 0) {
bloomTypeConfigValue.append('&');
}
@@ -604,7 +607,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
bloomTypeConfigValue.append('=');
String bloomType = familyDescriptor.getBloomFilterType().toString();
if (bloomType == null) {
- bloomType = HColumnDescriptor.DEFAULT_BLOOMFILTER;
+ bloomType = ColumnFamilyDescriptorBuilder.DEFAULT_BLOOMFILTER.toString();
}
bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
}
@@ -620,7 +623,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
* @throws IOException
* on failure to read column family descriptors
*/
- static String configureDataBlockEncoding(HTableDescriptor tableDescriptor) throws UnsupportedEncodingException {
+ static String configureDataBlockEncoding(TableDescriptor tableDescriptor) throws UnsupportedEncodingException {
StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
@@ -628,9 +631,9 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
// could happen with mock table instance
return dataBlockEncodingConfigValue.toString();
}
- Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+ ColumnFamilyDescriptor[] families = tableDescriptor.getColumnFamilies();
int i = 0;
- for (HColumnDescriptor familyDescriptor : families) {
+ for (ColumnFamilyDescriptor familyDescriptor : families) {
if (i++ > 0) {
dataBlockEncodingConfigValue.append('&');
}
@@ -671,7 +674,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
getRegionStartKeys(tableName,
hbaseConn.getRegionLocator(TableName.valueOf(tableName)));
tablesStartKeys.addAll(startKeys);
- HTableDescriptor tableDescriptor = hbaseConn.getTable(TableName.valueOf(tableName)).getTableDescriptor();
+ TableDescriptor tableDescriptor = hbaseConn.getTable(TableName.valueOf(tableName)).getDescriptor();
String compressionConfig = configureCompression(tableDescriptor);
String bloomTypeConfig = configureBloomType(tableDescriptor);
String blockSizeConfig = configureBlockSize(tableDescriptor);
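The table-metadata helpers switch from HTableDescriptor/HColumnDescriptor to the HBase 2.0 TableDescriptor/ColumnFamilyDescriptor pair, fetched through Table.getDescriptor(). A minimal sketch of reading per-family compression that way; "MY_TABLE" is a hypothetical table name:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    // Sketch: getDescriptor()/getColumnFamilies()/getCompressionType() replace
    // getTableDescriptor()/getFamilies()/getCompression() from the 1.x API.
    public class DescriptorSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                TableDescriptor table = conn.getTable(TableName.valueOf("MY_TABLE")).getDescriptor();
                for (ColumnFamilyDescriptor family : table.getColumnFamilies()) {
                    System.out.println(family.getNameAsString() + " -> "
                            + family.getCompressionType().getName());
                }
            }
        }
    }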
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index 2871809..ede6ed9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.RegionSizeCalculator;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
@@ -114,7 +114,7 @@ public class PhoenixInputFormat<T extends DBWritable> extends InputFormat<NullWr
// Get the region size
long regionSize = sizeCalculator.getRegionSize(
- location.getRegionInfo().getRegionName()
+ location.getRegion().getRegionName()
);
// Generate splits based off statistics, or just region splits?
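RegionSizeCalculator has moved from org.apache.hadoop.hbase.util to org.apache.hadoop.hbase.mapreduce, and HRegionLocation.getRegionInfo() gives way to getRegion(). A small sketch of computing region sizes with the relocated class, assuming the HBase 2.0 client API; "MY_TABLE" is a hypothetical table name:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;

    // Sketch: size each region via the calculator now living under o.a.h.hbase.mapreduce.
    public class RegionSizeSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin();
                 RegionLocator locator = conn.getRegionLocator(TableName.valueOf("MY_TABLE"))) {
                RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(locator, admin);
                for (HRegionLocation location : locator.getAllRegionLocations()) {
                    long regionSize = sizeCalculator.getRegionSize(location.getRegion().getRegionName());
                    System.out.println(location.getRegion().getRegionNameAsString() + " -> " + regionSize);
                }
            }
        }
    }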
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
index cae89ff..124ee75 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexScrutinyTool.java
@@ -26,11 +26,11 @@ import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -157,7 +157,7 @@ public class IndexScrutinyTool extends Configured implements Tool {
private CommandLine parseOptions(String[] args) {
final Options options = getOptions();
- CommandLineParser parser = new PosixParser();
+ CommandLineParser parser = new DefaultParser();
CommandLine cmdLine = null;
try {
cmdLine = parser.parse(options, args);
@@ -302,7 +302,6 @@ public class IndexScrutinyTool extends Configured implements Tool {
// root dir not a subdirectory of hbase dir
Path rootDir = new Path("hdfs:///index-snapshot-dir");
FSUtils.setRootDir(configuration, rootDir);
- Path restoreDir = new Path(FSUtils.getRootDir(configuration), "restore-dir");
// set input for map reduce job using hbase snapshots
//PhoenixMapReduceUtil.setInput(job, PhoenixIndexDBWritable.class, snapshotName,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/eba5459c/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 2aa3d3e..3f7d3b3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -32,11 +32,11 @@ import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
-import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
@@ -151,7 +151,7 @@ public class IndexTool extends Configured implements Tool {
final Options options = getOptions();
- CommandLineParser parser = new PosixParser();
+ CommandLineParser parser = new DefaultParser();
CommandLine cmdLine = null;
try {
cmdLine = parser.parse(options, args);
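IndexTool picks up the same PosixParser-to-DefaultParser swap as the other tools, plus LoadIncrementalHFiles from its new org.apache.hadoop.hbase.tool home. A rough sketch of bulk-loading prepared HFiles through the relocated class, assuming the HBase 2.0 tool API; the HFile path and table name are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;

    // Sketch: bulk-load HFiles with the class now living under o.a.h.hbase.tool.
    public class BulkLoadSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin();
                 Table table = conn.getTable(TableName.valueOf("MY_TABLE"));
                 RegionLocator locator = conn.getRegionLocator(TableName.valueOf("MY_TABLE"))) {
                LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
                loader.doBulkLoad(new Path("/tmp/hfiles-for-MY_TABLE"), admin, table, locator);
            }
        }
    }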