Posted to commits@hive.apache.org by da...@apache.org on 2015/09/22 07:04:13 UTC
[42/50] [abbrv] hive git commit: HIVE-11654 After HIVE-10289, HBase metastore tests failing (Daniel Dai via gates)
HIVE-11654 After HIVE-10289, HBase metastore tests failing (Daniel Dai via gates)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e150af94
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e150af94
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e150af94
Branch: refs/heads/master
Commit: e150af9457079c87c267094f3861528286e951ea
Parents: f014f0d
Author: Alan Gates <ga...@hortonworks.com>
Authored: Fri Aug 28 10:48:35 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Fri Aug 28 10:48:35 2015 -0700
----------------------------------------------------------------------
.../TestHBaseAggrStatsCacheIntegration.java | 4 +-
.../hive/metastore/hbase/HBaseReadWrite.java | 36 ++++-----
.../hadoop/hive/metastore/hbase/HBaseStore.java | 79 ++++++++++++++------
.../hadoop/hive/metastore/hbase/HBaseUtils.java | 36 ++++-----
.../hive/metastore/hbase/TestHBaseStore.java | 73 +++++++++++++-----
5 files changed, 144 insertions(+), 84 deletions(-)
----------------------------------------------------------------------
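In short, reading from the hunks below: HIVE-10289 appears to have introduced typed, binary-sortable partition keys in the HBase metastore, and this fix (a) threads the partition-key column types through deletePartition/replacePartition(s) instead of re-deriving them inside HBaseReadWrite, (b) has HBaseStore normalize database/table identifiers on deep copies of the incoming Thrift objects before writing, (c) converts deserialized key values back to strings via ObjectInspector converters in HBaseUtils, and (d) updates the tests to create a real table and partition per type and expect the typed results.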
http://git-wip-us.apache.org/repos/asf/hive/blob/e150af94/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
index ad76b2e..899fee1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
@@ -140,7 +140,7 @@ public class TestHBaseAggrStatsCacheIntegration extends HBaseIntegrationTests {
Checker statChecker = new Checker() {
@Override
public void checkStats(AggrStats aggrStats) throws Exception {
- Assert.assertEquals(4, aggrStats.getPartsFound());
+ Assert.assertEquals(2, aggrStats.getPartsFound());
Assert.assertEquals(2, aggrStats.getColStatsSize());
ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
Assert.assertEquals("col1", cso.getColName());
@@ -152,7 +152,7 @@ public class TestHBaseAggrStatsCacheIntegration extends HBaseIntegrationTests {
cso = aggrStats.getColStats().get(1);
Assert.assertEquals("col2", cso.getColName());
- Assert.assertEquals("string", cso.getColType());
+ Assert.assertEquals("varchar", cso.getColType());
StringColumnStatsData scsd = cso.getStatsData().getStringStats();
Assert.assertEquals(10.3, scsd.getAvgColLen(), 0.1);
Assert.assertEquals(2000, scsd.getMaxColLen());
http://git-wip-us.apache.org/repos/asf/hive/blob/e150af94/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index 8a1448c..d38c561 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -550,7 +550,7 @@ public class HBaseReadWrite {
* @param newPart partition to replace it with
* @throws IOException
*/
- void replacePartition(Partition oldPart, Partition newPart) throws IOException {
+ void replacePartition(Partition oldPart, Partition newPart, List<String> partTypes) throws IOException {
byte[] hash;
byte[] oldHash = HBaseUtils.hashStorageDescriptor(oldPart.getSd(), md);
byte[] newHash = HBaseUtils.hashStorageDescriptor(newPart.getSd(), md);
@@ -565,7 +565,7 @@ public class HBaseReadWrite {
store(PART_TABLE, serialized[0], CATALOG_CF, CATALOG_COL, serialized[1]);
partCache.put(newPart.getDbName(), newPart.getTableName(), newPart);
if (!oldPart.getTableName().equals(newPart.getTableName())) {
- deletePartition(oldPart.getDbName(), oldPart.getTableName(), oldPart.getValues());
+ deletePartition(oldPart.getDbName(), oldPart.getTableName(), partTypes, oldPart.getValues());
}
}
@@ -592,7 +592,7 @@ public class HBaseReadWrite {
conn.flush(htab);
}
- void replacePartitions(List<Partition> oldParts, List<Partition> newParts) throws IOException {
+ void replacePartitions(List<Partition> oldParts, List<Partition> newParts, List<String> oldPartTypes) throws IOException {
if (oldParts.size() != newParts.size()) {
throw new RuntimeException("Number of old and new partitions must match.");
}
@@ -616,7 +616,7 @@ public class HBaseReadWrite {
partCache.put(newParts.get(i).getDbName(), newParts.get(i).getTableName(), newParts.get(i));
if (!newParts.get(i).getTableName().equals(oldParts.get(i).getTableName())) {
// We need to remove the old record as well.
- deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(),
+ deletePartition(oldParts.get(i).getDbName(), oldParts.get(i).getTableName(), oldPartTypes,
oldParts.get(i).getValues(), false);
}
}
@@ -710,16 +710,17 @@ public class HBaseReadWrite {
i < table.getPartitionKeys().size() && i < partVals.size(); i++) {
if ("*".equals(partVals.get(i))) {
+ PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator(
+ PartitionKeyComparator.Operator.Type.LIKE,
+ table.getPartitionKeys().get(i).getName(),
+ ".*");
+ ops.add(op);
+ } else {
PartitionKeyComparator.Range range = new PartitionKeyComparator.Range(
table.getPartitionKeys().get(i).getName(),
new PartitionKeyComparator.Mark(partVals.get(i), true),
new PartitionKeyComparator.Mark(partVals.get(i), true));
ranges.add(range);
- } else {
- PartitionKeyComparator.Operator op = new PartitionKeyComparator.Operator(
- PartitionKeyComparator.Operator.Type.LIKE,
- table.getPartitionKeys().get(i).getName(),
- ".*");
}
}
}
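The substance of this hunk: in the old code a "*" wildcard fell into the exact-match Range branch (matching the literal string "*"), while the LIKE operator built in the else branch was constructed but never added to ops. The new code swaps the two branches and registers the operator with ops.add(op).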
@@ -771,20 +772,20 @@ public class HBaseReadWrite {
* partition columns they are values for
* @throws IOException
*/
- void deletePartition(String dbName, String tableName, List<String> partVals) throws IOException {
- deletePartition(dbName, tableName, partVals, true);
+ void deletePartition(String dbName, String tableName, List<String> partTypes,
+ List<String> partVals) throws IOException {
+ deletePartition(dbName, tableName, partTypes, partVals, true);
}
- private void deletePartition(String dbName, String tableName, List<String> partVals,
- boolean decrementRefCnt) throws IOException {
+ private void deletePartition(String dbName, String tableName, List<String> partTypes,
+ List<String> partVals, boolean decrementRefCnt) throws IOException {
// Find the partition so I can get the storage descriptor and drop it
partCache.remove(dbName, tableName, partVals);
if (decrementRefCnt) {
Partition p = getPartition(dbName, tableName, partVals, false);
decrementStorageDescriptorRefCount(p.getSd());
}
- byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName,
- HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals);
+ byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName, partTypes, partVals);
delete(PART_TABLE, key, null, null);
}
@@ -1610,10 +1611,9 @@ public class HBaseReadWrite {
// reconstruct the key. We have to pull the dbName and tableName out of the key to
// find the partition values.
byte[] key = results[i].getRow();
- List<String> reconstructedKey = HBaseUtils.parseKey(key, HBaseUtils.getPartitionNames(getTable(dbName, tblName).getPartitionKeys()),
- HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()));
List<String> reconstructedPartVals =
- reconstructedKey.subList(2, reconstructedKey.size());
+ HBaseUtils.deserializePartitionKey(getTable(dbName, tblName).getPartitionKeys(), key,
+ staticConf);
String partName = valToPartMap.get(reconstructedPartVals);
assert partName != null;
csd.setIsTblLevel(false);
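With these hunks, the partition-key column types travel with every delete and replace rather than being re-derived from the table inside HBaseReadWrite. A minimal sketch of the new call shape, assuming a caller in the same org.apache.hadoop.hive.metastore.hbase package (the methods are package-private; the variable names here are illustrative):

    // 'table' is the Thrift Table, 'rw' an HBaseReadWrite instance (assumed in scope).
    List<String> partTypes = HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys());
    rw.deletePartition(dbName, tableName, partTypes, partVals);
    rw.replacePartition(oldPart, newPart, partTypes);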
http://git-wip-us.apache.org/repos/asf/hive/blob/e150af94/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 568a347..4cda9cc 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -139,9 +139,10 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
-
+ Database dbCopy = db.deepCopy();
+ dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName()));
// HiveMetaStore already checks for existence of the database, don't recheck
- getHBase().putDb(db);
+ getHBase().putDb(dbCopy);
commit = true;
} catch (IOException e) {
LOG.error("Unable to create database ", e);
@@ -195,7 +196,9 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
- getHBase().putDb(db);
+ Database dbCopy = db.deepCopy();
+ dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName()));
+ getHBase().putDb(dbCopy);
commit = true;
return true;
} catch (IOException e) {
@@ -211,7 +214,8 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
- List<Database> dbs = getHBase().scanDatabases(likeToRegex(pattern));
+ List<Database> dbs = getHBase().scanDatabases(
+ pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern)));
List<String> dbNames = new ArrayList<String>(dbs.size());
for (Database db : dbs) dbNames.add(db.getName());
commit = true;
@@ -250,7 +254,10 @@ public class HBaseStore implements RawStore {
openTransaction();
// HiveMetaStore above us checks if the table already exists, so we can blindly store it here.
try {
- getHBase().putTable(tbl);
+ Table tblCopy = tbl.deepCopy();
+ tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(tblCopy.getDbName()));
+ tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblCopy.getTableName()));
+ getHBase().putTable(tblCopy);
commit = true;
} catch (IOException e) {
LOG.error("Unable to create table ", e);
@@ -303,7 +310,10 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
- getHBase().putPartition(part);
+ Partition partCopy = part.deepCopy();
+ partCopy.setDbName(HiveStringUtils.normalizeIdentifier(part.getDbName()));
+ partCopy.setTableName(HiveStringUtils.normalizeIdentifier(part.getTableName()));
+ getHBase().putPartition(partCopy);
commit = true;
return true;
} catch (IOException e) {
@@ -320,7 +330,14 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
- getHBase().putPartitions(parts);
+ List<Partition> partsCopy = new ArrayList<Partition>();
+ for (int i=0;i<parts.size();i++) {
+ Partition partCopy = parts.get(i).deepCopy();
+ partCopy.setDbName(HiveStringUtils.normalizeIdentifier(partCopy.getDbName()));
+ partCopy.setTableName(HiveStringUtils.normalizeIdentifier(partCopy.getTableName()));
+ partsCopy.add(i, partCopy);
+ }
+ getHBase().putPartitions(partsCopy);
commit = true;
return true;
} catch (IOException e) {
@@ -383,11 +400,12 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
- getHBase().deletePartition(HiveStringUtils.normalizeIdentifier(dbName),
- HiveStringUtils.normalizeIdentifier(tableName), part_vals);
+ dbName = HiveStringUtils.normalizeIdentifier(dbName);
+ tableName = HiveStringUtils.normalizeIdentifier(tableName);
+ getHBase().deletePartition(dbName, tableName, HBaseUtils.getPartitionKeyTypes(
+ getTable(dbName, tableName).getPartitionKeys()), part_vals);
// Drop any cached stats that reference this partition
- getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(dbName),
- HiveStringUtils.normalizeIdentifier(tableName),
+ getHBase().getStatsCache().invalidate(dbName, tableName,
buildExternalPartName(dbName, tableName, part_vals));
commit = true;
return true;
@@ -419,26 +437,31 @@ public class HBaseStore implements RawStore {
}
@Override
- public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException,
+ public void alterTable(String dbName, String tableName, Table newTable) throws InvalidObjectException,
MetaException {
boolean commit = false;
openTransaction();
try {
- getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbname),
- HiveStringUtils.normalizeIdentifier(name)), newTable);
+ Table newTableCopy = newTable.deepCopy();
+ newTableCopy.setDbName(HiveStringUtils.normalizeIdentifier(newTableCopy.getDbName()));
+ List<String> oldPartTypes = getTable(dbName, tableName).getPartitionKeys()==null?
+ null:HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys());
+ newTableCopy.setTableName(HiveStringUtils.normalizeIdentifier(newTableCopy.getTableName()));
+ getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName),
+ HiveStringUtils.normalizeIdentifier(tableName)), newTableCopy);
if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0
- && !name.equals(newTable.getTableName())) {
+ && !tableName.equals(newTable.getTableName())) {
// They renamed the table, so we need to change each partition as well, since it changes
// the key.
try {
- List<Partition> oldParts = getPartitions(dbname, name, -1);
+ List<Partition> oldParts = getPartitions(dbName, tableName, -1);
List<Partition> newParts = new ArrayList<>(oldParts.size());
for (Partition oldPart : oldParts) {
Partition newPart = oldPart.deepCopy();
newPart.setTableName(newTable.getTableName());
newParts.add(newPart);
}
- getHBase().replacePartitions(oldParts, newParts);
+ getHBase().replacePartitions(oldParts, newParts, oldPartTypes);
} catch (NoSuchObjectException e) {
LOG.debug("No partitions found for old table so not worrying about it");
}
@@ -446,8 +469,8 @@ public class HBaseStore implements RawStore {
}
commit = true;
} catch (IOException e) {
- LOG.error("Unable to alter table " + tableNameForErrorMsg(dbname, name), e);
- throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbname, name));
+ LOG.error("Unable to alter table " + tableNameForErrorMsg(dbName, tableName), e);
+ throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbName, tableName));
} finally {
commitOrRoleBack(commit);
}
@@ -459,7 +482,7 @@ public class HBaseStore implements RawStore {
openTransaction();
try {
List<Table> tables = getHBase().scanTables(HiveStringUtils.normalizeIdentifier(dbName),
- likeToRegex(pattern));
+ pattern==null?null:HiveStringUtils.normalizeIdentifier(likeToRegex(pattern)));
List<String> tableNames = new ArrayList<String>(tables.size());
for (Table table : tables) tableNames.add(table.getTableName());
commit = true;
@@ -544,9 +567,13 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
+ Partition new_partCopy = new_part.deepCopy();
+ new_partCopy.setDbName(HiveStringUtils.normalizeIdentifier(new_partCopy.getDbName()));
+ new_partCopy.setTableName(HiveStringUtils.normalizeIdentifier(new_partCopy.getTableName()));
Partition oldPart = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(db_name),
HiveStringUtils.normalizeIdentifier(tbl_name), part_vals);
- getHBase().replacePartition(oldPart, new_part);
+ getHBase().replacePartition(oldPart, new_partCopy, HBaseUtils.getPartitionKeyTypes(
+ getTable(db_name, tbl_name).getPartitionKeys()));
// Drop any cached stats that reference this partition
getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name),
HiveStringUtils.normalizeIdentifier(tbl_name),
@@ -567,11 +594,19 @@ public class HBaseStore implements RawStore {
boolean commit = false;
openTransaction();
try {
+ List<Partition> new_partsCopy = new ArrayList<Partition>();
+ for (int i=0;i<new_parts.size();i++) {
+ Partition newPartCopy = new_parts.get(i).deepCopy();
+ newPartCopy.setDbName(HiveStringUtils.normalizeIdentifier(newPartCopy.getDbName()));
+ newPartCopy.setTableName(HiveStringUtils.normalizeIdentifier(newPartCopy.getTableName()));
+ new_partsCopy.add(i, newPartCopy);
+ }
List<Partition> oldParts = getHBase().getPartitions(HiveStringUtils.normalizeIdentifier(db_name),
HiveStringUtils.normalizeIdentifier(tbl_name),
HBaseUtils.getPartitionKeyTypes(getTable(HiveStringUtils.normalizeIdentifier(db_name),
HiveStringUtils.normalizeIdentifier(tbl_name)).getPartitionKeys()), part_vals_list);
- getHBase().replacePartitions(oldParts, new_parts);
+ getHBase().replacePartitions(oldParts, new_partsCopy, HBaseUtils.getPartitionKeyTypes(
+ getTable(db_name, tbl_name).getPartitionKeys()));
for (List<String> part_vals : part_vals_list) {
getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name),
HiveStringUtils.normalizeIdentifier(tbl_name),
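Every HBaseStore hunk above follows the same pattern: deep-copy the incoming Thrift object, lower-case its identifiers with HiveStringUtils.normalizeIdentifier, and hand the copy to the HBase layer, so normalization never mutates the caller's object. Condensed, the pattern is:

    // Copy-then-normalize before every write (sketch; mirrors addPartition above).
    Partition partCopy = part.deepCopy();
    partCopy.setDbName(HiveStringUtils.normalizeIdentifier(partCopy.getDbName()));
    partCopy.setTableName(HiveStringUtils.normalizeIdentifier(partCopy.getTableName()));
    getHBase().putPartition(partCopy);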
http://git-wip-us.apache.org/repos/asf/hive/blob/e150af94/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index cc90a76..1885089 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -117,24 +117,6 @@ class HBaseUtils {
return protoKey.getBytes(ENCODING);
}
- static List<String> parseKey(byte[] serialized, List<String> partNames, List<String> partTypes) {
- BinarySortableSerDe serDe = new BinarySortableSerDe();
- Properties props = new Properties();
- props.setProperty(serdeConstants.LIST_COLUMNS, "dbName,tableName," + StringUtils.join(partNames, ","));
- props.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string," + StringUtils.join(partTypes, ","));
- List<String> partVals = null;
- try {
- serDe.initialize(new Configuration(), props);
- List deserializedkeys = ((List)serDe.deserialize(new BytesWritable(serialized)));
- partVals = new ArrayList<String>();
- for (Object deserializedkey : deserializedkeys) {
- partVals.add(deserializedkey.toString());
- }
- } catch (SerDeException e) {
- }
- return partVals;
- }
-
private static HbaseMetastoreProto.Parameters buildParameters(Map<String, String> params) {
List<HbaseMetastoreProto.ParameterEntry> entries = new ArrayList<>();
for (Map.Entry<String, String> e : params.entrySet()) {
@@ -910,7 +892,7 @@ class HBaseUtils {
return k.split(KEY_SEPARATOR_STR);
}
- private static List<String> deserializePartitionKey(List<FieldSchema> partitions, byte[] key,
+ static List<String> deserializePartitionKey(List<FieldSchema> partitions, byte[] key,
Configuration conf) {
StringBuffer names = new StringBuffer();
names.append("dbName,tableName,");
@@ -932,9 +914,19 @@ class HBaseUtils {
serDe.initialize(new Configuration(), props);
List deserializedkeys = ((List)serDe.deserialize(new BytesWritable(key))).subList(2, partitions.size()+2);
List<String> partitionKeys = new ArrayList<String>();
- for (Object deserializedKey : deserializedkeys) {
- partitionKeys.add(deserializedKey!=null?deserializedKey.toString():
- HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME));
+ for (int i=0;i<deserializedkeys.size();i++) {
+ Object deserializedKey = deserializedkeys.get(i);
+ if (deserializedKey==null) {
+ partitionKeys.add(HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME));
+ } else {
+ TypeInfo inputType =
+ TypeInfoUtils.getTypeInfoFromTypeString(partitions.get(i).getType());
+ ObjectInspector inputOI =
+ TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(inputType);
+ Converter converter = ObjectInspectorConverters.getConverter(inputOI,
+ PrimitiveObjectInspectorFactory.javaStringObjectInspector);
+ partitionKeys.add((String)converter.convert(deserializedKey));
+ }
}
return partitionKeys;
} catch (SerDeException e) {
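The key change in deserializePartitionKey: instead of calling toString() on each deserialized key column, it now routes the value through an ObjectInspector converter, presumably so that non-string partition columns (int, date, decimal, ...) round-trip through their canonical string form. A self-contained illustration of that conversion step, assuming an "int" partition column (BinarySortableSerDe initialized with writable inspectors hands back writables such as IntWritable):

    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
    import org.apache.hadoop.io.IntWritable;

    TypeInfo inputType = TypeInfoUtils.getTypeInfoFromTypeString("int");
    ObjectInspector inputOI =
        TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(inputType);
    Converter converter = ObjectInspectorConverters.getConverter(inputOI,
        PrimitiveObjectInspectorFactory.javaStringObjectInspector);
    String partVal = (String) converter.convert(new IntWritable(1234));  // -> "1234"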
http://git-wip-us.apache.org/repos/asf/hive/blob/e150af94/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
index fac7dcc..b6dfcf3 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java
@@ -96,18 +96,26 @@ public class TestHBaseStore {
}
}
static final long DEFAULT_TIME = System.currentTimeMillis();
+ static final String PART_KEY = "part";
static final String BOOLEAN_COL = "boolCol";
static final String BOOLEAN_TYPE = "boolean";
+ static final String BOOLEAN_VAL = "true";
static final String LONG_COL = "longCol";
static final String LONG_TYPE = "long";
+ static final String INT_TYPE = "int";
+ static final String INT_VAL = "1234";
static final String DOUBLE_COL = "doubleCol";
static final String DOUBLE_TYPE = "double";
+ static final String DOUBLE_VAL = "3.1415";
static final String STRING_COL = "stringCol";
static final String STRING_TYPE = "string";
+ static final String STRING_VAL = "stringval";
static final String BINARY_COL = "binaryCol";
static final String BINARY_TYPE = "binary";
+ static final String BINARY_VAL = "1";
static final String DECIMAL_COL = "decimalCol";
static final String DECIMAL_TYPE = "decimal(5,3)";
+ static final String DECIMAL_VAL = "12.123";
static List<ColumnStatisticsObj> booleanColStatsObjs = new ArrayList<ColumnStatisticsObj>(
NUM_PARTITIONS);
static List<ColumnStatisticsObj> longColStatsObjs = new ArrayList<ColumnStatisticsObj>(
@@ -991,12 +999,13 @@ public class TestHBaseStore {
@Test
public void booleanPartitionStatistics() throws Exception {
- // Add partition stats for: BOOLEAN_COL and partition: {PART_KEYS(0), PART_VALS(0)} to DB
+ createMockTableAndPartition(BOOLEAN_TYPE, BOOLEAN_VAL);
+ // Add partition stats for: BOOLEAN_COL and partition: {PART_KEY, BOOLEAN_VAL} to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
ColumnStatistics stats = new ColumnStatistics();
// Get a default ColumnStatisticsDesc for partition level stats
- ColumnStatisticsDesc desc = getMockPartColStatsDesc(0, 0);
+ ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, BOOLEAN_VAL);
stats.setStatsDesc(desc);
// Get one of the pre-created ColumnStatisticsObj
ColumnStatisticsObj obj = booleanColStatsObjs.get(0);
@@ -1004,7 +1013,7 @@ public class TestHBaseStore {
// Add to DB
stats.addToStatsObj(obj);
List<String> parVals = new ArrayList<String>();
- parVals.add(PART_VALS.get(0));
+ parVals.add(BOOLEAN_VAL);
store.updatePartitionColumnStatistics(stats, parVals);
// Get from DB
List<String> partNames = new ArrayList<String>();
@@ -1033,12 +1042,13 @@ public class TestHBaseStore {
@Test
public void longPartitionStatistics() throws Exception {
- // Add partition stats for: LONG_COL and partition: {PART_KEYS(0), PART_VALS(0)} to DB
+ createMockTableAndPartition(INT_TYPE, INT_VAL);
+ // Add partition stats for: LONG_COL and partition: {PART_KEY, INT_VAL} to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
ColumnStatistics stats = new ColumnStatistics();
// Get a default ColumnStatisticsDesc for partition level stats
- ColumnStatisticsDesc desc = getMockPartColStatsDesc(0, 0);
+ ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, INT_VAL);
stats.setStatsDesc(desc);
// Get one of the pre-created ColumnStatisticsObj
ColumnStatisticsObj obj = longColStatsObjs.get(0);
@@ -1046,7 +1056,7 @@ public class TestHBaseStore {
// Add to DB
stats.addToStatsObj(obj);
List<String> parVals = new ArrayList<String>();
- parVals.add(PART_VALS.get(0));
+ parVals.add(INT_VAL);
store.updatePartitionColumnStatistics(stats, parVals);
// Get from DB
List<String> partNames = new ArrayList<String>();
@@ -1076,12 +1086,13 @@ public class TestHBaseStore {
@Test
public void doublePartitionStatistics() throws Exception {
- // Add partition stats for: DOUBLE_COL and partition: {PART_KEYS(0), PART_VALS(0)} to DB
+ createMockTableAndPartition(DOUBLE_TYPE, DOUBLE_VAL);
+ // Add partition stats for: DOUBLE_COL and partition: {PART_KEY, DOUBLE_VAL} to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
ColumnStatistics stats = new ColumnStatistics();
// Get a default ColumnStatisticsDesc for partition level stats
- ColumnStatisticsDesc desc = getMockPartColStatsDesc(0, 0);
+ ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DOUBLE_VAL);
stats.setStatsDesc(desc);
// Get one of the pre-created ColumnStatisticsObj
ColumnStatisticsObj obj = doubleColStatsObjs.get(0);
@@ -1089,7 +1100,7 @@ public class TestHBaseStore {
// Add to DB
stats.addToStatsObj(obj);
List<String> parVals = new ArrayList<String>();
- parVals.add(PART_VALS.get(0));
+ parVals.add(DOUBLE_VAL);
store.updatePartitionColumnStatistics(stats, parVals);
// Get from DB
List<String> partNames = new ArrayList<String>();
@@ -1119,12 +1130,13 @@ public class TestHBaseStore {
@Test
public void stringPartitionStatistics() throws Exception {
- // Add partition stats for: STRING_COL and partition: {PART_KEYS(0), PART_VALS(0)} to DB
+ createMockTableAndPartition(STRING_TYPE, STRING_VAL);
+ // Add partition stats for: STRING_COL and partition: {PART_KEY, STRING_VAL} to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
ColumnStatistics stats = new ColumnStatistics();
// Get a default ColumnStatisticsDesc for partition level stats
- ColumnStatisticsDesc desc = getMockPartColStatsDesc(0, 0);
+ ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, STRING_VAL);
stats.setStatsDesc(desc);
// Get one of the pre-created ColumnStatisticsObj
ColumnStatisticsObj obj = stringColStatsObjs.get(0);
@@ -1132,7 +1144,7 @@ public class TestHBaseStore {
// Add to DB
stats.addToStatsObj(obj);
List<String> parVals = new ArrayList<String>();
- parVals.add(PART_VALS.get(0));
+ parVals.add(STRING_VAL);
store.updatePartitionColumnStatistics(stats, parVals);
// Get from DB
List<String> partNames = new ArrayList<String>();
@@ -1162,12 +1174,13 @@ public class TestHBaseStore {
@Test
public void binaryPartitionStatistics() throws Exception {
- // Add partition stats for: BINARY_COL and partition: {PART_KEYS(0), PART_VALS(0)} to DB
+ createMockTableAndPartition(BINARY_TYPE, BINARY_VAL);
+ // Add partition stats for: BINARY_COL and partition: {PART_KEY, BINARY_VAL} to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
ColumnStatistics stats = new ColumnStatistics();
// Get a default ColumnStatisticsDesc for partition level stats
- ColumnStatisticsDesc desc = getMockPartColStatsDesc(0, 0);
+ ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, BINARY_VAL);
stats.setStatsDesc(desc);
// Get one of the pre-created ColumnStatisticsObj
ColumnStatisticsObj obj = binaryColStatsObjs.get(0);
@@ -1175,7 +1188,7 @@ public class TestHBaseStore {
// Add to DB
stats.addToStatsObj(obj);
List<String> parVals = new ArrayList<String>();
- parVals.add(PART_VALS.get(0));
+ parVals.add(BINARY_VAL);
store.updatePartitionColumnStatistics(stats, parVals);
// Get from DB
List<String> partNames = new ArrayList<String>();
@@ -1204,12 +1217,13 @@ public class TestHBaseStore {
@Test
public void decimalPartitionStatistics() throws Exception {
- // Add partition stats for: DECIMAL_COL and partition: {PART_KEYS(0), PART_VALS(0)} to DB
+ createMockTableAndPartition(DECIMAL_TYPE, DECIMAL_VAL);
+ // Add partition stats for: DECIMAL_COL and partition: {PART_KEY, DECIMAL_VAL} to DB
// Because of the way our mock implementation works we actually need to not create the table
// before we set statistics on it.
ColumnStatistics stats = new ColumnStatistics();
// Get a default ColumnStatisticsDesc for partition level stats
- ColumnStatisticsDesc desc = getMockPartColStatsDesc(0, 0);
+ ColumnStatisticsDesc desc = getMockPartColStatsDesc(PART_KEY, DECIMAL_VAL);
stats.setStatsDesc(desc);
// Get one of the pre-created ColumnStatisticsObj
ColumnStatisticsObj obj = decimalColStatsObjs.get(0);
@@ -1217,7 +1231,7 @@ public class TestHBaseStore {
// Add to DB
stats.addToStatsObj(obj);
List<String> parVals = new ArrayList<String>();
- parVals.add(PART_VALS.get(0));
+ parVals.add(DECIMAL_VAL);
store.updatePartitionColumnStatistics(stats, parVals);
// Get from DB
List<String> partNames = new ArrayList<String>();
@@ -1245,6 +1259,25 @@ public class TestHBaseStore {
Assert.assertEquals(decimalData.getNumDVs(), decimalDataFromDB.getNumDVs());
}
+ private Table createMockTableAndPartition(String partType, String partVal) throws Exception {
+ List<FieldSchema> cols = new ArrayList<FieldSchema>();
+ cols.add(new FieldSchema("col1", partType, ""));
+ List<String> vals = new ArrayList<String>();
+ vals.add(partVal);
+ SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+ Map<String, String> params = new HashMap<String, String>();
+ params.put("key", "value");
+ StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
+ serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
+ int currentTime = (int)(System.currentTimeMillis() / 1000);
+ Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols,
+ emptyParameters, null, null, null);
+ store.createTable(table);
+ Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd,
+ emptyParameters);
+ store.addPartition(part);
+ return table;
+ }
/**
* Returns a dummy table level ColumnStatisticsDesc with default values
*/
@@ -1260,13 +1293,13 @@ public class TestHBaseStore {
/**
* Returns a dummy partition level ColumnStatisticsDesc
*/
- private ColumnStatisticsDesc getMockPartColStatsDesc(int partKeyIndex, int partValIndex) {
+ private ColumnStatisticsDesc getMockPartColStatsDesc(String partKey, String partVal) {
ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
desc.setLastAnalyzed(DEFAULT_TIME);
desc.setDbName(DB);
desc.setTableName(TBL);
// part1=val1
- desc.setPartName(PART_KEYS.get(partKeyIndex) + PART_KV_SEPARATOR + PART_VALS.get(partValIndex));
+ desc.setPartName(partKey + PART_KV_SEPARATOR + partVal);
desc.setIsTblLevel(false);
return desc;
}
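With the mock table and partition now created explicitly, each per-type test opens the same way; a condensed sketch of the flow, mirroring longPartitionStatistics above (names as in the test class):

    // Declare a table whose partition column has the type under test, add one
    // partition, then attach pre-built stats to that partition.
    createMockTableAndPartition(INT_TYPE, INT_VAL);
    ColumnStatistics stats = new ColumnStatistics();
    stats.setStatsDesc(getMockPartColStatsDesc(PART_KEY, INT_VAL));
    stats.addToStatsObj(longColStatsObjs.get(0));
    store.updatePartitionColumnStatistics(stats, Arrays.asList(INT_VAL));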