Posted to commits@phoenix.apache.org by sa...@apache.org on 2016/03/23 20:53:29 UTC
phoenix git commit: Changes to fix index tests
Repository: phoenix
Updated Branches:
refs/heads/encodecolumns 42294c4ba -> ed0907b93
Changes to fix index tests
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ed0907b9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ed0907b9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ed0907b9
Branch: refs/heads/encodecolumns
Commit: ed0907b93026765de22522ca27caa46536fff22b
Parents: 42294c4
Author: Samarth <sa...@salesforce.com>
Authored: Wed Mar 23 12:53:12 2016 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Wed Mar 23 12:53:12 2016 -0700
----------------------------------------------------------------------
.../phoenix/compile/ProjectionCompiler.java | 2 +-
.../apache/phoenix/compile/UnionCompiler.java | 2 +-
.../UngroupedAggregateRegionObserver.java | 2 +-
.../apache/phoenix/execute/BaseQueryPlan.java | 16 +++--
.../expression/KeyValueColumnExpression.java | 2 +-
.../apache/phoenix/index/IndexMaintainer.java | 69 ++++++++++++--------
.../index/PhoenixTransactionalIndexer.java | 1 -
.../phoenix/iterate/BaseResultIterators.java | 1 -
.../mapreduce/FormatToKeyValueReducer.java | 1 +
.../apache/phoenix/schema/MetaDataClient.java | 15 ++---
.../java/org/apache/phoenix/schema/PColumn.java | 2 +
.../java/org/apache/phoenix/schema/PTable.java | 1 -
.../org/apache/phoenix/schema/PTableImpl.java | 6 +-
.../java/org/apache/phoenix/util/IndexUtil.java | 4 --
14 files changed, 68 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
index b6a6771..e07b350 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ProjectionCompiler.java
@@ -702,7 +702,7 @@ public class ProjectionCompiler {
public Void visit(ProjectedColumnExpression expression) {
if (expression.getDataType().isArrayType()) {
indexProjectedColumns.add(expression);
- //TODO: samarth confirm this change that column names
+ //TODO: samarth confirm this change is to have encodedColumnNames as false.
KeyValueColumnExpression keyValueColumnExpression = new KeyValueColumnExpression(expression.getColumn(), false);
indexKVs.add(keyValueColumnExpression);
copyOfChildren.set(0, keyValueColumnExpression);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
index 0331015..846dccf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
@@ -73,7 +73,7 @@ public class UnionCompiler {
ColumnProjector colProj = plan.getProjector().getColumnProjector(i);
Expression sourceExpression = colProj.getExpression();
String name = selectNodes == null ? colProj.getName() : selectNodes.get(i).getAlias();
- //TODO: samarth confirm this is the right change
+ //TODO: samarth confirm that column qualifier can be null here.
PColumnImpl projectedColumn = new PColumnImpl(PNameFactory.newName(name), UNION_FAMILY_NAME,
sourceExpression.getDataType(), sourceExpression.getMaxLength(), sourceExpression.getScale(), sourceExpression.isNullable(),
i, sourceExpression.getSortOrder(), 500, null, false, sourceExpression.toString(), false, false, null);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 3bdcee3..950817f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -246,7 +246,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
}
emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
- emptyKVQualifier = scan.getAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER);
+ emptyKVQualifier = scan.getAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER);//TODO: samarth check this
}
TupleProjector tupleProjector = null;
HRegion dataRegion = null;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index ee6d4ab..3d10545 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -74,6 +74,7 @@ import org.apache.phoenix.util.LogUtil;
import org.apache.phoenix.util.SQLCloseable;
import org.apache.phoenix.util.SQLCloseables;
import org.apache.phoenix.util.ScanUtil;
+import org.apache.phoenix.util.SchemaUtil;
import org.cloudera.htrace.TraceScope;
import com.google.common.collect.ImmutableSet;
@@ -280,11 +281,6 @@ public abstract class BaseQueryPlan implements QueryPlan {
// TODO: can have a hint to skip joining back to data table, in that case if any column to
// project is not present in the index then we need to skip this plan.
if (!dataColumns.isEmpty()) {
- // Set data columns to be join back from data table.
- serializeDataTableColumnsToJoin(scan, dataColumns);
- KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
- // Set key value schema of the data columns.
- serializeSchemaIntoScan(scan, schema);
PTable parentTable = context.getCurrentTable().getTable();
String parentSchemaName = parentTable.getParentSchemaName().getString();
String parentTableName = parentTable.getParentTableName().getString();
@@ -295,6 +291,12 @@ public abstract class BaseQueryPlan implements QueryPlan {
FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)),
context.getConnection()).resolveTable(parentSchemaName, parentTableName);
PTable dataTable = dataTableRef.getTable();
+ // Set data columns to be join back from data table.
+ serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
+ KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
+ // Set key value schema of the data columns.
+ serializeSchemaIntoScan(scan, schema);
+
// Set index maintainer of the local index.
serializeIndexMaintainerIntoScan(scan, dataTable);
// Set view constants if exists.
@@ -397,14 +399,14 @@ public abstract class BaseQueryPlan implements QueryPlan {
}
}
- private void serializeDataTableColumnsToJoin(Scan scan, Set<PColumn> dataColumns) {
+ private void serializeDataTableColumnsToJoin(Scan scan, Set<PColumn> dataColumns, PTable dataTable) {
ByteArrayOutputStream stream = new ByteArrayOutputStream();
try {
DataOutputStream output = new DataOutputStream(stream);
WritableUtils.writeVInt(output, dataColumns.size());
for (PColumn column : dataColumns) {
Bytes.writeByteArray(output, column.getFamilyName().getBytes());
- Bytes.writeByteArray(output, column.getName().getBytes());
+ Bytes.writeByteArray(output, SchemaUtil.getColumnQualifier(column, dataTable));
}
scan.setAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN, stream.toByteArray());
} catch (IOException e) {
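The hunk above reorders things so the data table is resolved first and the join-back qualifiers are looked up against it via SchemaUtil.getColumnQualifier rather than taken from the column name, which matters once qualifiers are encoded. The scan attribute itself is just a vint count followed by length-prefixed (family, qualifier) byte arrays. A minimal standalone sketch of that layout, using hypothetical column names and the same Hadoop/HBase helpers the patch uses:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

public class ColumnsToJoinFormatSketch {

    // Writes the same layout as serializeDataTableColumnsToJoin: a vint count,
    // then a length-prefixed family and qualifier per column.
    static byte[] write(Map<byte[], byte[]> familyToQualifier) throws IOException {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(stream);
        WritableUtils.writeVInt(output, familyToQualifier.size());
        for (Map.Entry<byte[], byte[]> col : familyToQualifier.entrySet()) {
            Bytes.writeByteArray(output, col.getKey());   // column family
            Bytes.writeByteArray(output, col.getValue()); // qualifier (name-based or encoded)
        }
        return stream.toByteArray();
    }

    // Reads the attribute back, as a server-side coprocessor would.
    static void read(byte[] attr) throws IOException {
        DataInputStream input = new DataInputStream(new ByteArrayInputStream(attr));
        int n = WritableUtils.readVInt(input);
        for (int i = 0; i < n; i++) {
            byte[] cf = Bytes.readByteArray(input);
            byte[] cq = Bytes.readByteArray(input);
            System.out.println(Bytes.toStringBinary(cf) + ":" + Bytes.toStringBinary(cq));
        }
    }

    public static void main(String[] args) throws IOException {
        Map<byte[], byte[]> cols = new LinkedHashMap<byte[], byte[]>();
        cols.put(Bytes.toBytes("0"), Bytes.toBytes("V1")); // hypothetical family/qualifier
        read(write(cols));
    }
}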
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java
index 163cc72..3787d6e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/KeyValueColumnExpression.java
@@ -41,7 +41,7 @@ import org.apache.phoenix.util.SchemaUtil;
public class KeyValueColumnExpression extends ColumnExpression {
private byte[] cf;
private byte[] cq;
- private String displayName; // client-side only
+ private String displayName; // client-side only. TODO: samarth see what you can do for encoded column names.
public KeyValueColumnExpression() {
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 7e6d0f6..8e8d493 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -32,8 +32,11 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
+import co.cask.tephra.TxConstants;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
@@ -95,8 +98,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
-import co.cask.tephra.TxConstants;
-
/**
*
* Class that builds index row key from data row key and current state of
@@ -292,7 +293,6 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
private final boolean isDataTableSalted;
private final RowKeySchema dataRowKeySchema;
- private List<ImmutableBytesPtr> indexQualifiers;
private int estimatedIndexRowKeyBytes;
private int estimatedExpressionSize;
private int[] dataPkPosition;
@@ -301,6 +301,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
private boolean rowKeyOrderOptimizable;
private boolean usesEncodedColumnNames;
private ImmutableBytesPtr emptyKeyValueQualifierPtr;
+ // Map of covered columns where the key part is the column reference for column in data table
+ // and value is the column reference for column in the index table.
+ private Map<ColumnReference, ColumnReference> coveredColumnsMap;
private IndexMaintainer(RowKeySchema dataRowKeySchema, boolean isDataTableSalted) {
this.dataRowKeySchema = dataRowKeySchema;
@@ -320,7 +323,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
* that is serialized in it. Because of this we are forced to have the indexes inherit the
* storage scheme of the parent data tables.
*/
- this.usesEncodedColumnNames = SchemaUtil.usesEncodedColumnNames(index);
+ this.usesEncodedColumnNames = SchemaUtil.usesEncodedColumnNames(dataTable);
byte[] indexTableName = index.getPhysicalName().getBytes();
// Use this for the nDataSaltBuckets as we need this for local indexes
// TODO: persist nDataSaltBuckets separately, but maintain b/w compat.
@@ -370,6 +373,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
this.indexedColumnTypes = Lists.<PDataType>newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns);
this.indexedExpressions = Lists.newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns);
this.coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexColumns-nIndexPKColumns);
+ this.coveredColumnsMap = Maps.newHashMapWithExpectedSize(nIndexColumns - nIndexPKColumns);
this.nIndexSaltBuckets = nIndexSaltBuckets == null ? 0 : nIndexSaltBuckets;
this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(dataTable);
this.emptyKeyValueCFPtr = SchemaUtil.getEmptyColumnFamilyPtr(index);
@@ -435,9 +439,12 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
for (int i = 0; i < index.getColumnFamilies().size(); i++) {
PColumnFamily family = index.getColumnFamilies().get(i);
for (PColumn indexColumn : family.getColumns()) {
- PColumn column = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
- byte[] cq = SchemaUtil.getColumnQualifier(column, index);
- this.coveredColumns.add(new ColumnReference(column.getFamilyName().getBytes(), cq));
+ PColumn dataColumn = IndexUtil.getDataColumn(dataTable, indexColumn.getName().getString());
+ byte[] dataColumnCq = SchemaUtil.getColumnQualifier(dataColumn, dataTable);
+ byte[] indexColumnCq = SchemaUtil.getColumnQualifier(indexColumn, index);
+ this.coveredColumns.add(new ColumnReference(dataColumn.getFamilyName().getBytes(), dataColumnCq));
+ this.coveredColumnsMap.put(new ColumnReference(dataColumn.getFamilyName().getBytes(), dataColumnCq),
+ new ColumnReference(indexColumn.getFamilyName().getBytes(), indexColumnCq));
}
}
this.estimatedIndexRowKeyBytes = estimateIndexRowKeyByteSize(indexColByteSize);
@@ -855,9 +862,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
emptyKeyValueQualifierPtr));
put.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
}
- int i = 0;
for (ColumnReference ref : this.getCoverededColumns()) {
- ImmutableBytesPtr cq = this.indexQualifiers.get(i++);
+ //FIXME: samarth figure out a backward compatible way to handle this as coveredColumnsMap won't be available for older Phoenix clients.
+ ImmutableBytesPtr cq = this.coveredColumnsMap.get(ref).getQualifierWritable();
ImmutableBytesWritable value = valueGetter.getLatestValue(ref);
byte[] indexRowKey = this.buildRowKey(valueGetter, dataRowKeyPtr, regionStartKey, regionEndKey);
ImmutableBytesPtr rowKey = new ImmutableBytesPtr(indexRowKey);
@@ -977,11 +984,15 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
delete = new Delete(indexRowKey);
delete.setDurability(!indexWALDisabled ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
}
+ ColumnReference indexColumn = coveredColumnsMap.get(ref);
// If point delete for data table, then use point delete for index as well
- if (kv.getTypeByte() == KeyValue.Type.Delete.getCode()) {
- delete.deleteColumn(ref.getFamily(), IndexUtil.getIndexColumnName(ref.getFamily(), ref.getQualifier()), ts);
+ if (kv.getTypeByte() == KeyValue.Type.Delete.getCode()) {
+ //FIXME: samarth change this. Index column qualifiers are not derivable from data table cqs.
+ // Figure out a backward compatible way of doing this since coveredColumnsMap won't be available
+ // for older clients.
+ delete.deleteColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
} else {
- delete.deleteColumns(ref.getFamily(), IndexUtil.getIndexColumnName(ref.getFamily(), ref.getQualifier()), ts);
+ delete.deleteColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
}
}
}
@@ -1036,10 +1047,16 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
isLocalIndex = encodedCoveredolumnsAndLocalIndex < 0;
int nCoveredColumns = Math.abs(encodedCoveredolumnsAndLocalIndex) - 1;
coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nCoveredColumns);
+ coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns);
for (int i = 0; i < nCoveredColumns; i++) {
- byte[] cf = Bytes.readByteArray(input);
- byte[] cq = Bytes.readByteArray(input);
- coveredColumns.add(new ColumnReference(cf,cq));
+ byte[] dataTableCf = Bytes.readByteArray(input);
+ byte[] dataTableCq = Bytes.readByteArray(input);
+ byte[] indexTableCf = Bytes.readByteArray(input);
+ byte[] indexTableCq = Bytes.readByteArray(input);
+ ColumnReference dataColumn = new ColumnReference(dataTableCf, dataTableCq);
+ coveredColumns.add(dataColumn);
+ ColumnReference indexColumn = new ColumnReference(indexTableCf, indexTableCq);
+ coveredColumnsMap.put(dataColumn, indexColumn);
}
// Hack to serialize whether the index row key is optimizable
int len = WritableUtils.readVInt(input);
@@ -1143,9 +1160,13 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
}
// Encode coveredColumns.size() and whether or not this is a local index
WritableUtils.writeVInt(output, (coveredColumns.size() + 1) * (isLocalIndex ? -1 : 1));
- for (ColumnReference ref : coveredColumns) {
- Bytes.writeByteArray(output, ref.getFamily());
- Bytes.writeByteArray(output, ref.getQualifier());
+ for (Entry<ColumnReference, ColumnReference> ref : coveredColumnsMap.entrySet()) {
+ ColumnReference dataColumn = ref.getKey();
+ ColumnReference indexColumn = ref.getValue();
+ Bytes.writeByteArray(output, dataColumn.getFamily());
+ Bytes.writeByteArray(output, dataColumn.getQualifier());
+ Bytes.writeByteArray(output, indexColumn.getFamily());
+ Bytes.writeByteArray(output, indexColumn.getQualifier());
}
// TODO: remove when rowKeyOrderOptimizable hack no longer needed
WritableUtils.writeVInt(output,indexTableName.length * (rowKeyOrderOptimizable ? 1 : -1));
@@ -1215,15 +1236,9 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
* Init calculated state reading/creating
*/
private void initCachedState() {
- dataEmptyKeyValueRef = new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(), SchemaUtil
- .getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst());
- emptyKeyValueQualifierPtr = new ImmutableBytesPtr(SchemaUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst());
- indexQualifiers = Lists.newArrayListWithExpectedSize(this.coveredColumns.size());
- for (ColumnReference ref : coveredColumns) {
- indexQualifiers.add(new ImmutableBytesPtr(IndexUtil.getIndexColumnName(
- ref.getFamily(), ref.getQualifier())));
- }
-
+ byte[] emptyKvQualifier = SchemaUtil.getEmptyKeyValueInfo(usesEncodedColumnNames).getFirst();
+ dataEmptyKeyValueRef = new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(), emptyKvQualifier);
+ emptyKeyValueQualifierPtr = new ImmutableBytesPtr(emptyKvQualifier);
this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumns.size());
// columns that are required to evaluate all expressions in indexedExpressions (not including columns in data row key)
this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size());
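The core of this change is the new coveredColumnsMap: with encoded column names an index cell's qualifier can no longer be derived from the data table qualifier (the old IndexUtil.getIndexColumnName byte concatenation), so the maintainer now carries an explicit data-column to index-column mapping and serializes both sides of each pair. A minimal sketch of that encode/decode round trip, with a plain (family, qualifier) holder standing in for Phoenix's ColumnReference and hypothetical column names:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

public class CoveredColumnsMapSketch {

    // Plain stand-in for Phoenix's ColumnReference (family + qualifier).
    static final class Col {
        final byte[] cf;
        final byte[] cq;
        Col(byte[] cf, byte[] cq) { this.cf = cf; this.cq = cq; }
    }

    // Encodes the map the way the patched write() does: one signed vint carrying both the
    // entry count and the local-index flag, then four byte arrays per covered column.
    static byte[] write(Map<Col, Col> coveredColumnsMap, boolean isLocalIndex) throws IOException {
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(stream);
        WritableUtils.writeVInt(output, (coveredColumnsMap.size() + 1) * (isLocalIndex ? -1 : 1));
        for (Map.Entry<Col, Col> e : coveredColumnsMap.entrySet()) {
            Bytes.writeByteArray(output, e.getKey().cf);   // data table family
            Bytes.writeByteArray(output, e.getKey().cq);   // data table qualifier
            Bytes.writeByteArray(output, e.getValue().cf); // index table family
            Bytes.writeByteArray(output, e.getValue().cq); // index table qualifier
        }
        return stream.toByteArray();
    }

    // Decodes it the way the patched readFields() does.
    static void read(byte[] serialized) throws IOException {
        DataInputStream input = new DataInputStream(new ByteArrayInputStream(serialized));
        int encoded = WritableUtils.readVInt(input);
        boolean isLocalIndex = encoded < 0;
        int nCoveredColumns = Math.abs(encoded) - 1;
        System.out.println("localIndex=" + isLocalIndex + ", coveredColumns=" + nCoveredColumns);
        for (int i = 0; i < nCoveredColumns; i++) {
            byte[] dataCf = Bytes.readByteArray(input);
            byte[] dataCq = Bytes.readByteArray(input);
            byte[] indexCf = Bytes.readByteArray(input);
            byte[] indexCq = Bytes.readByteArray(input);
            System.out.println(Bytes.toStringBinary(dataCf) + ":" + Bytes.toStringBinary(dataCq)
                    + " -> " + Bytes.toStringBinary(indexCf) + ":" + Bytes.toStringBinary(indexCq));
        }
    }

    public static void main(String[] args) throws IOException {
        Map<Col, Col> map = new LinkedHashMap<Col, Col>();
        // Hypothetical covered column: data column 0:V1 mapped to index column 0:"0:V1".
        map.put(new Col(Bytes.toBytes("0"), Bytes.toBytes("V1")),
                new Col(Bytes.toBytes("0"), Bytes.toBytes("0:V1")));
        read(write(map, false));
    }
}

As the FIXME in the hunk notes, an older client that wrote only two byte arrays per covered column will not match this four-array layout, so a backward compatible (likely versioned) serialization is still needed.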
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index 6ee981a..0eeb662 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -221,7 +221,6 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
Scan scan = new Scan();
// Project all mutable columns
for (ColumnReference ref : mutableColumns) {
- //TODO: samarth confirm this is ok
scan.addColumn(ref.getFamily(), ref.getQualifier());
}
// Indexes inherit the storage scheme of the data table which means all the indexes have the same
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index eabaaa3..7593718 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -297,7 +297,6 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
if (whereCol.getSecond() == null) {
scan.addFamily(family);
} else {
- //TODO: samarth confirm this
scan.addColumn(family, whereCol.getSecond());
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
index 04fe959..cc1559a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
@@ -152,6 +152,7 @@ public class FormatToKeyValueReducer
}
map.add(kv);
}
+ //FIXME: samarth need to supply the right empty column qualifier here.
KeyValue empty = builder.buildPut(key.getRowkey(),
emptyFamilyName.get(tableIndex),
QueryConstants.EMPTY_COLUMN_BYTES_PTR, ByteUtil.EMPTY_BYTE_ARRAY_PTR);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 455e2b1..d7203ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1880,19 +1880,16 @@ public class MetaDataClient {
nextColumnQualifiers = SchemaUtil.getNextColumnQualifiers(parent);
}
}
- } else if (parent != null && tableType == PTableType.INDEX) {
+ } else {
// New indexes on existing tables can have encoded column names. But unfortunately, due to
// backward compatibility reasons, we aren't able to change IndexMaintainer and the state
// that is serialized in it. Because of this we are forced to have the indexes inherit the
- // storage scheme of the parent data tables.
- storageScheme = parent.getStorageScheme();
+ // storage scheme of the parent data tables. Otherwise, we always attempt to create tables
+ // with encoded column names.
+ storageScheme = parent != null ? parent.getStorageScheme() : StorageScheme.ENCODED_COLUMN_NAMES;
if (storageScheme == StorageScheme.ENCODED_COLUMN_NAMES) {
- nextColumnQualifiers = SchemaUtil.getNextColumnQualifiers(parent);
+ nextColumnQualifiers = Maps.newHashMapWithExpectedSize(colDefs.size() - pkColumns.size());
}
- } else {
- // we always attempt to create tables with encoded column names.
- storageScheme = StorageScheme.ENCODED_COLUMN_NAMES;
- nextColumnQualifiers = Maps.newHashMapWithExpectedSize(colDefs.size() - pkColumns.size());
}
for (ColumnDef colDef : colDefs) {
@@ -3082,7 +3079,7 @@ public class MetaDataClient {
Set<ColumnReference> coveredColumns = indexMaintainer.getCoverededColumns();
List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
for(PColumn columnToDrop : tableColumnsToDrop) {
- ColumnReference columnToDropRef = new ColumnReference(columnToDrop.getFamilyName().getBytes(), columnToDrop.getName().getBytes());
+ ColumnReference columnToDropRef = new ColumnReference(columnToDrop.getFamilyName().getBytes(), SchemaUtil.getColumnQualifier(columnToDrop, index));
if (indexColumns.contains(columnToDropRef)) {
indexesToDrop.add(new TableRef(index));
}
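The net effect of the restructured branch above in MetaDataClient: an index (or any table created with a parent) inherits the parent's storage scheme, because IndexMaintainer's serialized state can't carry a different one, while brand-new tables default to encoded column names. A tiny sketch of that decision, using a hypothetical enum in place of PTable's StorageScheme:

public class StorageSchemeChoiceSketch {

    // Hypothetical stand-in for the StorageScheme values on the encodecolumns branch.
    enum StorageScheme { ENCODED_COLUMN_NAMES, NON_ENCODED_COLUMN_NAMES }

    // Tables with a parent (notably indexes) inherit the parent's scheme; new tables
    // default to encoded column names.
    static StorageScheme chooseScheme(StorageScheme parentScheme) {
        return parentScheme != null ? parentScheme : StorageScheme.ENCODED_COLUMN_NAMES;
    }

    public static void main(String[] args) {
        System.out.println(chooseScheme(null));                                   // ENCODED_COLUMN_NAMES
        System.out.println(chooseScheme(StorageScheme.NON_ENCODED_COLUMN_NAMES)); // inherited from parent
    }
}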
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
index b63c97b..3ffb845 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PColumn.java
@@ -63,6 +63,8 @@ public interface PColumn extends PDatum {
/**
* @return name of the HBase column qualifier
+ * TODO: samarth I think we should change this to return a byte[] array.
+ * Then we won't have to worry about calling SchemaUtil... everywhere. I think.
*/
Integer getColumnQualifier();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index 662f114..f54f87e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -237,7 +237,6 @@ public interface PTable extends PMetaDataEntity {
* can be found
* @throws AmbiguousColumnException if multiple columns are found with the given name
*/
- //TODO: samarth inspect all the callers of this method.
PColumn getPColumnForColumnName(String name) throws ColumnNotFoundException, AmbiguousColumnException;
/**
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index b8a3dc0..0fdba22 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.coprocessor.generated.PGuidePostsProtos;
import org.apache.phoenix.coprocessor.generated.PGuidePostsProtos.PGuidePosts;
import org.apache.phoenix.coprocessor.generated.PTableProtos;
@@ -787,10 +788,11 @@ public class PTableImpl implements PTable {
// Because we cannot enforce a not null constraint on a KV column (since we don't know if the row exists when
// we upsert it), we instead add a KV that is always empty. This allows us to imitate SQL semantics given the
// way HBase works.
+ Pair<byte[], byte[]> emptyKvInfo = SchemaUtil.getEmptyKeyValueInfo(PTableImpl.this);
addQuietly(setValues, kvBuilder, kvBuilder.buildPut(keyPtr,
SchemaUtil.getEmptyColumnFamilyPtr(PTableImpl.this),
- QueryConstants.EMPTY_COLUMN_BYTES_PTR, ts,
- QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR));
+ new ImmutableBytesPtr(emptyKvInfo.getFirst()), ts,
+ new ImmutableBytesPtr(emptyKvInfo.getSecond())));
mutations.add(setValues);
if (!unsetValues.isEmpty()) {
mutations.add(unsetValues);
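The pre-existing comment in this hunk explains why the empty KeyValue exists: a row whose non-PK columns are all null would otherwise have no cells at all in HBase, so every upsert writes one marker cell in the empty column family. The change just sources the qualifier and value from SchemaUtil.getEmptyKeyValueInfo so encoded-column tables get their encoded empty qualifier instead of the fixed QueryConstants one. A standalone sketch of the idea against the HBase 1.x client API (the row key and the non-encoded "_0" qualifier are illustrative):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class EmptyKeyValueSketch {

    public static void main(String[] args) {
        long ts = System.currentTimeMillis();
        byte[] rowKey = Bytes.toBytes("row1");    // hypothetical row key
        byte[] emptyCf = Bytes.toBytes("0");      // default Phoenix column family
        byte[] emptyCq = Bytes.toBytes("_0");     // non-encoded empty qualifier; an encoded-column
                                                  // table would use the value from getEmptyKeyValueInfo
        byte[] emptyValue = new byte[0];

        Put put = new Put(rowKey);
        // The marker cell: even if every covered KV column is null, the row still has
        // at least this one cell, so it exists and remains visible to scans.
        put.addColumn(emptyCf, emptyCq, ts, emptyValue);
        System.out.println(put);
    }
}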
http://git-wip-us.apache.org/repos/asf/phoenix/blob/ed0907b9/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 587473f..a0f8995 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -148,10 +148,6 @@ public class IndexUtil {
return (dataColumnFamilyName == null ? "" : dataColumnFamilyName) + INDEX_COLUMN_NAME_SEP + dataColumnName;
}
- public static byte[] getIndexColumnName(byte[] dataColumnFamilyName, byte[] dataColumnName) {
- return ByteUtil.concat(dataColumnFamilyName == null ? ByteUtil.EMPTY_BYTE_ARRAY : dataColumnFamilyName, INDEX_COLUMN_NAME_SEP_BYTES, dataColumnName);
- }
-
public static String getIndexColumnName(PColumn dataColumn) {
String dataColumnFamilyName = SchemaUtil.isPKColumn(dataColumn) ? null : dataColumn.getFamilyName().getString();
return getIndexColumnName(dataColumnFamilyName, dataColumn.getName().getString());
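The removed byte[] overload was the old way of deriving an index cell's qualifier directly from the data table family and qualifier; with encoded qualifiers that derivation no longer holds, which is what the coveredColumnsMap in IndexMaintainer replaces. The String overload that remains still builds logical index column names by simple concatenation. A small sketch mirroring it, assuming ":" as the value of INDEX_COLUMN_NAME_SEP:

public class IndexColumnNameSketch {

    // Assumed value of IndexUtil.INDEX_COLUMN_NAME_SEP.
    private static final String INDEX_COLUMN_NAME_SEP = ":";

    // Mirrors the surviving String overload: PK columns (null family) get an empty prefix.
    static String getIndexColumnName(String dataColumnFamilyName, String dataColumnName) {
        return (dataColumnFamilyName == null ? "" : dataColumnFamilyName)
                + INDEX_COLUMN_NAME_SEP + dataColumnName;
    }

    public static void main(String[] args) {
        System.out.println(getIndexColumnName("0", "V1"));    // 0:V1
        System.out.println(getIndexColumnName(null, "PK1"));  // :PK1
    }
}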