You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by sa...@apache.org on 2016/05/28 00:45:07 UTC
[1/4] phoenix git commit: Optimize order by and grouped aggregations
by taking advantage of column encoding
Repository: phoenix
Updated Branches:
refs/heads/encodecolumns 6461e9594 -> 9525c72fb
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
index 90b75a6..376567a 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/LiteralResultIteratorPlanTest.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.execute;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
import static org.apache.phoenix.util.PhoenixRuntime.CONNECTIONLESS;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
@@ -49,6 +50,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnImpl;
@@ -170,7 +172,7 @@ public class LiteralResultIteratorPlanTest {
String name = ParseNodeFactory.createTempAlias();
Expression expr = LiteralExpression.newConstant(row[i]);
columns.add(new PColumnImpl(PNameFactory.newName(name),
- PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(),
+ PNameFactory.newName(VALUE_COLUMN_FAMILY), expr.getDataType(), expr.getMaxLength(),
expr.getScale(), expr.isNullable(), i, expr.getSortOrder(), null, null, false, name, false, false, null));
}
try {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
index fe29e52..ff62f63 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/UnnestArrayPlanTest.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.execute;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
import static org.apache.phoenix.util.PhoenixRuntime.CONNECTIONLESS;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
@@ -117,8 +118,8 @@ public class UnnestArrayPlanTest {
LiteralExpression dummy = LiteralExpression.newConstant(null, arrayType);
RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0);
UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality);
- PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, null);
- PColumn indexColumn = withOrdinality ? new PColumnImpl(PNameFactory.newName("IDX"), PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, null) : null;
+ PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, null);
+ PColumn indexColumn = withOrdinality ? new PColumnImpl(PNameFactory.newName("IDX"), PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, null) : null;
List<PColumn> columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn);
ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString());
ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-protocol/src/main/PTable.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto
index 6f30a91..46da943 100644
--- a/phoenix-protocol/src/main/PTable.proto
+++ b/phoenix-protocol/src/main/PTable.proto
@@ -93,10 +93,5 @@ message PTable {
optional int64 indexDisableTimestamp = 29;
optional bool isNamespaceMapped = 30;
optional bytes storageScheme = 31;
- repeated EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;
-}
-
-message EncodedColumnQualifierCounter {
- required string familyName = 1;
- required int32 counter = 2;
-}
+ optional int32 encodedColumnQualifierCounter = 32;
+}
\ No newline at end of file
[4/4] phoenix git commit: Optimize order by and grouped aggregations
by taking advantage of column encoding
Posted by sa...@apache.org.
Optimize order by and grouped aggregations by taking advantage of column encoding
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9525c72f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9525c72f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9525c72f
Branch: refs/heads/encodecolumns
Commit: 9525c72fb45522c84bbb2fbde62042e1af735284
Parents: 6461e95
Author: Samarth <sa...@salesforce.com>
Authored: Fri May 27 17:44:53 2016 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Fri May 27 17:44:53 2016 -0700
----------------------------------------------------------------------
.../apache/phoenix/end2end/AlterTableIT.java | 108 +-
.../phoenix/end2end/AlterTableWithViewsIT.java | 112 +-
.../apache/phoenix/end2end/CreateTableIT.java | 47 +-
.../org/apache/phoenix/end2end/GroupByIT.java | 3 -
.../phoenix/end2end/RowValueConstructorIT.java | 2 +-
.../apache/phoenix/end2end/UpsertValuesIT.java | 2 +-
.../apache/phoenix/compile/FromCompiler.java | 4 +-
.../apache/phoenix/compile/JoinCompiler.java | 2 +-
.../phoenix/compile/ListJarsQueryPlan.java | 1 +
.../compile/TupleProjectionCompiler.java | 25 +-
.../apache/phoenix/compile/UnionCompiler.java | 4 +-
.../coprocessor/BaseScannerRegionObserver.java | 22 +-
.../coprocessor/DelegateRegionScanner.java | 5 +
.../GroupedAggregateRegionObserver.java | 26 +-
.../coprocessor/HashJoinRegionScanner.java | 2 +-
.../coprocessor/MetaDataEndpointImpl.java | 17 +-
.../phoenix/coprocessor/ScanRegionObserver.java | 17 +-
.../UngroupedAggregateRegionObserver.java | 12 +-
.../coprocessor/generated/PTableProtos.java | 1080 ++----------------
.../phoenix/execute/SortMergeJoinPlan.java | 1 +
.../apache/phoenix/execute/TupleProjector.java | 6 +-
.../index/PhoenixTransactionalIndexer.java | 22 +-
.../phoenix/iterate/BaseResultIterators.java | 21 +-
.../iterate/LookAheadResultIterator.java | 2 +-
.../phoenix/iterate/MappedByteBufferQueue.java | 1 +
.../phoenix/iterate/OrderedResultIterator.java | 3 +-
.../iterate/RegionScannerResultIterator.java | 14 +-
.../phoenix/jdbc/PhoenixDatabaseMetaData.java | 3 +-
.../apache/phoenix/jdbc/PhoenixResultSet.java | 2 +-
.../apache/phoenix/join/HashCacheFactory.java | 1 +
.../apache/phoenix/query/QueryConstants.java | 41 +-
.../phoenix/query/QueryServicesOptions.java | 2 +-
.../apache/phoenix/schema/DelegateTable.java | 5 +-
.../apache/phoenix/schema/KeyValueSchema.java | 2 +
.../apache/phoenix/schema/MetaDataClient.java | 189 ++-
.../java/org/apache/phoenix/schema/PTable.java | 50 +-
.../org/apache/phoenix/schema/PTableImpl.java | 70 +-
.../schema/tuple/BoundedSkipNullCellsList.java | 354 ++++--
.../tuple/PositionBasedMultiKeyValueTuple.java | 19 +-
.../phoenix/schema/tuple/ResultTuple.java | 33 +-
.../java/org/apache/phoenix/util/IndexUtil.java | 8 +-
.../org/apache/phoenix/util/ResultUtil.java | 60 -
.../java/org/apache/phoenix/util/ScanUtil.java | 9 +
.../phoenix/execute/CorrelatePlanTest.java | 4 +-
.../execute/LiteralResultIteratorPlanTest.java | 4 +-
.../phoenix/execute/UnnestArrayPlanTest.java | 5 +-
phoenix-protocol/src/main/PTable.proto | 9 +-
47 files changed, 888 insertions(+), 1543 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 900a040..d588c63 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -26,6 +26,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.closeConnection;
import static org.apache.phoenix.util.TestUtil.closeStatement;
@@ -63,6 +64,7 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.util.IndexUtil;
@@ -2244,13 +2246,11 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
long initBaseTableSeqNumber = baseTable.getSequenceNumber();
// assert that the client side cache is updated.
- Map<String, Integer> cqCounters = baseTable.getEncodedCQCounters();
- assertEquals(1, cqCounters.size());
- int counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(1, counter);
-
+ EncodedCQCounter cqCounter = baseTable.getEncodedCQCounter();
+ assertEquals((Integer)ENCODED_CQ_COUNTER_INITIAL_VALUE, cqCounter.getValue());
+
// assert that the server side metadata is updated correctly.
- assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, 1);
+ assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE, true);
assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber);
// now create a view and validate client and server side metadata
@@ -2260,19 +2260,13 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
PTable view = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullViewName));
// verify that the client side cache is updated. Base table's cq counters should be updated.
- cqCounters = baseTable.getEncodedCQCounters();
- counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(2, counter);
- counter = cqCounters.get("A");
- assertEquals(2, counter);
- cqCounters = view.getEncodedCQCounters();
- assertTrue("A view should always have the column qualifier counters map empty", cqCounters.isEmpty());
-
+ assertEquals((Integer)(ENCODED_CQ_COUNTER_INITIAL_VALUE + 2), baseTable.getEncodedCQCounter().getValue());
+ assertNull("A view should always have the null cq counter", view.getEncodedCQCounter().getValue());
+
// assert that the server side metadata for the base table and the view is also updated correctly.
- assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, 2);
- assertEncodedCQCounter("A", schemaName, baseTableName, 2);
- assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL1", schemaName, viewName, 1);
- assertEncodedCQValue("A", "VIEW_COL2", schemaName, viewName, 1);
+ assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 2, true);
+ assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL1", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
+ assertEncodedCQValue("A", "VIEW_COL2", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber + 1);
assertSequenceNumber(schemaName, viewName, PTable.INITIAL_SEQ_NUM);
}
@@ -2305,18 +2299,14 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
// assert that the client side cache is updated.
baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
- Map<String, Integer> cqCounters = baseTable.getEncodedCQCounters();
- int counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(3, counter);
- counter = cqCounters.get("B");
- assertEquals(2, counter);
-
+ EncodedCQCounter encodedCqCounter = baseTable.getEncodedCQCounter();
+ assertEquals((Integer)(ENCODED_CQ_COUNTER_INITIAL_VALUE + 3), encodedCqCounter.getValue());
+
// assert that the server side metadata is updated correctly.
- assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, 3);
- assertEncodedCQCounter("B", schemaName, baseTableName, 2);
- assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL4", schemaName, baseTableName, 1);
- assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL5", schemaName, baseTableName, 2);
- assertEncodedCQValue("B", "COL6", schemaName, baseTableName, 1);
+ assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 3, true);
+ assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL4", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
+ assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL5", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
+ assertEncodedCQValue("B", "COL6", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 2);
assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber + 1);
// Create a view
@@ -2331,26 +2321,19 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
// assert that the client cache for the base table is updated
baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
- cqCounters = baseTable.getEncodedCQCounters();
- counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(5, counter);
- counter = cqCounters.get("B");
- assertEquals(3, counter);
- counter = cqCounters.get("A");
- assertEquals(3, counter);
-
+ encodedCqCounter = baseTable.getEncodedCQCounter();
+ assertEquals((Integer)(ENCODED_CQ_COUNTER_INITIAL_VALUE + 8), encodedCqCounter.getValue());
+
// assert client cache for view
PTable view = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullViewName));
- cqCounters = view.getEncodedCQCounters();
- assertTrue("A view should always have the column qualifier counters map empty", cqCounters.isEmpty());
-
+ encodedCqCounter = view.getEncodedCQCounter();
+ assertNull("A view should always have the column qualifier counter as null", view.getEncodedCQCounter().getValue());
+
// assert that the server side metadata for the base table and the view is also updated correctly.
- assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, 5);
- assertEncodedCQCounter("A", schemaName, baseTableName, 3);
- assertEncodedCQCounter("B", schemaName, baseTableName, 3);
- assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL3", schemaName, viewName, 4);
- assertEncodedCQValue("A", "VIEW_COL4", schemaName, viewName, 2);
- assertEncodedCQValue("B", "VIEW_COL5", schemaName, viewName, 2);
+ assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 8, true);
+ assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "VIEW_COL3", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 5);
+ assertEncodedCQValue("A", "VIEW_COL4", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 6);
+ assertEncodedCQValue("B", "VIEW_COL5", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 7);
// Adding a column to the view should increment the base table's sequence number too since we update the cq counters for column families.
assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber + 3);
assertSequenceNumber(schemaName, viewName, PTable.INITIAL_SEQ_NUM + 1);
@@ -2361,25 +2344,18 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
// assert that the client cache for the base table is updated
- cqCounters = baseTable.getEncodedCQCounters();
- counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(6, counter);
- counter = cqCounters.get("B");
- assertEquals(3, counter);
- counter = cqCounters.get("A");
- assertEquals(4, counter);
+ encodedCqCounter = baseTable.getEncodedCQCounter();
+ assertEquals((Integer)(ENCODED_CQ_COUNTER_INITIAL_VALUE + 10), encodedCqCounter.getValue());
// assert client cache for view
view = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullViewName));
- cqCounters = view.getEncodedCQCounters();
- assertTrue("A view should always have the column qualifier counters map empty", cqCounters.isEmpty());
+ encodedCqCounter = view.getEncodedCQCounter();
+ assertNull("A view should always have the column qualifier counter as null", view.getEncodedCQCounter().getValue());
// assert that the server side metadata for the base table and the view is also updated correctly.
- assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, 6);
- assertEncodedCQCounter("A", schemaName, baseTableName, 4);
- assertEncodedCQCounter("B", schemaName, baseTableName, 3);
- assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL10", schemaName, viewName, 5);
- assertEncodedCQValue("A", "COL11", schemaName, viewName, 3);
+ assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, (ENCODED_CQ_COUNTER_INITIAL_VALUE + 10), true);
+ assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "COL10", schemaName, viewName, (ENCODED_CQ_COUNTER_INITIAL_VALUE + 8));
+ assertEncodedCQValue("A", "COL11", schemaName, viewName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 9);
assertSequenceNumber(schemaName, baseTableName, initBaseTableSeqNumber + 4);
assertSequenceNumber(schemaName, viewName, PTable.INITIAL_SEQ_NUM + 2);
}
@@ -2401,7 +2377,7 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
}
}
- private void assertEncodedCQCounter(String columnFamily, String schemaName, String tableName, int expectedValue) throws Exception {
+ private void assertEncodedCQCounter(String columnFamily, String schemaName, String tableName, int expectedValue, boolean rowExists) throws Exception {
String query = "SELECT " + COLUMN_QUALIFIER_COUNTER + " FROM SYSTEM.CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
+ " = ? " + " AND " + COLUMN_FAMILY + " = ? AND " + COLUMN_QUALIFIER_COUNTER + " IS NOT NULL";
try (Connection conn = DriverManager.getConnection(getUrl())) {
@@ -2410,9 +2386,13 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
stmt.setString(2, tableName);
stmt.setString(3, columnFamily);
ResultSet rs = stmt.executeQuery();
- assertTrue(rs.next());
- assertEquals(expectedValue, rs.getInt(1));
- assertFalse(rs.next());
+ if (rowExists) {
+ assertTrue(rs.next());
+ assertEquals(expectedValue, rs.getInt(1));
+ assertFalse(rs.next());
+ } else {
+ assertFalse(rs.next());
+ }
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index 7458ed9..2f3441f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -544,61 +544,7 @@ public class AlterTableWithViewsIT extends BaseHBaseManagedTimeIT {
- @Test
- public void testAlteringViewThatHasChildViews() throws Exception {
- String baseTable = "testAlteringViewThatHasChildViews";
- String childView = "childView";
- String grandChildView = "grandChildView";
- try (Connection conn = DriverManager.getConnection(getUrl());
- Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {
- String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTable + " ("
- + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
- + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)"
- + " ) %s";
- conn.createStatement().execute(generateDDL(ddlFormat));
-
- String childViewDDL = "CREATE VIEW " + childView + " AS SELECT * FROM " + baseTable;
- viewConn.createStatement().execute(childViewDDL);
-
- String addColumnToChildViewDDL =
- "ALTER VIEW " + childView + " ADD CHILD_VIEW_COL VARCHAR";
- viewConn.createStatement().execute(addColumnToChildViewDDL);
-
- String grandChildViewDDL =
- "CREATE VIEW " + grandChildView + " AS SELECT * FROM " + childView;
- viewConn.createStatement().execute(grandChildViewDDL);
-
- // dropping base table column from child view should succeed
- String dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN V2";
- viewConn.createStatement().execute(dropColumnFromChildView);
-
- // dropping view specific column from child view should succeed
- dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN CHILD_VIEW_COL";
- viewConn.createStatement().execute(dropColumnFromChildView);
-
- // Adding column to view that has child views is allowed
- String addColumnToChildView = "ALTER VIEW " + childView + " ADD V5 VARCHAR";
- viewConn.createStatement().execute(addColumnToChildView);
- // V5 column should be visible now for childView
- viewConn.createStatement().execute("SELECT V5 FROM " + childView);
-
- // However, column V5 shouldn't have propagated to grandChildView. Not till PHOENIX-2054 is fixed.
- try {
- viewConn.createStatement().execute("SELECT V5 FROM " + grandChildView);
- } catch (SQLException e) {
- assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
- }
-
- // dropping column from the grand child view, however, should work.
- String dropColumnFromGrandChildView =
- "ALTER VIEW " + grandChildView + " DROP COLUMN CHILD_VIEW_COL";
- viewConn.createStatement().execute(dropColumnFromGrandChildView);
-
- // similarly, dropping column inherited from the base table should work.
- dropColumnFromGrandChildView = "ALTER VIEW " + grandChildView + " DROP COLUMN V2";
- viewConn.createStatement().execute(dropColumnFromGrandChildView);
- }
- }
+
@Test
public void testDivergedViewsStayDiverged() throws Exception {
@@ -676,4 +622,60 @@ public class AlterTableWithViewsIT extends BaseHBaseManagedTimeIT {
}
}
+ @Test
+ public void testAlteringViewThatHasChildViews() throws Exception {
+ String baseTable = "testAlteringViewThatHasChildViews";
+ String childView = "childView";
+ String grandChildView = "grandChildView";
+ try (Connection conn = DriverManager.getConnection(getUrl());
+ Connection viewConn = isMultiTenant ? DriverManager.getConnection(TENANT_SPECIFIC_URL1) : conn ) {
+ String ddlFormat = "CREATE TABLE IF NOT EXISTS " + baseTable + " ("
+ + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
+ + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)"
+ + " ) %s";
+ conn.createStatement().execute(generateDDL(ddlFormat));
+
+ String childViewDDL = "CREATE VIEW " + childView + " AS SELECT * FROM " + baseTable;
+ viewConn.createStatement().execute(childViewDDL);
+
+ String addColumnToChildViewDDL =
+ "ALTER VIEW " + childView + " ADD CHILD_VIEW_COL VARCHAR";
+ viewConn.createStatement().execute(addColumnToChildViewDDL);
+
+ String grandChildViewDDL =
+ "CREATE VIEW " + grandChildView + " AS SELECT * FROM " + childView;
+ viewConn.createStatement().execute(grandChildViewDDL);
+
+ // dropping base table column from child view should succeed
+ String dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN V2";
+ viewConn.createStatement().execute(dropColumnFromChildView);
+
+ // dropping view specific column from child view should succeed
+ dropColumnFromChildView = "ALTER VIEW " + childView + " DROP COLUMN CHILD_VIEW_COL";
+ viewConn.createStatement().execute(dropColumnFromChildView);
+
+ // Adding column to view that has child views is allowed
+ String addColumnToChildView = "ALTER VIEW " + childView + " ADD V5 VARCHAR";
+ viewConn.createStatement().execute(addColumnToChildView);
+ // V5 column should be visible now for childView
+ viewConn.createStatement().execute("SELECT V5 FROM " + childView);
+
+ // However, column V5 shouldn't have propagated to grandChildView. Not till PHOENIX-2054 is fixed.
+ try {
+ viewConn.createStatement().execute("SELECT V5 FROM " + grandChildView);
+ } catch (SQLException e) {
+ assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
+ }
+
+ // dropping column from the grand child view, however, should work.
+ String dropColumnFromGrandChildView =
+ "ALTER VIEW " + grandChildView + " DROP COLUMN CHILD_VIEW_COL";
+ viewConn.createStatement().execute(dropColumnFromGrandChildView);
+
+ // similarly, dropping column inherited from the base table should work.
+ dropColumnFromGrandChildView = "ALTER VIEW " + grandChildView + " DROP COLUMN V2";
+ viewConn.createStatement().execute(dropColumnFromGrandChildView);
+ }
+ }
+
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index dd64ea2..feccb8f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -20,16 +20,17 @@ package org.apache.phoenix.end2end;
import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -40,7 +41,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
-import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -50,11 +50,13 @@ import org.apache.phoenix.exception.SQLExceptionCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.NewerTableAlreadyExistsException;
-import org.apache.phoenix.schema.SchemaNotFoundException;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.SchemaNotFoundException;
import org.apache.phoenix.schema.TableAlreadyExistsException;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
@@ -535,13 +537,11 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
long initialSequenceNumber = table.getSequenceNumber();
// assert that the client side cache is updated.
- Map<String, Integer> cqCounters = table.getEncodedCQCounters();
- assertEquals(1, cqCounters.size());
- int counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(1, counter);
-
+ EncodedCQCounter cqCounter = table.getEncodedCQCounter();
+ assertEquals((Integer)ENCODED_CQ_COUNTER_INITIAL_VALUE, cqCounter.getValue());
+
// assert that the server side metadata is updated correctly.
- assertColumnFamilyCounter(DEFAULT_COLUMN_FAMILY, schemaName, tableName, 1);
+ assertColumnFamilyCounter(DEFAULT_COLUMN_FAMILY, schemaName, tableName, ENCODED_CQ_COUNTER_INITIAL_VALUE, true);
assertSequenceNumber(schemaName, tableName, initialSequenceNumber);
// now add a column and validate client and server side metadata
@@ -549,18 +549,15 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
table = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
// verify that the client side cache is updated.
- cqCounters = table.getEncodedCQCounters();
- counter = cqCounters.get(DEFAULT_COLUMN_FAMILY);
- assertEquals(3, counter);
- counter = cqCounters.get("A");
- assertEquals(2, counter);
+ cqCounter = table.getEncodedCQCounter();
+ assertEquals((Integer)14, cqCounter.getValue());
+
// assert that the server side metadata is also updated correctly.
- assertColumnFamilyCounter(DEFAULT_COLUMN_FAMILY, schemaName, tableName, 3);
- assertColumnFamilyCounter("A", schemaName, tableName, 2);
- assertColumnQualifier(DEFAULT_COLUMN_FAMILY, "COL4", schemaName, tableName, 1);
- assertColumnQualifier(DEFAULT_COLUMN_FAMILY, "COL6", schemaName, tableName, 2);
- assertColumnQualifier("A", "COL5", schemaName, tableName, 1);
+ assertColumnFamilyCounter(DEFAULT_COLUMN_FAMILY, schemaName, tableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 3, true);
+ assertColumnQualifier(DEFAULT_COLUMN_FAMILY, "COL4", schemaName, tableName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
+ assertColumnQualifier(DEFAULT_COLUMN_FAMILY, "COL6", schemaName, tableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
+ assertColumnQualifier("A", "COL5", schemaName, tableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 2);
assertSequenceNumber(schemaName, tableName, initialSequenceNumber + 1);
}
}
@@ -581,7 +578,7 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
}
}
- private void assertColumnFamilyCounter(String columnFamily, String schemaName, String tableName, int expectedValue) throws Exception {
+ private void assertColumnFamilyCounter(String columnFamily, String schemaName, String tableName, int expectedValue, boolean rowExists) throws Exception {
String query = "SELECT " + COLUMN_QUALIFIER_COUNTER + " FROM SYSTEM.CATALOG WHERE " + TABLE_SCHEM + " = ? AND " + TABLE_NAME
+ " = ? " + " AND " + COLUMN_FAMILY + " = ? AND " + COLUMN_QUALIFIER_COUNTER + " IS NOT NULL";
try (Connection conn = DriverManager.getConnection(getUrl())) {
@@ -590,9 +587,11 @@ public class CreateTableIT extends BaseClientManagedTimeIT {
stmt.setString(2, tableName);
stmt.setString(3, columnFamily);
ResultSet rs = stmt.executeQuery();
- assertTrue(rs.next());
- assertEquals(expectedValue, rs.getInt(1));
- assertFalse(rs.next());
+ assertEquals(rowExists, rs.next());
+ if (rowExists) {
+ assertEquals(expectedValue, rs.getInt(1));
+ assertFalse(rs.next());
+ }
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByIT.java
index 51ab070..8eace13 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByIT.java
@@ -33,7 +33,6 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.sql.Connection;
-import java.sql.Date;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
@@ -47,8 +46,6 @@ import java.util.Properties;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
index be8ec59..6d3749f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
@@ -105,7 +105,7 @@ public class RowValueConstructorIT extends BaseClientManagedTimeIT {
count++;
}
// we have 6 values for a_integer present in the atable where a >= 4. x_integer is null for a_integer = 4. So the query should have returned 5 rows.
- assertTrue(count == 5);
+ assertEquals(5, count);
} finally {
conn.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
index 3fec718..ead3cc0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
@@ -958,8 +958,8 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT {
ResultScanner scanner = table.getScanner(new Scan());
Result next = scanner.next();
assertTrue(next.containsColumn(Bytes.toBytes("CF1"), PInteger.INSTANCE.toBytes(1)));
- assertTrue(next.containsColumn(Bytes.toBytes("CF2"), PInteger.INSTANCE.toBytes(1)));
assertTrue(next.containsColumn(Bytes.toBytes("CF2"), PInteger.INSTANCE.toBytes(2)));
+ assertTrue(next.containsColumn(Bytes.toBytes("CF2"), PInteger.INSTANCE.toBytes(3)));
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index eec8c8a..ccd9a03 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -71,6 +71,7 @@ import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTable.StorageScheme;
import org.apache.phoenix.schema.PTableImpl;
import org.apache.phoenix.schema.PTableKey;
import org.apache.phoenix.schema.PTableType;
@@ -340,6 +341,7 @@ public class FromCompiler {
if (connection.getSchema() != null) {
schema = schema != null ? schema : connection.getSchema();
}
+ //TODO: samarth should we change the ptableimpl constructor here to set non-encoded column name scheme and null counter
PTable theTable = new PTableImpl(connection.getTenantId(), schema, table.getName().getTableName(),
scn == null ? HConstants.LATEST_TIMESTAMP : scn, families);
theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable);
@@ -781,7 +783,7 @@ public class FromCompiler {
MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null,
Collections.<PTable> emptyList(), false, Collections.<PName> emptyList(), null, null, false, false,
false, null, null, null, false, false, 0, 0L, SchemaUtil
- .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()), null, null);
+ .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()), StorageScheme.NON_ENCODED_COLUMN_NAMES, PTable.EncodedCQCounter.NULL_COUNTER);
String alias = subselectNode.getAlias();
TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 69b9bfb..a37d071 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -1308,7 +1308,7 @@ public class JoinCompiler {
left.getBucketNum(), merged,left.getParentSchemaName(), left.getParentTableName(), left.getIndexes(),
left.isImmutableRows(), Collections.<PName>emptyList(), null, null, PTable.DEFAULT_DISABLE_WAL,
left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexId(), left.getIndexType(),
- left.rowKeyOrderOptimizable(), left.isTransactional(), left.getUpdateCacheFrequency(), left.getIndexDisableTimestamp(), left.isNamespaceMapped(), StorageScheme.NON_ENCODED_COLUMN_NAMES, null);
+ left.rowKeyOrderOptimizable(), left.isTransactional(), left.getUpdateCacheFrequency(), left.getIndexDisableTimestamp(), left.isNamespaceMapped(), StorageScheme.NON_ENCODED_COLUMN_NAMES, PTable.EncodedCQCounter.NULL_COUNTER);
//FIXME: samarth
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
index cceef9a..520f9e3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ListJarsQueryPlan.java
@@ -161,6 +161,7 @@ public class ListJarsQueryPlan implements QueryPlan {
Type.Put.getCode(), HConstants.EMPTY_BYTE_ARRAY);
List<Cell> cells = new ArrayList<Cell>(1);
cells.add(cell);
+ //TODO: samarth confirm if passing false is the right thing to do here.
return new ResultTuple(Result.create(cells));
} catch (IOException e) {
throw new SQLException(e);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index f9e7f44..75eb66f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -17,6 +17,8 @@
*/
package org.apache.phoenix.compile;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
+
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
@@ -24,7 +26,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
-import org.apache.phoenix.execute.TupleProjector;
import org.apache.phoenix.parse.AliasedNode;
import org.apache.phoenix.parse.ColumnParseNode;
import org.apache.phoenix.parse.FamilyWildcardParseNode;
@@ -43,7 +44,9 @@ import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTable.StorageScheme;
import org.apache.phoenix.schema.PTableImpl;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.ProjectedColumn;
@@ -154,7 +157,7 @@ public class TupleProjectionCompiler {
table.getParentName(), table.getIndexes(), table.isImmutableRows(), Collections.<PName> emptyList(),
null, null, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
table.getViewIndexId(),
- table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTable createProjectedTable(TableRef tableRef, List<ColumnRef> sourceColumnRefs, boolean retainPKColumns) throws SQLException {
@@ -162,6 +165,8 @@ public class TupleProjectionCompiler {
boolean hasSaltingColumn = retainPKColumns && table.getBucketNum() != null;
List<PColumn> projectedColumns = new ArrayList<PColumn>();
int position = hasSaltingColumn ? 1 : 0;
+ StorageScheme storageScheme = StorageScheme.NON_ENCODED_COLUMN_NAMES;
+ Integer counter = null;
for (int i = position; i < sourceColumnRefs.size(); i++) {
ColumnRef sourceColumnRef = sourceColumnRefs.get(i);
PColumn sourceColumn = sourceColumnRef.getColumn();
@@ -172,17 +177,29 @@ public class TupleProjectionCompiler {
PColumn column = new ProjectedColumn(PNameFactory.newName(aliasedName),
retainPKColumns && SchemaUtil.isPKColumn(sourceColumn) ?
- null : PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY),
+ null : PNameFactory.newName(VALUE_COLUMN_FAMILY),
position++, sourceColumn.isNullable(), sourceColumnRef);
+ if (EncodedColumnsUtil.hasEncodedColumnName(sourceColumn)) {
+ if (counter == null) {
+ counter = 1;
+ } else {
+ counter++;
+ }
+ }
projectedColumns.add(column);
}
+ EncodedCQCounter cqCounter = PTable.EncodedCQCounter.NULL_COUNTER;
+ if (counter != null) {
+ cqCounter = new EncodedCQCounter(counter); //TODO: samarth I am not sure whether the exact count matters here or not
+ storageScheme = StorageScheme.ENCODED_COLUMN_NAMES;
+ }
return PTableImpl.makePTable(table.getTenantId(), PROJECTED_TABLE_SCHEMA, table.getName(), PTableType.PROJECTED,
null, table.getTimeStamp(), table.getSequenceNumber(), table.getPKName(),
retainPKColumns ? table.getBucketNum() : null, projectedColumns, null, null,
Collections.<PTable> emptyList(), table.isImmutableRows(), Collections.<PName> emptyList(), null, null,
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
table.getViewIndexId(), null, table.rowKeyOrderOptimizable(), table.isTransactional(),
- table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), storageScheme, cqCounter);
}
// For extracting column references from single select statement
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
index 9c89817..6376d60 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
@@ -35,6 +35,7 @@ import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableImpl;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.PTable.StorageScheme;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.util.SchemaUtil;
@@ -81,12 +82,13 @@ public class UnionCompiler {
projectedColumns.add(projectedColumn);
}
Long scn = statement.getConnection().getSCN();
+ //TODO: samarth this is likely just an in-memory reference for compilation purposes. Probably ok to pass non-encoded scheme and null counter.
PTable tempTable = PTableImpl.makePTable(statement.getConnection().getTenantId(), UNION_SCHEMA_NAME,
UNION_TABLE_NAME, PTableType.SUBQUERY, null, HConstants.LATEST_TIMESTAMP,
scn == null ? HConstants.LATEST_TIMESTAMP : scn, null, null, projectedColumns, null, null, null, true,
null, null, null, true, true, true, null, null, null, false, false, 0, 0L,
SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY,
- statement.getConnection().getQueryServices().getProps()), null, null);
+ statement.getConnection().getQueryServices().getProps()), StorageScheme.NON_ENCODED_COLUMN_NAMES, PTable.EncodedCQCounter.NULL_COUNTER);
TableRef tableRef = new TableRef(null, tempTable, 0, false);
return tableRef;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 18a2057..4b0454c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -49,6 +49,8 @@ import org.apache.phoenix.schema.KeyValueSchema;
import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
import org.apache.phoenix.schema.ValueBitSet;
import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.PositionBasedResultTuple;
import org.apache.phoenix.schema.tuple.ResultTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.IndexUtil;
@@ -246,14 +248,14 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
* @param indexMaintainer
* @param viewConstants
*/
- protected RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
+ RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
final RegionScanner s, final int offset, final Scan scan,
final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
final HRegion dataRegion, final IndexMaintainer indexMaintainer,
final byte[][] viewConstants, final TupleProjector projector,
- final ImmutableBytesWritable ptr) {
+ final ImmutableBytesWritable ptr, final boolean useQualifierAsListIndex) {
return getWrappedScanner(c, s, null, null, offset, scan, dataColumns, tupleProjector,
- dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr);
+ dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr, useQualifierAsListIndex);
}
/**
@@ -271,7 +273,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
* @param tx current transaction
* @param viewConstants
*/
- protected RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
+ RegionScanner getWrappedScanner(final ObserverContext<RegionCoprocessorEnvironment> c,
final RegionScanner s, final Set<KeyValueColumnExpression> arrayKVRefs,
final Expression[] arrayFuncRefs, final int offset, final Scan scan,
final ColumnReference[] dataColumns, final TupleProjector tupleProjector,
@@ -279,7 +281,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
Transaction tx,
final byte[][] viewConstants, final KeyValueSchema kvSchema,
final ValueBitSet kvSchemaBitSet, final TupleProjector projector,
- final ImmutableBytesWritable ptr) {
+ final ImmutableBytesWritable ptr, final boolean useQualifierAsListIndex) {
return new RegionScanner() {
@Override
@@ -344,11 +346,14 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
}
if (projector != null) {
- Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
+ // TODO: samarth think about whether this is the right thing to do here.
+ Tuple toProject = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : new ResultTuple(Result.create(result));
+ Tuple tuple = projector.projectResults(toProject);
result.clear();
result.add(tuple.getValue(0));
- if(arrayElementCell != null)
+ if (arrayElementCell != null) {
result.add(arrayElementCell);
+ }
}
// There is a scanattribute set to retrieve the specific array element
return next;
@@ -375,7 +380,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
}
if (projector != null) {
- Tuple tuple = projector.projectResults(new ResultTuple(Result.create(result)));
+ Tuple toProject = useQualifierAsListIndex ? new PositionBasedMultiKeyValueTuple(result) : new ResultTuple(Result.create(result));
+ Tuple tuple = projector.projectResults(toProject);
result.clear();
result.add(tuple.getValue(0));
if(arrayElementCell != null)
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
index f88a931..089c4fe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
@@ -56,22 +56,27 @@ public class DelegateRegionScanner implements RegionScanner {
delegate.close();
}
+ @Override
public long getMaxResultSize() {
return delegate.getMaxResultSize();
}
+ @Override
public boolean next(List<Cell> arg0, int arg1) throws IOException {
return delegate.next(arg0, arg1);
}
+ @Override
public boolean next(List<Cell> arg0) throws IOException {
return delegate.next(arg0);
}
+ @Override
public boolean nextRaw(List<Cell> arg0, int arg1) throws IOException {
return delegate.nextRaw(arg0, arg1);
}
+ @Override
public boolean nextRaw(List<Cell> arg0) throws IOException {
return delegate.nextRaw(arg0);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index 52a25d3..39a4ab8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -138,6 +138,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+ boolean useQualifierAsIndex = ScanUtil.useQualifierAsIndex(ScanUtil.getMinMaxQualifiersFromScan(scan), j != null);
if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
@@ -147,7 +148,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
innerScanner =
getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector,
- dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
+ dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
}
if (j != null) {
@@ -163,9 +164,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
}
if (keyOrdered) { // Optimize by taking advantage that the rows are
// already in the required group by key order
- return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
+ return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit, j != null);
} else { // Otherwse, collect them all up in an in memory map
- return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
+ return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit, j != null);
}
}
@@ -371,7 +372,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
*/
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
final RegionScanner scanner, final List<Expression> expressions,
- final ServerAggregators aggregators, long limit) throws IOException {
+ final ServerAggregators aggregators, long limit, boolean isJoin) throws IOException {
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
+ ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
@@ -386,7 +387,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
(int) (Bytes.toInt(estDistValsBytes) * 1.5f));
}
Pair<Integer, Integer> minMaxQualifiers = getMinMaxQualifiersFromScan(scan);
- boolean useEncodedScheme = minMaxQualifiers != null;
+ boolean useQualifierAsIndex = ScanUtil.useQualifierAsIndex(ScanUtil.getMinMaxQualifiersFromScan(scan), isJoin);
final boolean spillableEnabled =
conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
@@ -397,7 +398,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
boolean success = false;
try {
boolean hasMore;
- Tuple result = useEncodedScheme ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
+ Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
}
@@ -406,7 +407,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
try {
synchronized (scanner) {
do {
- List<Cell> results = useEncodedScheme ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
+ List<Cell> results = useQualifierAsIndex ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
// Results are potentially returned even when the return
// value of s.next is false
// since this is an indication of whether or not there are
@@ -450,14 +451,14 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
*/
private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
- final ServerAggregators aggregators, final long limit) throws IOException {
+ final ServerAggregators aggregators, final long limit, final boolean isJoin) throws IOException {
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by "
+ expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
}
final Pair<Integer, Integer> minMaxQualifiers = getMinMaxQualifiersFromScan(scan);
- final boolean useEncodedScheme = minMaxQualifiers != null;
+ final boolean useQualifierAsIndex = ScanUtil.useQualifierAsIndex(ScanUtil.getMinMaxQualifiersFromScan(scan), isJoin);
return new BaseRegionScanner(scanner) {
private long rowCount = 0;
private ImmutableBytesWritable currentKey = null;
@@ -467,7 +468,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
boolean hasMore;
boolean atLimit;
boolean aggBoundary = false;
- Tuple result = useEncodedScheme ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
+ Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
ImmutableBytesWritable key = null;
Aggregator[] rowAggregators = aggregators.getAggregators();
// If we're calculating no aggregate functions, we can exit at the
@@ -478,7 +479,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
try {
synchronized (scanner) {
do {
- List<Cell> kvs = useEncodedScheme ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
+ List<Cell> kvs = useQualifierAsIndex ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
// Results are potentially returned even when the return
// value of s.next is false
// since this is an indication of whether or not there
@@ -516,6 +517,9 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
KeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(),
currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN,
AGG_TIMESTAMP, value, 0, value.length);
+ //TODO: samarth how do we handle this? It looks like we are adding cells like this to the results
+ // that we are returning. BoundedSkipNullCellsList won't handle this properly. So how do we
+ // handle this? Does having a reserved set of column qualifiers help here?
results.add(keyValue);
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate row: "
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 2650225..8c2c3d6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -110,7 +110,7 @@ public class HashJoinRegionScanner implements RegionScanner {
private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
if (result.isEmpty())
return;
-
+ //TODO: samarth make joins work with position based lookup.
Tuple tuple = new ResultTuple(Result.create(result));
// For backward compatibility. In new versions, HashJoinInfo.forceProjection()
// always returns true.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 58a637a..3b57097 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -25,6 +25,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CLASS_NAME_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME_INDEX;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE_BYTES;
@@ -33,7 +34,6 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAM
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER_BYTES;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FAMILY_NAME_INDEX;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS_BYTES;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES;
@@ -85,11 +85,9 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
-import java.util.Map;
-import java.util.Set;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
@@ -184,6 +182,7 @@ import org.apache.phoenix.schema.PMetaDataEntity;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTable.LinkType;
import org.apache.phoenix.schema.PTable.StorageScheme;
@@ -925,7 +924,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
List<PColumn> columns = Lists.newArrayListWithExpectedSize(columnCount);
List<PTable> indexes = new ArrayList<PTable>();
List<PName> physicalTables = new ArrayList<PName>();
- Map<String, Integer> encodedColumnQualifierCounters = new HashMap<>(); //TODO: samarth size properly.
+ int counter = 0;
while (true) {
results.clear();
scanner.next(results);
@@ -940,9 +939,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength-colKeyOffset);
if (colName.getString().isEmpty() && famName != null) {
if (isQualifierCounterKv(colKv)) {
- Integer counter = (Integer)PInteger.INSTANCE.toObject(colKv.getValueArray(),
- colKv.getValueOffset(), colKv.getValueLength());
- encodedColumnQualifierCounters.put(famName.getString(), counter);
+ counter = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(),
+ colKv.getValueOffset(), SortOrder.ASC);
} else {
LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
if (linkType == LinkType.INDEX_TABLE) {
@@ -956,6 +954,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
}
}
+ EncodedCQCounter cqCounter = (storageScheme == StorageScheme.NON_ENCODED_COLUMN_NAMES || tableType == PTableType.VIEW) ? PTable.EncodedCQCounter.NULL_COUNTER : new EncodedCQCounter(counter);
PName physicalTableName = physicalTables.isEmpty() ? PNameFactory.newName(SchemaUtil.getPhysicalTableName(
Bytes.toBytes(SchemaUtil.getTableName(schemaName.getBytes(), tableName.getBytes())), isNamespaceMapped)
.getNameAsString()) : physicalTables.get(0);
@@ -980,7 +979,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
tableType == INDEX ? dataTableName : null, indexes, isImmutableRows, physicalTables, defaultFamilyName,
viewStatement, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType,
rowKeyOrderOptimizable, transactional, updateCacheFrequency, stats, baseColumnCount,
- indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedColumnQualifierCounters);
+ indexDisableTimestamp, isNamespaceMapped, storageScheme, cqCounter);
}
private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) throws IOException, SQLException {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index 72f6d09..61b98d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -27,6 +27,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Set;
+import co.cask.tephra.Transaction;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
@@ -36,7 +38,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.cache.GlobalCache;
import org.apache.phoenix.cache.TenantCache;
@@ -68,8 +69,6 @@ import org.apache.phoenix.util.ServerUtil;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import co.cask.tephra.Transaction;
-
/**
*
@@ -108,7 +107,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
}
}
- public static OrderedResultIterator deserializeFromScan(Scan scan, RegionScanner s) {
+ private static OrderedResultIterator deserializeFromScan(Scan scan, RegionScanner s, boolean isJoin) {
byte[] topN = scan.getAttribute(BaseScannerRegionObserver.TOPN);
if (topN == null) {
return null;
@@ -126,7 +125,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
orderByExpression.readFields(input);
orderByExpressions.add(orderByExpression);
}
- ResultIterator inner = new RegionScannerResultIterator(s);
+ ResultIterator inner = new RegionScannerResultIterator(s, ScanUtil.getMinMaxQualifiersFromScan(scan), isJoin);
return new OrderedResultIterator(inner, orderByExpressions, thresholdBytes, limit >= 0 ? limit : null, null,
estimatedRowSize);
} catch (IOException e) {
@@ -219,10 +218,12 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+ //TODO: samarth get rid of this join shit. Joins should support position based look up.
+ boolean useQualifierAsIndex = ScanUtil.useQualifierAsIndex(ScanUtil.getMinMaxQualifiersFromScan(scan), j != null) && scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
innerScanner =
getWrappedScanner(c, innerScanner, arrayKVRefs, arrayFuncRefs, offset, scan,
dataColumns, tupleProjector, dataRegion, indexMaintainer, tx,
- viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr);
+ viewConstants, kvSchema, kvSchemaBitSet, j == null ? p : null, ptr, useQualifierAsIndex);
final ImmutableBytesWritable tenantId = ScanUtil.getTenantId(scan);
if (j != null) {
@@ -230,10 +231,10 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
}
if (scanOffset != null) {
innerScanner = getOffsetScanner(c, innerScanner,
- new OffsetResultIterator(new RegionScannerResultIterator(innerScanner), scanOffset),
+ new OffsetResultIterator(new RegionScannerResultIterator(innerScanner, ScanUtil.getMinMaxQualifiersFromScan(scan), j != null), scanOffset),
scan.getAttribute(QueryConstants.LAST_SCAN) != null);
}
- final OrderedResultIterator iterator = deserializeFromScan(scan,innerScanner);
+ final OrderedResultIterator iterator = deserializeFromScan(scan, innerScanner, j != null);
if (iterator == null) {
return innerScanner;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 7c3bd28..b412b88 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -39,6 +39,8 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
+import co.cask.tephra.TxConstants;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -118,8 +120,6 @@ import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
-import co.cask.tephra.TxConstants;
-
/**
* Region observer that aggregates ungrouped rows(i.e. SQL query with aggregation function and no GROUP BY).
@@ -259,6 +259,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
boolean localIndexScan = ScanUtil.isLocalIndex(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+ boolean useQualifierAsIndex = ScanUtil.useQualifierAsIndex(ScanUtil.getMinMaxQualifiersFromScan(scan), j != null) && scan.getAttribute(BaseScannerRegionObserver.TOPN) != null;
if ((localIndexScan && !isDelete && !isDescRowKeyOrderUpgrade) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
@@ -268,7 +269,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
theScanner =
getWrappedScanner(c, theScanner, offset, scan, dataColumns, tupleProjector,
- dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr);
+ dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
}
if (j != null) {
@@ -289,8 +290,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
boolean hasMore;
boolean hasAny = false;
Pair<Integer, Integer> minMaxQualifiers = getMinMaxQualifiersFromScan(scan);
- boolean useEncodedScheme = minMaxQualifiers != null;
- Tuple result = useEncodedScheme ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
+ Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
if (logger.isDebugEnabled()) {
logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
@@ -300,7 +300,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
try {
synchronized (innerScanner) {
do {
- List<Cell> results = useEncodedScheme ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
+ List<Cell> results = useQualifierAsIndex ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
[2/4] phoenix git commit: Optimize order by and grouped aggregations
by taking advantage of column encoding
Posted by sa...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 4b4caa2..b30e68d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -33,7 +33,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
@@ -41,7 +41,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
@@ -86,6 +86,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC;
@@ -119,6 +120,8 @@ import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
+import co.cask.tephra.TxConstants;
+
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
@@ -130,7 +133,6 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.phoenix.compile.ColumnResolver;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.compile.IndexExpressionCompiler;
@@ -190,6 +192,7 @@ import org.apache.phoenix.query.ConnectionQueryServices.Feature;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTable.LinkType;
import org.apache.phoenix.schema.PTable.StorageScheme;
@@ -219,7 +222,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Objects;
-import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
@@ -227,8 +229,7 @@ import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
-import co.cask.tephra.TxConstants;
-
+import static org.apache.phoenix.schema.PTable.EncodedCQCounter.NULL_COUNTER;
public class MetaDataClient {
private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
@@ -289,7 +290,7 @@ public class MetaDataClient {
LINK_TYPE + "," +
PARENT_TENANT_ID + " " + PVarchar.INSTANCE.getSqlTypeName() + // Dynamic column for now to prevent schema change
") VALUES (?, ?, ?, ?, ?, ?)";
- private static final String UPDATE_ENCODED_COLUMN_COUNT =
+ private static final String UPDATE_ENCODED_COLUMN_COUNTER =
"UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
TENANT_ID + ", " +
TABLE_SCHEM + "," +
@@ -826,7 +827,7 @@ public class MetaDataClient {
argUpsert.execute();
}
- private PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint, String defaultColumnFamily, boolean addingToPK, Map<String, Integer> nextEncodedColumnQualifiers) throws SQLException {
+ private PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint, String defaultColumnFamily, boolean addingToPK, EncodedCQCounter encodedColumnQualifier) throws SQLException {
try {
ColumnName columnDefName = def.getColumnDefName();
SortOrder sortOrder = def.getSortOrder();
@@ -874,17 +875,8 @@ public class MetaDataClient {
}
isNull = false;
}
- Integer columnQualifier = null;
- if (!isPK && nextEncodedColumnQualifiers != null) {
- columnQualifier = nextEncodedColumnQualifiers.get(familyName.getString());
- if (columnQualifier == null) {
- // We use columnQualifier 0 for the special empty key value.
- columnQualifier = 1;
- }
- nextEncodedColumnQualifiers.put(familyName.toString(), columnQualifier + 1);
- }
PColumn column = new PColumnImpl(PNameFactory.newName(columnName), familyName, def.getDataType(),
- def.getMaxLength(), def.getScale(), isNull, position, sortOrder, def.getArraySize(), null, false, def.getExpression(), isRowTimestamp, false, columnQualifier);
+ def.getMaxLength(), def.getScale(), isNull, position, sortOrder, def.getArraySize(), null, false, def.getExpression(), isRowTimestamp, false, isPK ? null : encodedColumnQualifier.getValue());
return column;
} catch (IllegalArgumentException e) { // Based on precondition check in constructor
throw new SQLException(e);
@@ -1921,10 +1913,8 @@ public class MetaDataClient {
int position = positionOffset;
StorageScheme storageScheme = StorageScheme.NON_ENCODED_COLUMN_NAMES;
- Map<String, Integer> nextCQCounters = null;
- Map<String, Integer> updatedPhysicalTableCQCounters = null;
+ EncodedCQCounter cqCounter = NULL_COUNTER;
PTable viewPhysicalTable = null;
- //TODO: samarth what about local indexes.
if (SchemaUtil.isSystemTable(Bytes.toBytes(SchemaUtil.getTableName(schemaName, tableName)))) {
// System tables have hard-coded column qualifiers. So we can't use column encoding for them.
storageScheme = StorageScheme.NON_ENCODED_COLUMN_NAMES;
@@ -1938,14 +1928,13 @@ public class MetaDataClient {
} else {
/*
* For regular phoenix views, use the storage scheme of the physical table since they all share the
- * the same HTable. Views always use the base table's column qualifier counters for doling out
- * encoded column qualifiers.
+ * the same HTable. Views always use the base table's column qualifier counter for doling out
+ * encoded column qualifier.
*/
viewPhysicalTable = connection.getTable(new PTableKey(null, physicalNames.get(0).getString()));
storageScheme = viewPhysicalTable.getStorageScheme();
if (storageScheme == StorageScheme.ENCODED_COLUMN_NAMES) {
- nextCQCounters = viewPhysicalTable.getEncodedCQCounters();
- updatedPhysicalTableCQCounters = Maps.newHashMapWithExpectedSize(colDefs.size());
+ cqCounter = viewPhysicalTable.getEncodedCQCounter();
}
}
} else {
@@ -1963,8 +1952,8 @@ public class MetaDataClient {
* If the hbase table already exists, then possibly encoded or non-encoded column qualifiers already exist.
* In this case we pursue ahead with non-encoded column qualifier scheme. If the phoenix table already exists
* then we rely on the PTable, with appropriate storage scheme, returned in the MetadataMutationResult to be updated
- * in the client cache. If it doesn't then the non-encoded column qualifier scheme works because we cannot control
- * the column qualifiers that were used when populating the hbase table.
+ * in the client cache. If the phoenix table already doesn't exist then the non-encoded column qualifier scheme works
+ * because we cannot control the column qualifiers that were used when populating the hbase table.
*/
byte[] tableNameBytes = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
boolean tableExists = true;
@@ -1987,10 +1976,11 @@ public class MetaDataClient {
storageScheme = StorageScheme.ENCODED_COLUMN_NAMES;
}
if (storageScheme == StorageScheme.ENCODED_COLUMN_NAMES) {
- nextCQCounters = Maps.newHashMapWithExpectedSize(colDefs.size() - pkColumns.size());
+ cqCounter = new EncodedCQCounter(ENCODED_CQ_COUNTER_INITIAL_VALUE);
}
}
+ Integer initialCounterValue = cqCounter.getValue();
for (ColumnDef colDef : colDefs) {
rowTimeStampColumnAlreadyFound = checkAndValidateRowTimestampCol(colDef, pkConstraint, rowTimeStampColumnAlreadyFound, tableType);
if (colDef.isPK()) { // i.e. the column is declared as CREATE TABLE COLNAME DATATYPE PRIMARY KEY...
@@ -2009,11 +1999,13 @@ public class MetaDataClient {
.setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
}
}
- PColumn column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false, nextCQCounters);
- String cf = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
- if (updatedPhysicalTableCQCounters != null && cf != null && EncodedColumnsUtil.hasEncodedColumnName(column)) {
- updatedPhysicalTableCQCounters.put(cf, nextCQCounters.get(cf));
+ ColumnName columnDefName = colDef.getColumnDefName();
+ PColumn column = null;
+ column = newColumn(position++, colDef, pkConstraint, defaultFamilyName, false, cqCounter);
+ if (incrementEncodedCQCounter(storageScheme, pkConstraint, colDef, columnDefName)) {
+ cqCounter.increment();
}
+ String cf = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
if (SchemaUtil.isPKColumn(column)) {
// TODO: remove this constraint?
if (pkColumnsIterator.hasNext() && !column.getName().getString().equals(pkColumnsIterator.next().getFirst().getColumnName())) {
@@ -2050,42 +2042,34 @@ public class MetaDataClient {
}
if (storageScheme == StorageScheme.ENCODED_COLUMN_NAMES) {
- // Store the encoded column counter for each column family for phoenix entities that have their own hbase
+ // Store the encoded column counter for phoenix entities that have their own hbase
// tables i.e. base tables and indexes.
- Map<String, Integer> mapToUse = tableType == VIEW ? updatedPhysicalTableCQCounters : nextCQCounters;
- if (tableType != VIEW && nextCQCounters.isEmpty()) {
- // Case when a table or index has only pk columns.
- nextCQCounters.put(defaultFamilyName == null ? QueryConstants.DEFAULT_COLUMN_FAMILY : defaultFamilyName, 1);
- }
- if (mapToUse != null) {
- PreparedStatement linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNT);
- String schemaNameToUse = tableType == VIEW ? viewPhysicalTable.getSchemaName().getString() : schemaName;
- String tableNameToUse = tableType == VIEW ? viewPhysicalTable.getTableName().getString() : tableName;
- // For local indexes and indexes on views, pass on the the tenant id i.e. all their meta-data rows have
- // tenant ids in there.
- boolean sharedIndex = tableType == PTableType.INDEX && (indexType == IndexType.LOCAL || parent.getType() == PTableType.VIEW);
- String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
- for (Entry<String, Integer> entry : mapToUse.entrySet()) {
- String familyName = entry.getKey();
- Integer nextQualifier = entry.getValue();
- linkStatement.setString(1, tenantIdToUse);
- linkStatement.setString(2, schemaNameToUse);
- linkStatement.setString(3, tableNameToUse);
- linkStatement.setString(4, familyName);
- linkStatement.setInt(5, nextQualifier);
- linkStatement.execute();
- }
+ String schemaNameToUse = tableType == VIEW ? viewPhysicalTable.getSchemaName().getString() : schemaName;
+ String tableNameToUse = tableType == VIEW ? viewPhysicalTable.getTableName().getString() : tableName;
+ // For local indexes and indexes on views, pass on the the tenant id since all their meta-data rows have
+ // tenant ids in there.
+ boolean sharedIndex = tableType == PTableType.INDEX && (indexType == IndexType.LOCAL || parent.getType() == PTableType.VIEW);
+ String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
+ //TODO: samarth I think we can safely use the default column family here
+ String familyName = QueryConstants.DEFAULT_COLUMN_FAMILY;
+ try (PreparedStatement linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER)) {
+ linkStatement.setString(1, tenantIdToUse);
+ linkStatement.setString(2, schemaNameToUse);
+ linkStatement.setString(3, tableNameToUse);
+ linkStatement.setString(4, familyName);
+ linkStatement.setInt(5, cqCounter.getValue());
+ linkStatement.execute();
+ }
- // When a view adds its own columns, then we need to increase the sequence number of the base table
- // too since we want clients to get the latest PTable of the base table.
- if (tableType == VIEW && updatedPhysicalTableCQCounters != null && !updatedPhysicalTableCQCounters.isEmpty()) {
- PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
- incrementStatement.setString(1, null);
- incrementStatement.setString(2, viewPhysicalTable.getSchemaName().getString());
- incrementStatement.setString(3, viewPhysicalTable.getTableName().getString());
- incrementStatement.setLong(4, viewPhysicalTable.getSequenceNumber() + 1);
- incrementStatement.execute();
- }
+ // When a view adds its own columns, then we need to increase the sequence number of the base table
+ // too since we want clients to get the latest PTable of the base table.
+ if (tableType == VIEW && cqCounter.getValue() != initialCounterValue) {
+ PreparedStatement incrementStatement = connection.prepareStatement(INCREMENT_SEQ_NUM);
+ incrementStatement.setString(1, null);
+ incrementStatement.setString(2, viewPhysicalTable.getSchemaName().getString());
+ incrementStatement.setString(3, viewPhysicalTable.getTableName().getString());
+ incrementStatement.setLong(4, viewPhysicalTable.getSequenceNumber() + 1);
+ incrementStatement.execute();
}
}
@@ -2172,7 +2156,7 @@ public class MetaDataClient {
Collections.<PTable>emptyList(), isImmutableRows,
Collections.<PName>emptyList(), defaultFamilyName == null ? null :
PNameFactory.newName(defaultFamilyName), null,
- Boolean.TRUE.equals(disableWAL), false, false, null, indexId, indexType, true, false, 0, 0L, isNamespaceMapped, StorageScheme.NON_ENCODED_COLUMN_NAMES, ImmutableMap.<String, Integer>of());
+ Boolean.TRUE.equals(disableWAL), false, false, null, indexId, indexType, true, false, 0, 0L, isNamespaceMapped, StorageScheme.NON_ENCODED_COLUMN_NAMES, PTable.EncodedCQCounter.NULL_COUNTER);
connection.addTable(table, MetaDataProtocol.MIN_TABLE_TIMESTAMP);
} else if (tableType == PTableType.INDEX && indexId == null) {
if (tableProps.get(HTableDescriptor.MAX_FILESIZE) == null) {
@@ -2338,14 +2322,18 @@ public class MetaDataClient {
throw new ConcurrentTableMutationException(schemaName, tableName);
default:
PName newSchemaName = PNameFactory.newName(schemaName);
- // Views always rely on the parent table's map to dole out encoded column qualifiers.
- Map<String, Integer> qualifierMapToBe = tableType == PTableType.VIEW ? ImmutableMap.<String, Integer>of() : nextCQCounters;
+ /*
+ * It doesn't hurt for the PTable of views to have the cqCounter. However, views always rely on the
+ * parent table's counter to dole out encoded column qualifiers. So setting the counter as NULL_COUNTER
+ * for extra safety.
+ */
+ EncodedCQCounter cqCounterToBe = tableType == PTableType.VIEW ? NULL_COUNTER : cqCounter;
PTable table = PTableImpl.makePTable(
tenantId, newSchemaName, PNameFactory.newName(tableName), tableType, indexState, timestamp!=null ? timestamp : result.getMutationTime(),
PTable.INITIAL_SEQ_NUM, pkName == null ? null : PNameFactory.newName(pkName), saltBucketNum, columns,
dataTableName == null ? null : newSchemaName, dataTableName == null ? null : PNameFactory.newName(dataTableName), Collections.<PTable>emptyList(), isImmutableRows,
physicalNames, defaultFamilyName == null ? null : PNameFactory.newName(defaultFamilyName), viewStatement, Boolean.TRUE.equals(disableWAL), multiTenant, storeNulls, viewType,
- indexId, indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, storageScheme, qualifierMapToBe);
+ indexId, indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, 0L, isNamespaceMapped, storageScheme, cqCounterToBe);
result = new MetaDataMutationResult(code, result.getMutationTime(), table, true);
addTableToCache(result);
return table;
@@ -2355,6 +2343,15 @@ public class MetaDataClient {
}
}
+ private static boolean incrementEncodedCQCounter(StorageScheme storageScheme, PrimaryKeyConstraint pkConstraint,
+ ColumnDef colDef, ColumnName columnDefName) {
+ return storageScheme == StorageScheme.ENCODED_COLUMN_NAMES && !(colDef.isPK() || (pkConstraint != null && pkConstraint.getColumnWithSortOrder(columnDefName) != null));
+ }
+
+ private static boolean incrementEncodedCQCounter(StorageScheme storageScheme, ColumnDef colDef) {
+ return storageScheme == StorageScheme.ENCODED_COLUMN_NAMES && !colDef.isPK();
+ }
+
private byte[][] getSplitKeys(List<HRegionLocation> allTableRegions) {
if(allTableRegions.size() == 1) return null;
byte[][] splitKeys = new byte[allTableRegions.size()-1][];
@@ -2906,15 +2903,16 @@ public class MetaDataClient {
Set<String> families = new LinkedHashSet<>();
PTableType tableType = table.getType();
PTable tableForCQCounters = null;
- Map<String, Integer> cqCountersToUse = null;
- Map<String, Integer> cfWithUpdatedCQCounters = null;
+ EncodedCQCounter cqCounterToUse = NULL_COUNTER;
+ StorageScheme storageScheme = table.getStorageScheme();
+ Integer initialCounterValue = null;
if (columnDefs.size() > 0 ) {
//FIXME: samarth change this to fetch table from server if client cache doesn't have it. What about local indexes?
//FIXME: samarth fix this mess of getting table names from connection
//TODO: samarth should these be guarded by storage scheme check. Better to have the map always available. immutable empty for views and non encoded.
tableForCQCounters = tableType == PTableType.VIEW ? connection.getTable(new PTableKey(null, table.getPhysicalName().getString())) : table;
- cqCountersToUse = tableForCQCounters.getEncodedCQCounters();
- cfWithUpdatedCQCounters = cqCountersToUse != null ? Maps.<String, Integer>newHashMapWithExpectedSize(columnDefs.size()) : null;
+ cqCounterToUse = tableForCQCounters.getEncodedCQCounter();
+ initialCounterValue = cqCounterToUse.getValue();
try (PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN_ALTER_TABLE)) {
short nextKeySeq = SchemaUtil.getMaxKeySeq(table);
for( ColumnDef colDef : columnDefs) {
@@ -2934,12 +2932,11 @@ public class MetaDataClient {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.ROWTIMESTAMP_CREATE_ONLY)
.setColumnName(colDef.getColumnDefName().getColumnName()).build().buildException();
}
- PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true, cqCountersToUse);
- String cf = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
- if (cfWithUpdatedCQCounters != null && cf != null && EncodedColumnsUtil.hasEncodedColumnName(column)) {
- cfWithUpdatedCQCounters.put(cf, cqCountersToUse.get(cf));
- }
+ PColumn column = newColumn(position++, colDef, PrimaryKeyConstraint.EMPTY, table.getDefaultFamilyName() == null ? null : table.getDefaultFamilyName().getString(), true, cqCounterToUse);
columns.add(column);
+ if (incrementEncodedCQCounter(storageScheme, colDef)) {
+ cqCounterToUse.increment();
+ }
String pkName = null;
Short keySeq = null;
@@ -2976,7 +2973,7 @@ public class MetaDataClient {
ColumnName indexColName = ColumnName.caseSensitiveColumnName(IndexUtil.getIndexColumnName(null, colDef.getColumnDefName().getColumnName()));
Expression expression = new RowKeyColumnExpression(columns.get(i), new RowKeyValueAccessor(pkColumns, ++pkSlotPosition));
ColumnDef indexColDef = FACTORY.columnDef(indexColName, indexColDataType.getSqlTypeName(), colDef.isNull(), colDef.getMaxLength(), colDef.getScale(), true, colDef.getSortOrder(), expression.toString(), colDef.isRowTimestamp());
- PColumn indexColumn = newColumn(indexPosition++, indexColDef, PrimaryKeyConstraint.EMPTY, null, true, cqCountersToUse);
+ PColumn indexColumn = newColumn(indexPosition++, indexColDef, PrimaryKeyConstraint.EMPTY, null, true, NULL_COUNTER);
addColumnMutation(schemaName, index.getTableName().getString(), indexColumn, colUpsert, index.getParentTableName().getString(), index.getPKName() == null ? null : index.getPKName().getString(), ++nextIndexKeySeq, index.getBucketNum() != null);
}
}
@@ -3027,21 +3024,21 @@ public class MetaDataClient {
boolean sharedIndex = tableType == PTableType.INDEX && (table.getIndexType() == IndexType.LOCAL || table.getViewIndexId() != null);
String tenantIdToUse = connection.getTenantId() != null && sharedIndex ? connection.getTenantId().getString() : null;
//TODO: samarth I am not sure this is going to work on the server side. But for now let's add these mutations here.
- if (cfWithUpdatedCQCounters != null && !cfWithUpdatedCQCounters.isEmpty()) {
+ if (cqCounterToUse.getValue() != initialCounterValue) {
PreparedStatement linkStatement;
- if (!sharedIndex) {
- linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNT);
- for (Entry<String, Integer> entry : cfWithUpdatedCQCounters.entrySet()) {
- String familyName = entry.getKey();
- Integer nextQualifier = entry.getValue();
- linkStatement.setString(1, tenantIdToUse);
- linkStatement.setString(2, tableForCQCounters.getSchemaName().getString());
- linkStatement.setString(3, tableForCQCounters.getTableName().getString());
- linkStatement.setString(4, familyName);
- linkStatement.setInt(5, nextQualifier);
- linkStatement.execute();
- }
- }
+ //TODO: samarth i don't think we need the shared index check here.
+ //if (!sharedIndex) {
+ linkStatement = connection.prepareStatement(UPDATE_ENCODED_COLUMN_COUNTER);
+ //TODO: samarth should be ok to use the default column family here.
+ String familyName = QueryConstants.DEFAULT_COLUMN_FAMILY;
+ linkStatement.setString(1, tenantIdToUse);
+ linkStatement.setString(2, tableForCQCounters.getSchemaName().getString());
+ linkStatement.setString(3, tableForCQCounters.getTableName().getString());
+ linkStatement.setString(4, familyName);
+ linkStatement.setInt(5, cqCounterToUse.getValue());
+ linkStatement.execute();
+
+ //}
// When a view adds its own columns, then we need to increase the sequence number of the base table
// too since we want clients to get the latest PTable of the base table.
if (tableType == VIEW) {
@@ -3372,11 +3369,13 @@ public class MetaDataClient {
Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
if (result.getSharedTablesToDelete() != null) {
for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
+ //TODO: samarth I don't think we really care about storage scheme and cq counter at this point.
+ //Probably worthwhile to change the constructor here so that it does not expect the two arguments.
PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(),
sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts,
table.getColumnFamilies(), sharedTableState.getColumns(),
sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(),
- table.isMultiTenant(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.isMultiTenant(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
TableRef indexTableRef = new TableRef(viewIndexTable);
PName indexTableTenantId = sharedTableState.getTenantId();
if (indexTableTenantId==null) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index e42000e..ca911df 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -20,6 +20,8 @@ package org.apache.phoenix.schema;
import java.util.List;
import java.util.Map;
+import javax.annotation.Nullable;
+
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
@@ -385,5 +387,51 @@ public interface PTable extends PMetaDataEntity {
long getUpdateCacheFrequency();
boolean isNamespaceMapped();
StorageScheme getStorageScheme();
- Map<String, Integer> getEncodedCQCounters();
+ EncodedCQCounter getEncodedCQCounter();
+
+ /**
+ * Wrapper around {@link java.lang.Integer} to help track and update counter values.
+ */
+ public class EncodedCQCounter {
+
+ @Nullable private Integer counter;
+ public static final EncodedCQCounter NULL_COUNTER = new EncodedCQCounter(null);
+
+ public EncodedCQCounter(Integer initialValue) {
+ counter = initialValue;
+ }
+
+ @Nullable
+ public Integer getValue() {
+ return counter;
+ }
+
+ public void increment() {
+ if (counter != null) {
+ counter++;
+ }
+ }
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((counter == null) ? 0 : counter.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) return true;
+ if (obj == null) return false;
+ if (getClass() != obj.getClass()) return false;
+ EncodedCQCounter other = (EncodedCQCounter)obj;
+ if (counter == null) {
+ if (other.counter != null) return false;
+ } else if (!counter.equals(other.counter)) return false;
+ return true;
+ }
+
+
+
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 897317a..6978fa7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -28,7 +28,6 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
@@ -47,7 +46,6 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.coprocessor.generated.PGuidePostsProtos;
import org.apache.phoenix.coprocessor.generated.PGuidePostsProtos.PGuidePosts;
import org.apache.phoenix.coprocessor.generated.PTableProtos;
-import org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter;
import org.apache.phoenix.exception.DataExceedsCapacityException;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
@@ -146,7 +144,7 @@ public class PTableImpl implements PTable {
private long updateCacheFrequency;
private boolean isNamespaceMapped;
private StorageScheme storageScheme;
- private Map<String, Integer> encodedCQCounters;
+ private EncodedCQCounter encodedCQCounter;
public PTableImpl() {
this.indexes = Collections.emptyList();
@@ -179,7 +177,7 @@ public class PTableImpl implements PTable {
// For indexes stored in shared physical tables
public PTableImpl(PName tenantId, PName schemaName, PName tableName, long timestamp, List<PColumnFamily> families,
- List<PColumn> columns, List<PName> physicalNames, Short viewIndexId, boolean multiTenant, boolean isNamespaceMpped, StorageScheme storageScheme, Map<String, Integer> encodedColumnQualifierCounters) throws SQLException {
+ List<PColumn> columns, List<PName> physicalNames, Short viewIndexId, boolean multiTenant, boolean isNamespaceMpped, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException {
this.pkColumns = this.allColumns = Collections.emptyList();
this.rowKeySchema = RowKeySchema.EMPTY_SCHEMA;
this.indexes = Collections.emptyList();
@@ -193,7 +191,7 @@ public class PTableImpl implements PTable {
init(tenantId, this.schemaName, this.tableName, PTableType.INDEX, state, timeStamp, sequenceNumber, pkName, bucketNum, columns,
PTableStats.EMPTY_STATS, this.schemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
null, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
- isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMpped, storageScheme, encodedColumnQualifierCounters);
+ isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMpped, storageScheme, encodedCQCounter);
}
public PTableImpl(long timeStamp) { // For delete marker
@@ -236,7 +234,7 @@ public class PTableImpl implements PTable {
table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), getColumnsToClone(table), parentSchemaName, table.getParentTableName(),
indexes, table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), viewStatement,
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
- table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, List<PColumn> columns) throws SQLException {
@@ -245,7 +243,7 @@ public class PTableImpl implements PTable {
table.getSequenceNumber(), table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
table.getIndexes(), table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
- table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns) throws SQLException {
@@ -254,7 +252,7 @@ public class PTableImpl implements PTable {
sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(),
table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(),
- table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns, boolean isImmutableRows) throws SQLException {
@@ -263,7 +261,7 @@ public class PTableImpl implements PTable {
sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(),
- table.getIndexType(), table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getIndexType(), table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, List<PColumn> columns, boolean isImmutableRows, boolean isWalDisabled,
@@ -273,7 +271,7 @@ public class PTableImpl implements PTable {
sequenceNumber, table.getPKName(), table.getBucketNum(), columns, table.getParentSchemaName(), table.getParentTableName(),
table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
isWalDisabled, isMultitenant, storeNulls, table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(),
- table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), isTransactional, updateCacheFrequency, table.getIndexDisableTimestamp(), isNamespaceMapped, table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), isTransactional, updateCacheFrequency, table.getIndexDisableTimestamp(), isNamespaceMapped, table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, PIndexState state) throws SQLException {
@@ -283,7 +281,7 @@ public class PTableImpl implements PTable {
table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(),
- table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getTableStats(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, boolean rowKeyOrderOptimizable) throws SQLException {
@@ -293,7 +291,7 @@ public class PTableImpl implements PTable {
table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getTableStats(),
- table.getBaseColumnCount(), rowKeyOrderOptimizable, table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getBaseColumnCount(), rowKeyOrderOptimizable, table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PTable table, PTableStats stats) throws SQLException {
@@ -303,7 +301,7 @@ public class PTableImpl implements PTable {
table.getParentSchemaName(), table.getParentTableName(), table.getIndexes(),
table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(),
table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), stats,
- table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounters());
+ table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getStorageScheme(), table.getEncodedCQCounter());
}
public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
@@ -312,12 +310,12 @@ public class PTableImpl implements PTable {
boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
- long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, Map<String, Integer> encodedColumnQualifierCounters) throws SQLException {
+ long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException {
return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName,
dataTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId,
indexType, PTableStats.EMPTY_STATS, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, rowKeyOrderOptimizable, isTransactional,
- updateCacheFrequency,indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedColumnQualifierCounters);
+ updateCacheFrequency,indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedCQCounter);
}
public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type,
@@ -326,12 +324,12 @@ public class PTableImpl implements PTable {
boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression,
boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency,
- @NotNull PTableStats stats, int baseColumnCount, long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, Map<String, Integer> encodedColumnQualifierCounters)
+ @NotNull PTableStats stats, int baseColumnCount, long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter)
throws SQLException {
return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName,
bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames,
defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId,
- indexType, stats, baseColumnCount, rowKeyOrderOptimizable, isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedColumnQualifierCounters);
+ indexType, stats, baseColumnCount, rowKeyOrderOptimizable, isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedCQCounter);
}
private PTableImpl(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state,
@@ -339,11 +337,11 @@ public class PTableImpl implements PTable {
PName parentSchemaName, PName parentTableName, List<PTable> indexes, boolean isImmutableRows,
List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,
boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType,
- PTableStats stats, int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, Map<String, Integer> encodedColumnQualifierCounters) throws SQLException {
+ PTableStats stats, int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException {
init(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns,
stats, schemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName,
viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
- isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedColumnQualifierCounters);
+ isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedCQCounter);
}
@Override
@@ -376,7 +374,7 @@ public class PTableImpl implements PTable {
PName pkName, Integer bucketNum, List<PColumn> columns, PTableStats stats, PName parentSchemaName, PName parentTableName,
List<PTable> indexes, boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL,
boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId,
- IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, Map<String, Integer> encodedColumnQualifierCounters) throws SQLException {
+ IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException {
Preconditions.checkNotNull(schemaName);
Preconditions.checkArgument(tenantId==null || tenantId.getBytes().length > 0); // tenantId should be null or not empty
int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE +
@@ -551,7 +549,7 @@ public class PTableImpl implements PTable {
}
this.estimatedSize = estimatedSize;
this.baseColumnCount = baseColumnCount;
- this.encodedCQCounters = encodedColumnQualifierCounters;
+ this.encodedCQCounter = encodedCQCounter;
}
@Override
@@ -1172,22 +1170,20 @@ public class PTableImpl implements PTable {
if (table.hasStorageScheme()) {
storageScheme = StorageScheme.fromSerializedValue(table.getStorageScheme().toByteArray()[0]);
}
- int numCounters = table.getEncodedColumnQualifierCountersCount();
- Map<String, Integer> encodedColumnQualifierCounters = null;
- if (numCounters > 0) {
- encodedColumnQualifierCounters = Maps.newHashMapWithExpectedSize(numCounters);
- for (int i = 0; i < numCounters; i++) {
- PTableProtos.EncodedColumnQualifierCounter c = table.getEncodedColumnQualifierCounters(i);
- encodedColumnQualifierCounters.put(c.getFamilyName(), c.getCounter());
- }
+ EncodedCQCounter encodedColumnQualifierCounter = null;
+ if (table.hasEncodedColumnQualifierCounter()) {
+ encodedColumnQualifierCounter = new EncodedCQCounter(table.getEncodedColumnQualifierCounter());
+ } else {
+ encodedColumnQualifierCounter = PTable.EncodedCQCounter.NULL_COUNTER;
}
+
try {
PTableImpl result = new PTableImpl();
result.init(tenantId, schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName,
(bucketNum == NO_SALTING) ? null : bucketNum, columns, stats, schemaName,dataTableName, indexes,
isImmutableRows, physicalNames, defaultFamilyName, viewStatement, disableWAL,
multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable,
- isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedColumnQualifierCounters);
+ isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, storageScheme, encodedColumnQualifierCounter);
return result;
} catch (SQLException e) {
throw new RuntimeException(e); // Impossible
@@ -1282,14 +1278,8 @@ public class PTableImpl implements PTable {
if (table.getStorageScheme() != null) {
builder.setStorageScheme(ByteStringer.wrap(new byte[]{table.getStorageScheme().getSerializedValue()}));
}
- Map<String, Integer> encodedColumnQualifierCounters = table.getEncodedCQCounters();
- if (encodedColumnQualifierCounters != null) {
- for (Entry<String, Integer> entry : encodedColumnQualifierCounters.entrySet()) {
- EncodedColumnQualifierCounter.Builder b = EncodedColumnQualifierCounter.newBuilder();
- b.setFamilyName(entry.getKey());
- b.setCounter(entry.getValue());
- builder.addEncodedColumnQualifierCounters(b.build());
- }
+ if (table.getEncodedCQCounter() != PTable.EncodedCQCounter.NULL_COUNTER) {
+ builder.setEncodedColumnQualifierCounter(table.getEncodedCQCounter().getValue());
}
return builder.build();
}
@@ -1340,7 +1330,7 @@ public class PTableImpl implements PTable {
}
@Override
- public Map<String, Integer> getEncodedCQCounters() {
- return encodedCQCounters;
+ public EncodedCQCounter getEncodedCQCounter() {
+ return encodedCQCounter;
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BoundedSkipNullCellsList.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BoundedSkipNullCellsList.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BoundedSkipNullCellsList.java
index a04adf7..fa30f54 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BoundedSkipNullCellsList.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BoundedSkipNullCellsList.java
@@ -17,33 +17,60 @@
*/
package org.apache.phoenix.schema.tuple;
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_NAME;
+
+import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+import javax.annotation.concurrent.NotThreadSafe;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;
-import com.google.common.base.Preconditions;
-
+/**
+ * List implementation that provides index-based lookup when the cell column qualifiers are generated using the
+ * {@link StorageScheme#ENCODED_COLUMN_NAMES} scheme. The API methods in this list assume that the caller wants to see
+ * and add only non null elements in the list. Such an assumption makes the implementation mimic the behavior that one
+ * would get when passing an {@link ArrayList} to hbase for filling in the key values returned by scanners. This
+ * implementation doesn't implement all the optional methods of the {@link List} interface which should be OK. A lot of
+ * things would be screwed up if HBase starts expecting that the list implementation passed in to scanners
+ * implements all the optional methods of the interface too.
+ *
+ * For getting elements out of the list, use the iterator or {@code get(int)}.
+ */
+@NotThreadSafe
public class BoundedSkipNullCellsList implements List<Cell> {
-
- private final int minQualifier;
- private final int maxQualifier;
+
+ private int minQualifier;
+ private int maxQualifier;
private final Cell[] array;
private int numNonNullElements;
private int firstNonNullElementIdx = -1;
-
+ private static final String RESERVED_RANGE = "(" + ENCODED_EMPTY_COLUMN_NAME + ", "
+ + (QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE - 1) + ")";
+
public BoundedSkipNullCellsList(int minQualifier, int maxQualifier) {
- Preconditions.checkArgument(minQualifier <= maxQualifier);
+ checkArgument(minQualifier <= maxQualifier, "Invalid arguments. Min: " + minQualifier + ". Max: " + maxQualifier);
+ if (!(minQualifier == maxQualifier && minQualifier == ENCODED_EMPTY_COLUMN_NAME)) {
+ checkArgument(minQualifier >= ENCODED_CQ_COUNTER_INITIAL_VALUE, "Argument minQualifier " + minQualifier + " needs to lie outside of the reserved range: " + RESERVED_RANGE);
+ }
this.minQualifier = minQualifier;
this.maxQualifier = maxQualifier;
- this.array = new Cell[maxQualifier - minQualifier + 1];
+ int reservedRangeSize = ENCODED_CQ_COUNTER_INITIAL_VALUE - ENCODED_EMPTY_COLUMN_NAME;
+ this.array = new Cell[reservedRangeSize + maxQualifier - ENCODED_CQ_COUNTER_INITIAL_VALUE + 1];
}
-
+
@Override
public int size() {
return numNonNullElements;
@@ -56,21 +83,41 @@ public class BoundedSkipNullCellsList implements List<Cell> {
@Override
public boolean contains(Object o) {
- throwUnsupportedOperationException();
- return false;
+ return indexOf(o) >= 0;
}
-
+
+ /**
+ * This implementation only returns an array of non-null elements in the list.
+ */
@Override
public Object[] toArray() {
- throwUnsupportedOperationException();
- return null;
+ Object[] toReturn = new Object[numNonNullElements];
+ int counter = 0;
+ for (int i = 0; i < array.length; i++) {
+ if (array[i] != null) {
+ toReturn[counter++] = array[i];
+ }
+ }
+ return toReturn;
}
+
+ /**
+ * This implementation only returns an array of non-null elements in the list.
+ * This is not the most efficient way of copying elements into an array.
+ */
@Override
+ @SuppressWarnings("unchecked")
public <T> T[] toArray(T[] a) {
- throwUnsupportedOperationException();
- return null;
+ T[] toReturn = (T[])java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), numNonNullElements);
+ int counter = 0;
+ for (int i = 0; i < array.length; i++) {
+ if (array[i] != null) {
+ toReturn[counter++] = (T)array[i];
+ }
+ }
+ return toReturn;
}
@Override
@@ -78,7 +125,7 @@ public class BoundedSkipNullCellsList implements List<Cell> {
if (e == null) {
throw new NullPointerException();
}
- int columnQualifier = (int)PInteger.INSTANCE.toObject(e.getQualifierArray(), e.getQualifierOffset(), e.getQualifierLength());
+ int columnQualifier = PInteger.INSTANCE.getCodec().decodeInt(e.getQualifierArray(), e.getQualifierOffset(), SortOrder.ASC);
checkQualifierRange(columnQualifier);
int idx = getArrayIndex(columnQualifier);
array[idx] = e;
@@ -92,7 +139,7 @@ public class BoundedSkipNullCellsList implements List<Cell> {
@Override
public boolean remove(Object o) {
if (o == null) {
- throw new NullPointerException();
+ return false;
}
Cell e = (Cell)o;
int i = 0;
@@ -108,7 +155,7 @@ public class BoundedSkipNullCellsList implements List<Cell> {
i++;
}
if (i < array.length) {
- firstNonNullElementIdx = maxQualifier;
+ firstNonNullElementIdx = i;
} else {
firstNonNullElementIdx = -1;
}
@@ -122,8 +169,12 @@ public class BoundedSkipNullCellsList implements List<Cell> {
@Override
public boolean containsAll(Collection<?> c) {
- throwUnsupportedOperationException();
- return false;
+ boolean containsAll = true;
+ Iterator<?> itr = c.iterator();
+ while (itr.hasNext()) {
+ containsAll &= (indexOf(itr.next()) >= 0);
+ }
+ return containsAll;
}
@Override
@@ -146,8 +197,12 @@ public class BoundedSkipNullCellsList implements List<Cell> {
@Override
public boolean removeAll(Collection<?> c) {
- throwUnsupportedOperationException();
- return false;
+ Iterator<?> itr = c.iterator();
+ boolean changed = false;
+ while (itr.hasNext()) {
+ changed |= remove(itr.next());
+ }
+ return changed;
}
@Override
@@ -161,39 +216,31 @@ public class BoundedSkipNullCellsList implements List<Cell> {
for (int i = 0; i < array.length; i++) {
array[i] = null;
}
+ firstNonNullElementIdx = -1;
numNonNullElements = 0;
}
-
+
@Override
public Cell get(int index) {
- //TODO: samarth how can we support this? It is always assumed that the
- // user expects to get something back from the list and we would end up returning null
- // here. Do we just add the
- throwUnsupportedOperationException();
rangeCheck(index);
- return array[index];
- }
-
- public Cell getCellForColumnQualifier(int columnQualifier) {
- int idx = getArrayIndex(columnQualifier);
- return array[idx];
+ int numNonNullElementsFound = 0;
+ int i = 0;
+ for (; i < array.length; i++) {
+ if (array[i] != null) {
+ numNonNullElementsFound++;
+ if (numNonNullElementsFound - 1 == index) {
+ break;
+ }
+ }
+
+ }
+ return (numNonNullElementsFound - 1) != index ? null : array[i];
}
@Override
public Cell set(int index, Cell element) {
- //TODO: samarth how can we support this?
throwUnsupportedOperationException();
- if (element == null) {
- throw new NullPointerException();
- }
- rangeCheck(index);
- int idx = minQualifier + index;
- Cell prev = array[idx];
- array[idx] = element;
- if (prev == null) {
- numNonNullElements++;
- }
- return prev;
+ return null;
}
@Override
@@ -209,14 +256,28 @@ public class BoundedSkipNullCellsList implements List<Cell> {
@Override
public int indexOf(Object o) {
- throwUnsupportedOperationException();
- return 0;
+ if (o == null) {
+ return -1;
+ } else {
+ for (int i = 0; i < array.length; i++)
+ if (o.equals(array[i])) {
+ return i;
+ }
+ }
+ return -1;
}
@Override
public int lastIndexOf(Object o) {
- throwUnsupportedOperationException();
- return 0;
+ if (o == null) {
+ return -1;
+ }
+ for (int i = array.length - 1; i >=0 ; i--) {
+ if (o.equals(array[i])) {
+ return i;
+ }
+ }
+ return -1;
}
@Override
@@ -237,27 +298,51 @@ public class BoundedSkipNullCellsList implements List<Cell> {
return null;
}
- private void checkQualifierRange(int qualifier) {
- if (qualifier < minQualifier || qualifier > maxQualifier) {
- throw new IndexOutOfBoundsException("Qualifier is out of the range. Min: " + minQualifier + " Max: " + maxQualifier);
+ @Override
+ public Iterator<Cell> iterator() {
+ return new Itr();
+ }
+
+ public Cell getCellForColumnQualifier(int columnQualifier) {
+ checkQualifierRange(columnQualifier);
+ int idx = getArrayIndex(columnQualifier);
+ Cell c = array[idx];
+ if (c == null) {
+ throw new NoSuchElementException("No element present for column qualifier: " + columnQualifier);
}
+ return c;
}
+ public Cell getFirstCell() {
+ if (firstNonNullElementIdx == -1) {
+ throw new NoSuchElementException("No elements present in the list");
+ }
+ return array[firstNonNullElementIdx];
+ }
+
+ private void checkQualifierRange(int qualifier) {
+ if (qualifier < ENCODED_EMPTY_COLUMN_NAME || qualifier > maxQualifier) {
+ throw new IndexOutOfBoundsException(
+ "Qualifier " + qualifier + " is out of the valid range. Reserved: " + RESERVED_RANGE + ". Table column qualifier range: ("
+ + minQualifier + ", " + maxQualifier + ")");
+ }
+ }
+
private void rangeCheck(int index) {
- if (index < 0 || index >= array.length) {
+ if (index < 0 || index > size() - 1) {
throw new IndexOutOfBoundsException();
}
}
- private void throwUnsupportedOperationException() {
- throw new UnsupportedOperationException("Operation not supported because Samarth didn't implement it");
+ private int getArrayIndex(int columnQualifier) {
+ return columnQualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE ? columnQualifier : ENCODED_CQ_COUNTER_INITIAL_VALUE
+ + (columnQualifier - minQualifier);
}
- @Override
- public Iterator<Cell> iterator() {
- return new Itr();
+ private void throwUnsupportedOperationException() {
+ throw new UnsupportedOperationException("Operation cannot be supported because it potentially violates the invariance contract of this list implementation");
}
-
+
private class Itr implements Iterator<Cell> {
private Cell current;
private int currentIdx = 0;
@@ -265,7 +350,7 @@ public class BoundedSkipNullCellsList implements List<Cell> {
private Itr() {
moveToNextNonNullCell(true);
}
-
+
@Override
public boolean hasNext() {
return !exhausted;
@@ -285,7 +370,7 @@ public class BoundedSkipNullCellsList implements List<Cell> {
public void remove() {
throwUnsupportedOperationException();
}
-
+
private void moveToNextNonNullCell(boolean init) {
int i = init ? 0 : currentIdx + 1;
while (i < array.length && (current = array[i]) == null) {
@@ -298,41 +383,148 @@ public class BoundedSkipNullCellsList implements List<Cell> {
exhausted = true;
}
}
-
+
}
-
- public Cell getFirstCell() {
- if (firstNonNullElementIdx == -1) {
- throw new IllegalStateException("List doesn't have any non-null cell present");
+
+ private class ListItr implements ListIterator<Cell> {
+ private int previousIndex;
+ private int nextIndex;
+ private Cell previous;
+ private Cell next;
+
+ private ListItr() {
+ movePointersForward(true);
+ previous = null;
+ if (nextIndex != -1) {
+ next = array[nextIndex];
+ }
}
- return array[firstNonNullElementIdx];
- }
-
- private int getArrayIndex(int columnQualifier) {
- return columnQualifier - minQualifier;
+
+ @Override
+ public boolean hasNext() {
+ return next != null;
+ }
+
+ @Override
+ public Cell next() {
+ Cell toReturn = next;
+ if (toReturn == null) {
+ throw new NoSuchElementException();
+ }
+ movePointersForward(false);
+ return toReturn;
+ }
+
+ @Override
+ public boolean hasPrevious() {
+ return previous != null;
+ }
+
+ @Override
+ public Cell previous() {
+ Cell toReturn = previous;
+ if (toReturn == null) {
+ throw new NoSuchElementException();
+ }
+ movePointersBackward(false);
+ return toReturn;
+ }
+
+ @Override
+ public int nextIndex() {
+ return nextIndex;
+ }
+
+ @Override
+ public int previousIndex() {
+ return previousIndex;
+ }
+
+ @Override
+ public void remove() {
+ // TODO Auto-generated method stub
+
+ }
+
+ // TODO: samarth this is one of these ouch methods that can make our implementation fragile.
+ // It is a non-optional method and can't really be supported
+ @Override
+ public void set(Cell e) {
+ // TODO Auto-generated method stub
+
+ }
+
+ @Override
+ public void add(Cell e) {
+ // TODO Auto-generated method stub
+
+ }
+
+ private void movePointersForward(boolean init) {
+ int i = init ? 0 : nextIndex;
+ if (!init) {
+ previousIndex = nextIndex;
+ previous = next;
+ } else {
+ previousIndex = -1;
+ previous = null;
+ }
+ while (i < array.length && (array[i]) == null) {
+ i++;
+ }
+ if (i < array.length) {
+ nextIndex = i;
+ next = array[i];
+ } else {
+ nextIndex = -1;
+ next = null;
+ }
+ }
+
+ private void movePointersBackward(boolean init) {
+ int i = init ? 0 : previousIndex;
+ }
+
}
-
-// private Cell setCell(int columnQualifier, Cell e) {
-//
-// }
-
+
public static void main (String args[]) throws Exception {
- BoundedSkipNullCellsList list = new BoundedSkipNullCellsList(0, 10); // list of eleven elements
+ BoundedSkipNullCellsList list = new BoundedSkipNullCellsList(11, 16); // list of 6 elements
System.out.println(list.size());
+
byte[] row = Bytes.toBytes("row");
byte[] cf = Bytes.toBytes("cf");
+
+ // add elements in reserved range
list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(0)));
list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(5)));
list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(10)));
+ System.out.println(list.size());
+ for (Cell c : list) {
+ //System.out.println(c);
+ }
+ // add elements in qualifier range
+ list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(12)));
+ list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(14)));
+ list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(16)));
+ System.out.println(list.size());
for (Cell c : list) {
- System.out.println(c);
+ //System.out.println(c);
}
+
+ list.add(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(11)));
System.out.println(list.size());
+ for (Cell c : list) {
+ //System.out.println(c);
+ }
+
System.out.println(list.get(0));
- System.out.println(list.get(5));
- System.out.println(list.get(10));
System.out.println(list.get(1));
+ System.out.println(list.get(2));
+ System.out.println(list.get(3));
+ System.out.println(list.get(4));
+ System.out.println(list.get(5));
+ System.out.println(list.get(6));
System.out.println(list.remove(KeyValue.createFirstOnRow(row, cf, PInteger.INSTANCE.toBytes(5))));
System.out.println(list.get(5));
System.out.println(list.size());
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java
index a1fe549..8c41844 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java
@@ -17,10 +17,13 @@
*/
package org.apache.phoenix.schema.tuple;
+import static com.google.common.base.Preconditions.checkArgument;
+
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;
/**
@@ -31,19 +34,15 @@ public class PositionBasedMultiKeyValueTuple extends BaseTuple {
public PositionBasedMultiKeyValueTuple() {}
-// public PositionBasedMultiKeyValueTuple(List<Cell> values, int minQualifier, int maxQualifier) {
-// this.values = new BoundedSkipNullCellsList(minQualifier, maxQualifier);
-// setKeyValues(values);
-// }
-
-// public PositionBasedMultiKeyValueTuple(int minQualifier, int maxQualifier){
-// this.values = new BoundedSkipNullCellsList(minQualifier, maxQualifier);
-// }
+ public PositionBasedMultiKeyValueTuple(List<Cell> values) {
+ checkArgument(values instanceof BoundedSkipNullCellsList, "PositionBasedMultiKeyValueTuple only works with lists of type BoundedSkipNullCellsList");
+ this.values = (BoundedSkipNullCellsList)values;
+ }
/** Caller must not modify the list that is passed here */
@Override
public void setKeyValues(List<Cell> values) {
- assert values instanceof BoundedSkipNullCellsList;
+ checkArgument(values instanceof BoundedSkipNullCellsList, "PositionBasedMultiKeyValueTuple only works with lists of type BoundedSkipNullCellsList");
this.values = (BoundedSkipNullCellsList)values;
}
@@ -60,7 +59,7 @@ public class PositionBasedMultiKeyValueTuple extends BaseTuple {
@Override
public Cell getValue(byte[] family, byte[] qualifier) {
- return values.getCellForColumnQualifier((int)PInteger.INSTANCE.toObject(qualifier));
+ return values.getCellForColumnQualifier(PInteger.INSTANCE.getCodec().decodeInt(qualifier, 0, SortOrder.ASC));
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
index c28a2bf..7f2873a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
@@ -17,33 +17,44 @@
*/
package org.apache.phoenix.schema.tuple;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
+
+import java.util.Collections;
+
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.util.KeyValueUtil;
-
+/**
+ *
+ * Wrapper around {@link Result} that implements Phoenix's {@link Tuple} interface.
+ *
+ */
public class ResultTuple extends BaseTuple {
- private Result result;
+ private final Result result;
+ public static final ResultTuple EMPTY_TUPLE = new ResultTuple(Result.create(Collections.<Cell>emptyList()));
+ //TODO: samarth see if we can get rid of this constructor altogether.
public ResultTuple(Result result) {
this.result = result;
}
- public ResultTuple() {
- }
+// public ResultTuple(Result result, boolean useQualifierAsIndex) {
+// this.result = result;
+// this.useQualifierAsIndex = useQualifierAsIndex;
+// }
public Result getResult() {
return this.result;
}
- public void setResult(Result result) {
- this.result = result;
- }
-
@Override
public void getKey(ImmutableBytesWritable ptr) {
ptr.set(result.getRow());
@@ -56,6 +67,12 @@ public class ResultTuple extends BaseTuple {
@Override
public KeyValue getValue(byte[] family, byte[] qualifier) {
+// if (useQualifierAsIndex) {
+// int index = PInteger.INSTANCE.getCodec().decodeInt(qualifier, 0, SortOrder.ASC);
+// //TODO: samarth this seems like a hack here at this place. Think more. Maybe we should use a new tuple here?
+// index = index >= ENCODED_CQ_COUNTER_INITIAL_VALUE ? (index - ENCODED_CQ_COUNTER_INITIAL_VALUE) : index;
+// return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(result.rawCells()[index]);
+// }
Cell cell = KeyValueUtil.getColumnLatest(GenericKeyValueBuilder.INSTANCE,
result.rawCells(), family, qualifier);
return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cell);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 49ac1f4..2df5cd6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -17,6 +17,9 @@
*/
package org.apache.phoenix.util;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_QUALIFIER;
+
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
@@ -521,14 +524,15 @@ public class IndexUtil {
}
// TODO: handle null case (but shouldn't happen)
+ //TODO: samarth confirm if this is the right thing to do here, i.e. pass false for lookup.
Tuple joinTuple = new ResultTuple(joinResult);
// This will create a byte[] that captures all of the values from the data table
byte[] value =
tupleProjector.getSchema().toBytes(joinTuple, tupleProjector.getExpressions(),
tupleProjector.getValueBitSet(), ptr);
KeyValue keyValue =
- KeyValueUtil.newKeyValue(firstCell.getRowArray(),firstCell.getRowOffset(),firstCell.getRowLength(), TupleProjector.VALUE_COLUMN_FAMILY,
- TupleProjector.VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(), value, 0, value.length);
+ KeyValueUtil.newKeyValue(firstCell.getRowArray(),firstCell.getRowOffset(),firstCell.getRowLength(), VALUE_COLUMN_FAMILY,
+ VALUE_COLUMN_QUALIFIER, firstCell.getTimestamp(), value, 0, value.length);
result.add(keyValue);
}
for (int i = 0; i < result.size(); i++) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
index dba6550..f97230b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ResultUtil.java
@@ -18,7 +18,6 @@
package org.apache.phoenix.util;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
@@ -129,63 +128,4 @@ public class ResultUtil {
return Bytes.compareTo(getRawBytes(r1), getKeyOffset(r1), getKeyLength(r1), getRawBytes(r2), getKeyOffset(r2), getKeyLength(r2));
}
- /**
- * Binary search for latest column value without allocating memory in the process
- */
- public static KeyValue getColumnLatest(Result r, byte[] family, byte[] qualifier) {
- byte[] rbytes = getRawBytes(r);
- int roffset = getKeyOffset(r);
- int rlength = getKeyLength(r);
- return getColumnLatest(r, rbytes, roffset, rlength, family, 0, family.length, qualifier, 0, qualifier.length);
- }
-
- public static KeyValue getSearchTerm(Result r, byte[] family, byte[] qualifier) {
- byte[] rbytes = getRawBytes(r);
- int roffset = getKeyOffset(r);
- int rlength = getKeyLength(r);
- return KeyValue.createFirstOnRow(rbytes, roffset, rlength, family, 0, family.length, qualifier, 0, qualifier.length);
- }
- /**
- * Binary search for latest column value without allocating memory in the process
- */
- public static KeyValue getColumnLatest(Result r, byte[] row, int roffset, int rlength, byte[] family, int foffset, int flength, byte[] qualifier, int qoffset, int qlength) {
- KeyValue searchTerm = KeyValue.createFirstOnRow(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength);
- return getColumnLatest(r,searchTerm);
-
- }
-
- /**
- * Binary search for latest column value without allocating memory in the process
- * @param r
- * @param searchTerm
- */
- @SuppressWarnings("deprecation")
- public static KeyValue getColumnLatest(Result r, KeyValue searchTerm) {
- KeyValue [] kvs = r.raw(); // side effect possibly.
- if (kvs == null || kvs.length == 0) {
- return null;
- }
-
- // pos === ( -(insertion point) - 1)
- int pos = Arrays.binarySearch(kvs, searchTerm, KeyValue.COMPARATOR);
- // never will exact match
- if (pos < 0) {
- pos = (pos+1) * -1;
- // pos is now insertion point
- }
- if (pos == kvs.length) {
- return null; // doesn't exist
- }
-
- KeyValue kv = kvs[pos];
- if (Bytes.compareTo(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(),
- searchTerm.getBuffer(), searchTerm.getFamilyOffset(), searchTerm.getFamilyLength()) != 0) {
- return null;
- }
- if (Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength(),
- searchTerm.getBuffer(), searchTerm.getQualifierOffset(), searchTerm.getQualifierLength()) != 0) {
- return null;
- }
- return kv;
- }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 8174f7b..23df3fd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -59,6 +59,7 @@ import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.IllegalDataException;
import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.RowKeySchema;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.ValueSchema.Field;
@@ -825,5 +826,13 @@ public class ScanUtil {
}
return new Pair<>(minQ, maxQ);
}
+
+ public static boolean useQualifierAsIndex(Pair<Integer, Integer> minMaxQualifiers, boolean isJoin) {
+ return minMaxQualifiers != null && !isJoin;
+ }
+
+ public static boolean setMinMaxQualifiersOnScan(PTable table) {
+ return EncodedColumnsUtil.usesEncodedColumnNames(table) && !table.isTransactional();
+ }
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
index 944dda0..8970469 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/execute/CorrelatePlanTest.java
@@ -17,6 +17,7 @@
*/
package org.apache.phoenix.execute;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
import static org.apache.phoenix.util.PhoenixRuntime.CONNECTIONLESS;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
@@ -55,6 +56,7 @@ import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.parse.JoinTableNode.JoinType;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.SelectStatement;
+import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.ColumnRef;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnImpl;
@@ -247,7 +249,7 @@ public class CorrelatePlanTest {
for (int i = 0; i < row.length; i++) {
String name = ParseNodeFactory.createTempAlias();
Expression expr = LiteralExpression.newConstant(row[i]);
- columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(TupleProjector.VALUE_COLUMN_FAMILY),
+ columns.add(new PColumnImpl(PNameFactory.newName(name), PNameFactory.newName(VALUE_COLUMN_FAMILY),
expr.getDataType(), expr.getMaxLength(), expr.getScale(), expr.isNullable(),
i, expr.getSortOrder(), null, null, false, name, false, false, null));
}
[3/4] phoenix git commit: Optimize order by and grouped aggregations
by taking advantage of column encoding
Posted by sa...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index 8d6c575..97d3d58 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -3449,30 +3449,15 @@ public final class PTableProtos {
*/
com.google.protobuf.ByteString getStorageScheme();
- // repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;
+ // optional int32 encodedColumnQualifierCounter = 32;
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- java.util.List<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter>
- getEncodedColumnQualifierCountersList();
+ boolean hasEncodedColumnQualifierCounter();
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter getEncodedColumnQualifierCounters(int index);
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- int getEncodedColumnQualifierCountersCount();
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- java.util.List<? extends org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder>
- getEncodedColumnQualifierCountersOrBuilderList();
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder getEncodedColumnQualifierCountersOrBuilder(
- int index);
+ int getEncodedColumnQualifierCounter();
}
/**
* Protobuf type {@code PTable}
@@ -3698,12 +3683,9 @@ public final class PTableProtos {
storageScheme_ = input.readBytes();
break;
}
- case 258: {
- if (!((mutable_bitField0_ & 0x80000000) == 0x80000000)) {
- encodedColumnQualifierCounters_ = new java.util.ArrayList<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter>();
- mutable_bitField0_ |= 0x80000000;
- }
- encodedColumnQualifierCounters_.add(input.readMessage(org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.PARSER, extensionRegistry));
+ case 256: {
+ bitField0_ |= 0x08000000;
+ encodedColumnQualifierCounter_ = input.readInt32();
break;
}
}
@@ -3726,9 +3708,6 @@ public final class PTableProtos {
if (((mutable_bitField0_ & 0x00040000) == 0x00040000)) {
physicalNames_ = java.util.Collections.unmodifiableList(physicalNames_);
}
- if (((mutable_bitField0_ & 0x80000000) == 0x80000000)) {
- encodedColumnQualifierCounters_ = java.util.Collections.unmodifiableList(encodedColumnQualifierCounters_);
- }
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
@@ -4351,40 +4330,20 @@ public final class PTableProtos {
return storageScheme_;
}
- // repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;
- public static final int ENCODEDCOLUMNQUALIFIERCOUNTERS_FIELD_NUMBER = 32;
- private java.util.List<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter> encodedColumnQualifierCounters_;
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public java.util.List<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter> getEncodedColumnQualifierCountersList() {
- return encodedColumnQualifierCounters_;
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public java.util.List<? extends org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder>
- getEncodedColumnQualifierCountersOrBuilderList() {
- return encodedColumnQualifierCounters_;
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public int getEncodedColumnQualifierCountersCount() {
- return encodedColumnQualifierCounters_.size();
- }
+ // optional int32 encodedColumnQualifierCounter = 32;
+ public static final int ENCODEDCOLUMNQUALIFIERCOUNTER_FIELD_NUMBER = 32;
+ private int encodedColumnQualifierCounter_;
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter getEncodedColumnQualifierCounters(int index) {
- return encodedColumnQualifierCounters_.get(index);
+ public boolean hasEncodedColumnQualifierCounter() {
+ return ((bitField0_ & 0x08000000) == 0x08000000);
}
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder getEncodedColumnQualifierCountersOrBuilder(
- int index) {
- return encodedColumnQualifierCounters_.get(index);
+ public int getEncodedColumnQualifierCounter() {
+ return encodedColumnQualifierCounter_;
}
private void initFields() {
@@ -4419,7 +4378,7 @@ public final class PTableProtos {
indexDisableTimestamp_ = 0L;
isNamespaceMapped_ = false;
storageScheme_ = com.google.protobuf.ByteString.EMPTY;
- encodedColumnQualifierCounters_ = java.util.Collections.emptyList();
+ encodedColumnQualifierCounter_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4480,12 +4439,6 @@ public final class PTableProtos {
return false;
}
}
- for (int i = 0; i < getEncodedColumnQualifierCountersCount(); i++) {
- if (!getEncodedColumnQualifierCounters(i).isInitialized()) {
- memoizedIsInitialized = 0;
- return false;
- }
- }
memoizedIsInitialized = 1;
return true;
}
@@ -4586,8 +4539,8 @@ public final class PTableProtos {
if (((bitField0_ & 0x04000000) == 0x04000000)) {
output.writeBytes(31, storageScheme_);
}
- for (int i = 0; i < encodedColumnQualifierCounters_.size(); i++) {
- output.writeMessage(32, encodedColumnQualifierCounters_.get(i));
+ if (((bitField0_ & 0x08000000) == 0x08000000)) {
+ output.writeInt32(32, encodedColumnQualifierCounter_);
}
getUnknownFields().writeTo(output);
}
@@ -4727,9 +4680,9 @@ public final class PTableProtos {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(31, storageScheme_);
}
- for (int i = 0; i < encodedColumnQualifierCounters_.size(); i++) {
+ if (((bitField0_ & 0x08000000) == 0x08000000)) {
size += com.google.protobuf.CodedOutputStream
- .computeMessageSize(32, encodedColumnQualifierCounters_.get(i));
+ .computeInt32Size(32, encodedColumnQualifierCounter_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
@@ -4897,8 +4850,11 @@ public final class PTableProtos {
result = result && getStorageScheme()
.equals(other.getStorageScheme());
}
- result = result && getEncodedColumnQualifierCountersList()
- .equals(other.getEncodedColumnQualifierCountersList());
+ result = result && (hasEncodedColumnQualifierCounter() == other.hasEncodedColumnQualifierCounter());
+ if (hasEncodedColumnQualifierCounter()) {
+ result = result && (getEncodedColumnQualifierCounter()
+ == other.getEncodedColumnQualifierCounter());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -5036,9 +4992,9 @@ public final class PTableProtos {
hash = (37 * hash) + STORAGESCHEME_FIELD_NUMBER;
hash = (53 * hash) + getStorageScheme().hashCode();
}
- if (getEncodedColumnQualifierCountersCount() > 0) {
- hash = (37 * hash) + ENCODEDCOLUMNQUALIFIERCOUNTERS_FIELD_NUMBER;
- hash = (53 * hash) + getEncodedColumnQualifierCountersList().hashCode();
+ if (hasEncodedColumnQualifierCounter()) {
+ hash = (37 * hash) + ENCODEDCOLUMNQUALIFIERCOUNTER_FIELD_NUMBER;
+ hash = (53 * hash) + getEncodedColumnQualifierCounter();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
@@ -5144,7 +5100,6 @@ public final class PTableProtos {
getColumnsFieldBuilder();
getIndexesFieldBuilder();
getGuidePostsFieldBuilder();
- getEncodedColumnQualifierCountersFieldBuilder();
}
}
private static Builder create() {
@@ -5227,12 +5182,8 @@ public final class PTableProtos {
bitField0_ = (bitField0_ & ~0x20000000);
storageScheme_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x40000000);
- if (encodedColumnQualifierCountersBuilder_ == null) {
- encodedColumnQualifierCounters_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x80000000);
- } else {
- encodedColumnQualifierCountersBuilder_.clear();
- }
+ encodedColumnQualifierCounter_ = 0;
+ bitField0_ = (bitField0_ & ~0x80000000);
return this;
}
@@ -5401,15 +5352,10 @@ public final class PTableProtos {
to_bitField0_ |= 0x04000000;
}
result.storageScheme_ = storageScheme_;
- if (encodedColumnQualifierCountersBuilder_ == null) {
- if (((bitField0_ & 0x80000000) == 0x80000000)) {
- encodedColumnQualifierCounters_ = java.util.Collections.unmodifiableList(encodedColumnQualifierCounters_);
- bitField0_ = (bitField0_ & ~0x80000000);
- }
- result.encodedColumnQualifierCounters_ = encodedColumnQualifierCounters_;
- } else {
- result.encodedColumnQualifierCounters_ = encodedColumnQualifierCountersBuilder_.build();
+ if (((from_bitField0_ & 0x80000000) == 0x80000000)) {
+ to_bitField0_ |= 0x08000000;
}
+ result.encodedColumnQualifierCounter_ = encodedColumnQualifierCounter_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -5597,31 +5543,8 @@ public final class PTableProtos {
if (other.hasStorageScheme()) {
setStorageScheme(other.getStorageScheme());
}
- if (encodedColumnQualifierCountersBuilder_ == null) {
- if (!other.encodedColumnQualifierCounters_.isEmpty()) {
- if (encodedColumnQualifierCounters_.isEmpty()) {
- encodedColumnQualifierCounters_ = other.encodedColumnQualifierCounters_;
- bitField0_ = (bitField0_ & ~0x80000000);
- } else {
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.addAll(other.encodedColumnQualifierCounters_);
- }
- onChanged();
- }
- } else {
- if (!other.encodedColumnQualifierCounters_.isEmpty()) {
- if (encodedColumnQualifierCountersBuilder_.isEmpty()) {
- encodedColumnQualifierCountersBuilder_.dispose();
- encodedColumnQualifierCountersBuilder_ = null;
- encodedColumnQualifierCounters_ = other.encodedColumnQualifierCounters_;
- bitField0_ = (bitField0_ & ~0x80000000);
- encodedColumnQualifierCountersBuilder_ =
- com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
- getEncodedColumnQualifierCountersFieldBuilder() : null;
- } else {
- encodedColumnQualifierCountersBuilder_.addAllMessages(other.encodedColumnQualifierCounters_);
- }
- }
+ if (other.hasEncodedColumnQualifierCounter()) {
+ setEncodedColumnQualifierCounter(other.getEncodedColumnQualifierCounter());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
@@ -5682,12 +5605,6 @@ public final class PTableProtos {
return false;
}
}
- for (int i = 0; i < getEncodedColumnQualifierCountersCount(); i++) {
- if (!getEncodedColumnQualifierCounters(i).isInitialized()) {
-
- return false;
- }
- }
return true;
}
@@ -7467,245 +7384,38 @@ public final class PTableProtos {
return this;
}
- // repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;
- private java.util.List<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter> encodedColumnQualifierCounters_ =
- java.util.Collections.emptyList();
- private void ensureEncodedColumnQualifierCountersIsMutable() {
- if (!((bitField0_ & 0x80000000) == 0x80000000)) {
- encodedColumnQualifierCounters_ = new java.util.ArrayList<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter>(encodedColumnQualifierCounters_);
- bitField0_ |= 0x80000000;
- }
- }
-
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder> encodedColumnQualifierCountersBuilder_;
-
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public java.util.List<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter> getEncodedColumnQualifierCountersList() {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- return java.util.Collections.unmodifiableList(encodedColumnQualifierCounters_);
- } else {
- return encodedColumnQualifierCountersBuilder_.getMessageList();
- }
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public int getEncodedColumnQualifierCountersCount() {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- return encodedColumnQualifierCounters_.size();
- } else {
- return encodedColumnQualifierCountersBuilder_.getCount();
- }
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter getEncodedColumnQualifierCounters(int index) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- return encodedColumnQualifierCounters_.get(index);
- } else {
- return encodedColumnQualifierCountersBuilder_.getMessage(index);
- }
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public Builder setEncodedColumnQualifierCounters(
- int index, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter value) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.set(index, value);
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.setMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public Builder setEncodedColumnQualifierCounters(
- int index, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder builderForValue) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.set(index, builderForValue.build());
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.setMessage(index, builderForValue.build());
- }
- return this;
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public Builder addEncodedColumnQualifierCounters(org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter value) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.add(value);
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.addMessage(value);
- }
- return this;
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public Builder addEncodedColumnQualifierCounters(
- int index, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter value) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- if (value == null) {
- throw new NullPointerException();
- }
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.add(index, value);
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.addMessage(index, value);
- }
- return this;
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public Builder addEncodedColumnQualifierCounters(
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder builderForValue) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.add(builderForValue.build());
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.addMessage(builderForValue.build());
- }
- return this;
- }
+ // optional int32 encodedColumnQualifierCounter = 32;
+ private int encodedColumnQualifierCounter_ ;
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- public Builder addEncodedColumnQualifierCounters(
- int index, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder builderForValue) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.add(index, builderForValue.build());
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.addMessage(index, builderForValue.build());
- }
- return this;
+ public boolean hasEncodedColumnQualifierCounter() {
+ return ((bitField0_ & 0x80000000) == 0x80000000);
}
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- public Builder addAllEncodedColumnQualifierCounters(
- java.lang.Iterable<? extends org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter> values) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- ensureEncodedColumnQualifierCountersIsMutable();
- super.addAll(values, encodedColumnQualifierCounters_);
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.addAllMessages(values);
- }
- return this;
+ public int getEncodedColumnQualifierCounter() {
+ return encodedColumnQualifierCounter_;
}
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- public Builder clearEncodedColumnQualifierCounters() {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- encodedColumnQualifierCounters_ = java.util.Collections.emptyList();
- bitField0_ = (bitField0_ & ~0x80000000);
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.clear();
- }
+ public Builder setEncodedColumnQualifierCounter(int value) {
+ bitField0_ |= 0x80000000;
+ encodedColumnQualifierCounter_ = value;
+ onChanged();
return this;
}
/**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
+ * <code>optional int32 encodedColumnQualifierCounter = 32;</code>
*/
- public Builder removeEncodedColumnQualifierCounters(int index) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- ensureEncodedColumnQualifierCountersIsMutable();
- encodedColumnQualifierCounters_.remove(index);
- onChanged();
- } else {
- encodedColumnQualifierCountersBuilder_.remove(index);
- }
+ public Builder clearEncodedColumnQualifierCounter() {
+ bitField0_ = (bitField0_ & ~0x80000000);
+ encodedColumnQualifierCounter_ = 0;
+ onChanged();
return this;
}
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder getEncodedColumnQualifierCountersBuilder(
- int index) {
- return getEncodedColumnQualifierCountersFieldBuilder().getBuilder(index);
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder getEncodedColumnQualifierCountersOrBuilder(
- int index) {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- return encodedColumnQualifierCounters_.get(index); } else {
- return encodedColumnQualifierCountersBuilder_.getMessageOrBuilder(index);
- }
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public java.util.List<? extends org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder>
- getEncodedColumnQualifierCountersOrBuilderList() {
- if (encodedColumnQualifierCountersBuilder_ != null) {
- return encodedColumnQualifierCountersBuilder_.getMessageOrBuilderList();
- } else {
- return java.util.Collections.unmodifiableList(encodedColumnQualifierCounters_);
- }
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder addEncodedColumnQualifierCountersBuilder() {
- return getEncodedColumnQualifierCountersFieldBuilder().addBuilder(
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.getDefaultInstance());
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder addEncodedColumnQualifierCountersBuilder(
- int index) {
- return getEncodedColumnQualifierCountersFieldBuilder().addBuilder(
- index, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.getDefaultInstance());
- }
- /**
- * <code>repeated .EncodedColumnQualifierCounter encodedColumnQualifierCounters = 32;</code>
- */
- public java.util.List<org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder>
- getEncodedColumnQualifierCountersBuilderList() {
- return getEncodedColumnQualifierCountersFieldBuilder().getBuilderList();
- }
- private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder>
- getEncodedColumnQualifierCountersFieldBuilder() {
- if (encodedColumnQualifierCountersBuilder_ == null) {
- encodedColumnQualifierCountersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder>(
- encodedColumnQualifierCounters_,
- ((bitField0_ & 0x80000000) == 0x80000000),
- getParentForChildren(),
- isClean());
- encodedColumnQualifierCounters_ = null;
- }
- return encodedColumnQualifierCountersBuilder_;
- }
// @@protoc_insertion_point(builder_scope:PTable)
}
@@ -7718,645 +7428,25 @@ public final class PTableProtos {
// @@protoc_insertion_point(class_scope:PTable)
}
- public interface EncodedColumnQualifierCounterOrBuilder
- extends com.google.protobuf.MessageOrBuilder {
-
- // required string familyName = 1;
- /**
- * <code>required string familyName = 1;</code>
- */
- boolean hasFamilyName();
- /**
- * <code>required string familyName = 1;</code>
- */
- java.lang.String getFamilyName();
- /**
- * <code>required string familyName = 1;</code>
- */
- com.google.protobuf.ByteString
- getFamilyNameBytes();
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_PColumn_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_PColumn_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_PTableStats_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_PTableStats_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_PTable_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_PTable_fieldAccessorTable;
- // required int32 counter = 2;
- /**
- * <code>required int32 counter = 2;</code>
- */
- boolean hasCounter();
- /**
- * <code>required int32 counter = 2;</code>
- */
- int getCounter();
- }
- /**
- * Protobuf type {@code EncodedColumnQualifierCounter}
- */
- public static final class EncodedColumnQualifierCounter extends
- com.google.protobuf.GeneratedMessage
- implements EncodedColumnQualifierCounterOrBuilder {
- // Use EncodedColumnQualifierCounter.newBuilder() to construct.
- private EncodedColumnQualifierCounter(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
- super(builder);
- this.unknownFields = builder.getUnknownFields();
- }
- private EncodedColumnQualifierCounter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
- private static final EncodedColumnQualifierCounter defaultInstance;
- public static EncodedColumnQualifierCounter getDefaultInstance() {
- return defaultInstance;
- }
-
- public EncodedColumnQualifierCounter getDefaultInstanceForType() {
- return defaultInstance;
- }
-
- private final com.google.protobuf.UnknownFieldSet unknownFields;
- @java.lang.Override
- public final com.google.protobuf.UnknownFieldSet
- getUnknownFields() {
- return this.unknownFields;
- }
- private EncodedColumnQualifierCounter(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- initFields();
- int mutable_bitField0_ = 0;
- com.google.protobuf.UnknownFieldSet.Builder unknownFields =
- com.google.protobuf.UnknownFieldSet.newBuilder();
- try {
- boolean done = false;
- while (!done) {
- int tag = input.readTag();
- switch (tag) {
- case 0:
- done = true;
- break;
- default: {
- if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
- done = true;
- }
- break;
- }
- case 10: {
- bitField0_ |= 0x00000001;
- familyName_ = input.readBytes();
- break;
- }
- case 16: {
- bitField0_ |= 0x00000002;
- counter_ = input.readInt32();
- break;
- }
- }
- }
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- throw e.setUnfinishedMessage(this);
- } catch (java.io.IOException e) {
- throw new com.google.protobuf.InvalidProtocolBufferException(
- e.getMessage()).setUnfinishedMessage(this);
- } finally {
- this.unknownFields = unknownFields.build();
- makeExtensionsImmutable();
- }
- }
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.phoenix.coprocessor.generated.PTableProtos.internal_static_EncodedColumnQualifierCounter_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.phoenix.coprocessor.generated.PTableProtos.internal_static_EncodedColumnQualifierCounter_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.class, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder.class);
- }
-
- public static com.google.protobuf.Parser<EncodedColumnQualifierCounter> PARSER =
- new com.google.protobuf.AbstractParser<EncodedColumnQualifierCounter>() {
- public EncodedColumnQualifierCounter parsePartialFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return new EncodedColumnQualifierCounter(input, extensionRegistry);
- }
- };
-
- @java.lang.Override
- public com.google.protobuf.Parser<EncodedColumnQualifierCounter> getParserForType() {
- return PARSER;
- }
-
- private int bitField0_;
- // required string familyName = 1;
- public static final int FAMILYNAME_FIELD_NUMBER = 1;
- private java.lang.Object familyName_;
- /**
- * <code>required string familyName = 1;</code>
- */
- public boolean hasFamilyName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public java.lang.String getFamilyName() {
- java.lang.Object ref = familyName_;
- if (ref instanceof java.lang.String) {
- return (java.lang.String) ref;
- } else {
- com.google.protobuf.ByteString bs =
- (com.google.protobuf.ByteString) ref;
- java.lang.String s = bs.toStringUtf8();
- if (bs.isValidUtf8()) {
- familyName_ = s;
- }
- return s;
- }
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public com.google.protobuf.ByteString
- getFamilyNameBytes() {
- java.lang.Object ref = familyName_;
- if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- familyName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
-
- // required int32 counter = 2;
- public static final int COUNTER_FIELD_NUMBER = 2;
- private int counter_;
- /**
- * <code>required int32 counter = 2;</code>
- */
- public boolean hasCounter() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>required int32 counter = 2;</code>
- */
- public int getCounter() {
- return counter_;
- }
-
- private void initFields() {
- familyName_ = "";
- counter_ = 0;
- }
- private byte memoizedIsInitialized = -1;
- public final boolean isInitialized() {
- byte isInitialized = memoizedIsInitialized;
- if (isInitialized != -1) return isInitialized == 1;
-
- if (!hasFamilyName()) {
- memoizedIsInitialized = 0;
- return false;
- }
- if (!hasCounter()) {
- memoizedIsInitialized = 0;
- return false;
- }
- memoizedIsInitialized = 1;
- return true;
- }
-
- public void writeTo(com.google.protobuf.CodedOutputStream output)
- throws java.io.IOException {
- getSerializedSize();
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- output.writeBytes(1, getFamilyNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- output.writeInt32(2, counter_);
- }
- getUnknownFields().writeTo(output);
- }
-
- private int memoizedSerializedSize = -1;
- public int getSerializedSize() {
- int size = memoizedSerializedSize;
- if (size != -1) return size;
-
- size = 0;
- if (((bitField0_ & 0x00000001) == 0x00000001)) {
- size += com.google.protobuf.CodedOutputStream
- .computeBytesSize(1, getFamilyNameBytes());
- }
- if (((bitField0_ & 0x00000002) == 0x00000002)) {
- size += com.google.protobuf.CodedOutputStream
- .computeInt32Size(2, counter_);
- }
- size += getUnknownFields().getSerializedSize();
- memoizedSerializedSize = size;
- return size;
- }
-
- private static final long serialVersionUID = 0L;
- @java.lang.Override
- protected java.lang.Object writeReplace()
- throws java.io.ObjectStreamException {
- return super.writeReplace();
- }
-
- @java.lang.Override
- public boolean equals(final java.lang.Object obj) {
- if (obj == this) {
- return true;
- }
- if (!(obj instanceof org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter)) {
- return super.equals(obj);
- }
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter other = (org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter) obj;
-
- boolean result = true;
- result = result && (hasFamilyName() == other.hasFamilyName());
- if (hasFamilyName()) {
- result = result && getFamilyName()
- .equals(other.getFamilyName());
- }
- result = result && (hasCounter() == other.hasCounter());
- if (hasCounter()) {
- result = result && (getCounter()
- == other.getCounter());
- }
- result = result &&
- getUnknownFields().equals(other.getUnknownFields());
- return result;
- }
-
- private int memoizedHashCode = 0;
- @java.lang.Override
- public int hashCode() {
- if (memoizedHashCode != 0) {
- return memoizedHashCode;
- }
- int hash = 41;
- hash = (19 * hash) + getDescriptorForType().hashCode();
- if (hasFamilyName()) {
- hash = (37 * hash) + FAMILYNAME_FIELD_NUMBER;
- hash = (53 * hash) + getFamilyName().hashCode();
- }
- if (hasCounter()) {
- hash = (37 * hash) + COUNTER_FIELD_NUMBER;
- hash = (53 * hash) + getCounter();
- }
- hash = (29 * hash) + getUnknownFields().hashCode();
- memoizedHashCode = hash;
- return hash;
- }
-
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(
- com.google.protobuf.ByteString data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(
- com.google.protobuf.ByteString data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(byte[] data)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(
- byte[] data,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws com.google.protobuf.InvalidProtocolBufferException {
- return PARSER.parseFrom(data, extensionRegistry);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseDelimitedFrom(java.io.InputStream input)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseDelimitedFrom(
- java.io.InputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseDelimitedFrom(input, extensionRegistry);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(
- com.google.protobuf.CodedInputStream input)
- throws java.io.IOException {
- return PARSER.parseFrom(input);
- }
- public static org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parseFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- return PARSER.parseFrom(input, extensionRegistry);
- }
-
- public static Builder newBuilder() { return Builder.create(); }
- public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter prototype) {
- return newBuilder().mergeFrom(prototype);
- }
- public Builder toBuilder() { return newBuilder(this); }
-
- @java.lang.Override
- protected Builder newBuilderForType(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- Builder builder = new Builder(parent);
- return builder;
- }
- /**
- * Protobuf type {@code EncodedColumnQualifierCounter}
- */
- public static final class Builder extends
- com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounterOrBuilder {
- public static final com.google.protobuf.Descriptors.Descriptor
- getDescriptor() {
- return org.apache.phoenix.coprocessor.generated.PTableProtos.internal_static_EncodedColumnQualifierCounter_descriptor;
- }
-
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internalGetFieldAccessorTable() {
- return org.apache.phoenix.coprocessor.generated.PTableProtos.internal_static_EncodedColumnQualifierCounter_fieldAccessorTable
- .ensureFieldAccessorsInitialized(
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.class, org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.Builder.class);
- }
-
- // Construct using org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.newBuilder()
- private Builder() {
- maybeForceBuilderInitialization();
- }
-
- private Builder(
- com.google.protobuf.GeneratedMessage.BuilderParent parent) {
- super(parent);
- maybeForceBuilderInitialization();
- }
- private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
- }
- }
- private static Builder create() {
- return new Builder();
- }
-
- public Builder clear() {
- super.clear();
- familyName_ = "";
- bitField0_ = (bitField0_ & ~0x00000001);
- counter_ = 0;
- bitField0_ = (bitField0_ & ~0x00000002);
- return this;
- }
-
- public Builder clone() {
- return create().mergeFrom(buildPartial());
- }
-
- public com.google.protobuf.Descriptors.Descriptor
- getDescriptorForType() {
- return org.apache.phoenix.coprocessor.generated.PTableProtos.internal_static_EncodedColumnQualifierCounter_descriptor;
- }
-
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter getDefaultInstanceForType() {
- return org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.getDefaultInstance();
- }
-
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter build() {
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter result = buildPartial();
- if (!result.isInitialized()) {
- throw newUninitializedMessageException(result);
- }
- return result;
- }
-
- public org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter buildPartial() {
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter result = new org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter(this);
- int from_bitField0_ = bitField0_;
- int to_bitField0_ = 0;
- if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
- to_bitField0_ |= 0x00000001;
- }
- result.familyName_ = familyName_;
- if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
- to_bitField0_ |= 0x00000002;
- }
- result.counter_ = counter_;
- result.bitField0_ = to_bitField0_;
- onBuilt();
- return result;
- }
-
- public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter) {
- return mergeFrom((org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter)other);
- } else {
- super.mergeFrom(other);
- return this;
- }
- }
-
- public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter other) {
- if (other == org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter.getDefaultInstance()) return this;
- if (other.hasFamilyName()) {
- bitField0_ |= 0x00000001;
- familyName_ = other.familyName_;
- onChanged();
- }
- if (other.hasCounter()) {
- setCounter(other.getCounter());
- }
- this.mergeUnknownFields(other.getUnknownFields());
- return this;
- }
-
- public final boolean isInitialized() {
- if (!hasFamilyName()) {
-
- return false;
- }
- if (!hasCounter()) {
-
- return false;
- }
- return true;
- }
-
- public Builder mergeFrom(
- com.google.protobuf.CodedInputStream input,
- com.google.protobuf.ExtensionRegistryLite extensionRegistry)
- throws java.io.IOException {
- org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter parsedMessage = null;
- try {
- parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
- } catch (com.google.protobuf.InvalidProtocolBufferException e) {
- parsedMessage = (org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedColumnQualifierCounter) e.getUnfinishedMessage();
- throw e;
- } finally {
- if (parsedMessage != null) {
- mergeFrom(parsedMessage);
- }
- }
- return this;
- }
- private int bitField0_;
-
- // required string familyName = 1;
- private java.lang.Object familyName_ = "";
- /**
- * <code>required string familyName = 1;</code>
- */
- public boolean hasFamilyName() {
- return ((bitField0_ & 0x00000001) == 0x00000001);
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public java.lang.String getFamilyName() {
- java.lang.Object ref = familyName_;
- if (!(ref instanceof java.lang.String)) {
- java.lang.String s = ((com.google.protobuf.ByteString) ref)
- .toStringUtf8();
- familyName_ = s;
- return s;
- } else {
- return (java.lang.String) ref;
- }
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public com.google.protobuf.ByteString
- getFamilyNameBytes() {
- java.lang.Object ref = familyName_;
- if (ref instanceof String) {
- com.google.protobuf.ByteString b =
- com.google.protobuf.ByteString.copyFromUtf8(
- (java.lang.String) ref);
- familyName_ = b;
- return b;
- } else {
- return (com.google.protobuf.ByteString) ref;
- }
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public Builder setFamilyName(
- java.lang.String value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- familyName_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public Builder clearFamilyName() {
- bitField0_ = (bitField0_ & ~0x00000001);
- familyName_ = getDefaultInstance().getFamilyName();
- onChanged();
- return this;
- }
- /**
- * <code>required string familyName = 1;</code>
- */
- public Builder setFamilyNameBytes(
- com.google.protobuf.ByteString value) {
- if (value == null) {
- throw new NullPointerException();
- }
- bitField0_ |= 0x00000001;
- familyName_ = value;
- onChanged();
- return this;
- }
-
- // required int32 counter = 2;
- private int counter_ ;
- /**
- * <code>required int32 counter = 2;</code>
- */
- public boolean hasCounter() {
- return ((bitField0_ & 0x00000002) == 0x00000002);
- }
- /**
- * <code>required int32 counter = 2;</code>
- */
- public int getCounter() {
- return counter_;
- }
- /**
- * <code>required int32 counter = 2;</code>
- */
- public Builder setCounter(int value) {
- bitField0_ |= 0x00000002;
- counter_ = value;
- onChanged();
- return this;
- }
- /**
- * <code>required int32 counter = 2;</code>
- */
- public Builder clearCounter() {
- bitField0_ = (bitField0_ & ~0x00000002);
- counter_ = 0;
- onChanged();
- return this;
- }
-
- // @@protoc_insertion_point(builder_scope:EncodedColumnQualifierCounter)
- }
-
- static {
- defaultInstance = new EncodedColumnQualifierCounter(true);
- defaultInstance.initFields();
- }
-
- // @@protoc_insertion_point(class_scope:EncodedColumnQualifierCounter)
- }
-
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_PColumn_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_PColumn_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_PTableStats_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_PTableStats_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_PTable_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_PTable_fieldAccessorTable;
- private static com.google.protobuf.Descriptors.Descriptor
- internal_static_EncodedColumnQualifierCounter_descriptor;
- private static
- com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_EncodedColumnQualifierCounter_fieldAccessorTable;
-
- public static com.google.protobuf.Descriptors.FileDescriptor
- getDescriptor() {
- return descriptor;
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
@@ -8374,7 +7464,7 @@ public final class PTableProtos {
"PTableStats\022\013\n\003key\030\001 \002(\014\022\016\n\006values\030\002 \003(\014",
"\022\033\n\023guidePostsByteCount\030\003 \001(\003\022\025\n\rkeyByte" +
"sCount\030\004 \001(\003\022\027\n\017guidePostsCount\030\005 \001(\005\022!\n" +
- "\013pGuidePosts\030\006 \001(\0132\014.PGuidePosts\"\275\006\n\006PTa" +
+ "\013pGuidePosts\030\006 \001(\0132\014.PGuidePosts\"\234\006\n\006PTa" +
"ble\022\027\n\017schemaNameBytes\030\001 \002(\014\022\026\n\016tableNam" +
"eBytes\030\002 \002(\014\022\036\n\ttableType\030\003 \002(\0162\013.PTable" +
"Type\022\022\n\nindexState\030\004 \001(\t\022\026\n\016sequenceNumb" +
@@ -8393,14 +7483,12 @@ public final class PTableProtos {
"rOptimizable\030\032 \001(\010\022\025\n\rtransactional\030\033 \001(" +
"\010\022\034\n\024updateCacheFrequency\030\034 \001(\003\022\035\n\025index",
"DisableTimestamp\030\035 \001(\003\022\031\n\021isNamespaceMap" +
- "ped\030\036 \001(\010\022\025\n\rstorageScheme\030\037 \001(\014\022F\n\036enco" +
- "dedColumnQualifierCounters\030 \003(\0132\036.Encod" +
- "edColumnQualifierCounter\"D\n\035EncodedColum" +
- "nQualifierCounter\022\022\n\nfamilyName\030\001 \002(\t\022\017\n" +
- "\007counter\030\002 \002(\005*A\n\nPTableType\022\n\n\006SYSTEM\020\000" +
- "\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020" +
- "\004B@\n(org.apache.phoenix.coprocessor.gene" +
- "ratedB\014PTableProtosH\001\210\001\001\240\001\001"
+ "ped\030\036 \001(\010\022\025\n\rstorageScheme\030\037 \001(\014\022%\n\035enco" +
+ "dedColumnQualifierCounter\030 \001(\005*A\n\nPTabl" +
+ "eType\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n" +
+ "\005INDEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache.phoenix" +
+ ".coprocessor.generatedB\014PTableProtosH\001\210\001" +
+ "\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -8424,13 +7512,7 @@ public final class PTableProtos {
internal_static_PTable_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_PTable_descriptor,
- new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "StorageScheme", "EncodedColumnQualifierCounters", });
- internal_static_EncodedColumnQualifierCounter_descriptor =
- getDescriptor().getMessageTypes().get(3);
- internal_static_EncodedColumnQualifierCounter_fieldAccessorTable = new
- com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_EncodedColumnQualifierCounter_descriptor,
- new java.lang.String[] { "FamilyName", "Counter", });
+ new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", "StoreNulls", "BaseColumnCount", "RowKeyOrderOptimizable", "Transactional", "UpdateCacheFrequency", "IndexDisableTimestamp", "IsNamespaceMapped", "StorageScheme", "EncodedColumnQualifierCounter", });
return null;
}
};
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
index e181e80..dd68944 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/SortMergeJoinPlan.java
@@ -651,6 +651,7 @@ public class SortMergeJoinPlan implements QueryPlan {
byte[] b = new byte[length];
buffer.get(b);
Result result = ResultUtil.toResult(new ImmutableBytesWritable(b));
+ //TODO: samarth make joins work with position based look up.
return new ResultTuple(result);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
index a884949..b6e1de2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/TupleProjector.java
@@ -17,6 +17,9 @@
*/
package org.apache.phoenix.execute;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_QUALIFIER;
+
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
@@ -50,9 +53,6 @@ import org.apache.phoenix.util.SchemaUtil;
import com.google.common.base.Preconditions;
public class TupleProjector {
- public static final byte[] VALUE_COLUMN_FAMILY = Bytes.toBytes("_v");
- public static final byte[] VALUE_COLUMN_QUALIFIER = new byte[0];
-
private static final String SCAN_PROJECTOR = "scanProjector";
private final KeyValueSchema schema;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index 13b95b5..b7e1975 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
+import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
@@ -222,8 +223,12 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
for (ColumnReference ref : mutableColumns) {
scan.addColumn(ref.getFamily(), ref.getQualifier());
}
- // Indexes inherit the storage scheme of the data table which means all the indexes have the same
- // storage scheme and empty key value qualifier.
+ /*
+ * Indexes inherit the storage scheme of the data table which means all the indexes have the same
+ * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
+ * supporting new indexes over existing data tables to have a different storage scheme than the data
+ * table.
+ */
byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
// Project empty key value column
@@ -322,17 +327,20 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
boolean hasPuts = false;
LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
long writePtr;
+ Cell cell = cells.get(i);
do {
- Cell cell = cells.get(i);
hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
writePtr = cell.getTimestamp();
+ ListIterator<Cell> it = singleTimeCells.listIterator();
do {
// Add at the beginning of the list to match the expected HBase
// newest to oldest sort order (which TxTableState relies on
- // with the Result.getLatestColumnValue() calls).
- singleTimeCells.addFirst(cell);
- } while (++i < nCells && cells.get(i).getTimestamp() == writePtr);
- } while (i < nCells && cells.get(i).getTimestamp() <= readPtr);
+ // with the Result.getLatestColumnValue() calls). However, we
+ // still want to add Cells in the expected order for each time
+ // bound as otherwise we won't find it in our old state.
+ it.add(cell);
+ } while (++i < nCells && (cell=cells.get(i)).getTimestamp() == writePtr);
+ } while (i < nCells && cell.getTimestamp() <= readPtr);
// Generate point delete markers for the prior row deletion of the old index value.
// The write timestamp is the next timestamp, not the current timestamp,
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 0dca8ba..5a923eb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -22,7 +22,9 @@ import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EXPECTED_
import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.SCAN_ACTUAL_START_ROW;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIMEOUT_COUNTER;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_NAME;
import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
+import static org.apache.phoenix.util.ScanUtil.setMinMaxQualifiersOnScan;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
@@ -46,6 +48,8 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
+import javax.management.Query;
+
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.Scan;
@@ -71,6 +75,7 @@ import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.MetaDataClient;
@@ -225,7 +230,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
if(offset!=null){
ScanUtil.addOffsetAttribute(scan, offset);
}
- if (EncodedColumnsUtil.usesEncodedColumnNames(table)) {
+ //TODO: samarth add condition to not do position based look ups in case of joins so that we won't need to do the hacky check inside co-processors.
+ if (setMinMaxQualifiersOnScan(table)) {
Pair<Integer, Integer> minMaxQualifiers = getMinMaxQualifiers(scan, context);
if (minMaxQualifiers != null) {
scan.setAttribute(BaseScannerRegionObserver.MIN_QUALIFIER, PInteger.INSTANCE.toBytes(minMaxQualifiers.getFirst()));
@@ -243,10 +249,15 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
checkArgument(EncodedColumnsUtil.usesEncodedColumnNames(table), "Method should only be used for tables using encoded column names");
Integer minQualifier = null;
Integer maxQualifier = null;
+ boolean emptyKVProjected = false;
for (Pair<byte[], byte[]> whereCol : context.getWhereConditionColumns()) {
byte[] cq = whereCol.getSecond();
if (cq != null) {
int qualifier = (Integer)PInteger.INSTANCE.toObject(cq);
+ if (qualifier == ENCODED_EMPTY_COLUMN_NAME) {
+ emptyKVProjected = true;
+ continue;
+ }
if (minQualifier == null && maxQualifier == null) {
minQualifier = maxQualifier = qualifier;
} else {
@@ -264,6 +275,10 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
for (byte[] cq : entry.getValue()) {
if (cq != null) {
int qualifier = (Integer)PInteger.INSTANCE.toObject(cq);
+ if (qualifier == ENCODED_EMPTY_COLUMN_NAME) {
+ emptyKVProjected = true;
+ continue;
+ }
if (minQualifier == null && maxQualifier == null) {
minQualifier = maxQualifier = qualifier;
} else {
@@ -277,7 +292,9 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
}
}
}
- if (minQualifier == null) {
+ if (minQualifier == null && emptyKVProjected) {
+ return new Pair<>(ENCODED_EMPTY_COLUMN_NAME, ENCODED_EMPTY_COLUMN_NAME);
+ } else if (minQualifier == null) {
return null;
}
return new Pair<>(minQualifier, maxQualifier);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
index 3293f65..1e5f09e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/LookAheadResultIterator.java
@@ -49,7 +49,7 @@ abstract public class LookAheadResultIterator implements PeekingResultIterator {
};
}
- private final static Tuple UNINITIALIZED = new ResultTuple();
+ private final static Tuple UNINITIALIZED = ResultTuple.EMPTY_TUPLE;
private Tuple next = UNINITIALIZED;
abstract protected Tuple advance() throws SQLException;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferQueue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferQueue.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferQueue.java
index 8ada952..135ab26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferQueue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/MappedByteBufferQueue.java
@@ -180,6 +180,7 @@ public abstract class MappedByteBufferQueue<T> extends AbstractQueue<T> {
return this.index;
}
+ @Override
public int size() {
if (flushBuffer)
return flushedCount;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
index 8dcb2e8..e4c52c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/OrderedResultIterator.java
@@ -32,6 +32,7 @@ import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.OrderByExpression;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.ServerUtil;
import org.apache.phoenix.util.SizedUtil;
import com.google.common.base.Function;
@@ -264,7 +265,7 @@ public class OrderedResultIterator implements PeekingResultIterator {
}
this.byteSize = queueEntries.getByteSize();
} catch (IOException e) {
- throw new SQLException("", e);
+ ServerUtil.createIOException(e.getMessage(), e);
} finally {
delegate.close();
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
index 88e141a..d287749 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RegionScannerResultIterator.java
@@ -24,16 +24,24 @@ import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.schema.tuple.BoundedSkipNullCellsList;
import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
+import org.apache.phoenix.util.ScanUtil;
import org.apache.phoenix.util.ServerUtil;
public class RegionScannerResultIterator extends BaseResultIterator {
private final RegionScanner scanner;
+ private final Pair<Integer, Integer> minMaxQualifiers;
+ private final boolean useQualifierAsIndex;
- public RegionScannerResultIterator(RegionScanner scanner) {
+ public RegionScannerResultIterator(RegionScanner scanner, Pair<Integer, Integer> minMaxQualifiers, boolean isJoin) {
this.scanner = scanner;
+ this.useQualifierAsIndex = ScanUtil.useQualifierAsIndex(minMaxQualifiers, isJoin);
+ this.minMaxQualifiers = minMaxQualifiers;
}
@Override
@@ -43,7 +51,7 @@ public class RegionScannerResultIterator extends BaseResultIterator {
synchronized (scanner) {
try {
// TODO: size
- List<Cell> results = new ArrayList<Cell>();
+ List<Cell> results = useQualifierAsIndex ? new BoundedSkipNullCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond()) : new ArrayList<Cell>();
// Results are potentially returned even when the return value of s.next is false
// since this is an indication of whether or not there are more values after the
// ones returned
@@ -53,7 +61,7 @@ public class RegionScannerResultIterator extends BaseResultIterator {
}
// We instantiate a new tuple because in all cases currently we hang on to it
// (i.e. to compute and hold onto the TopN).
- MultiKeyValueTuple tuple = new MultiKeyValueTuple();
+ Tuple tuple = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
tuple.setKeyValues(results);
return tuple;
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 1c9bc51..d02c29d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -586,9 +586,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData {
newCells.addAll(cells);
newCells.add(kv);
Collections.sort(newCells, KeyValue.COMPARATOR);
- resultTuple.setResult(Result.create(newCells));
+ tuple = new ResultTuple(Result.create(newCells));
}
-
return tuple;
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index 47c17ae..3ca48a1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -107,7 +107,7 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable {
private final static String STRING_FALSE = "0";
private final static BigDecimal BIG_DECIMAL_FALSE = BigDecimal.valueOf(0);
private final static Integer INTEGER_FALSE = Integer.valueOf(0);
- private final static Tuple BEFORE_FIRST = new ResultTuple();
+ private final static Tuple BEFORE_FIRST = ResultTuple.EMPTY_TUPLE;
private final ResultIterator scanner;
private final RowProjector rowProjector;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
index 908a117..2d7550a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/join/HashCacheFactory.java
@@ -122,6 +122,7 @@ public class HashCacheFactory implements ServerCacheFactory {
int resultSize = (int)Bytes.readVLong(hashCacheByteArray, offset);
offset += WritableUtils.decodeVIntSize(hashCacheByteArray[offset]);
ImmutableBytesWritable value = new ImmutableBytesWritable(hashCacheByteArray,offset,resultSize);
+ //TODO: samarth make joins work with position look up.
Tuple result = new ResultTuple(ResultUtil.toResult(value));
ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(result, onExpressions);
List<Tuple> tuples = hashCacheMap.get(key);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index f57043b..4984321 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -29,7 +29,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CURRENT_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.CYCLE_FLAG;
@@ -39,7 +39,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODED_COLUMN_QUALIFIER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_ROW_COUNT;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH;
@@ -120,7 +120,6 @@ import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.util.ByteUtil;
/**
@@ -150,23 +149,30 @@ public interface QueryConstants {
public final static byte[] OFFSET_ROW_KEY_BYTES = Bytes.toBytes(OFFSET_ROW_KEY);
public final static ImmutableBytesPtr OFFSET_ROW_KEY_PTR = new ImmutableBytesPtr(OFFSET_ROW_KEY_BYTES);
- public final static PName SINGLE_COLUMN_NAME = PNameFactory.newNormalizedName("s");
- public final static PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newNormalizedName("s");
- public final static byte[] SINGLE_COLUMN = SINGLE_COLUMN_NAME.getBytes();
- public final static byte[] SINGLE_COLUMN_FAMILY = SINGLE_COLUMN_FAMILY_NAME.getBytes();
-
public static final long AGG_TIMESTAMP = HConstants.LATEST_TIMESTAMP;
/**
* Key used for a single row aggregation where there is no group by
*/
public final static byte[] UNGROUPED_AGG_ROW_KEY = Bytes.toBytes("a");
- public final static PName AGG_COLUMN_NAME = SINGLE_COLUMN_NAME;
- public final static PName AGG_COLUMN_FAMILY_NAME = SINGLE_COLUMN_FAMILY_NAME;
-
- public static final byte[] ARRAY_VALUE_COLUMN_FAMILY = Bytes.toBytes("a");
- // Use empty byte array for column qualifier so as not to accidentally conflict with any other columns
- public static final byte[] ARRAY_VALUE_COLUMN_QUALIFIER = ByteUtil.EMPTY_BYTE_ARRAY;
+
+ /** BEGIN Set of reserved column qualifiers **/
+
+ public static final String RESERVED_COLUMN_FAMILY = "_r";
+ public static final byte[] RESERVED_COLUMN_FAMILY_BYTES = Bytes.toBytes(RESERVED_COLUMN_FAMILY);
+
+ public static final byte[] VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES;
+ public static final byte[] VALUE_COLUMN_QUALIFIER = PInteger.INSTANCE.toBytes(1);
+
+ public static final byte[] ARRAY_VALUE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES;
+ public static final byte[] ARRAY_VALUE_COLUMN_QUALIFIER = PInteger.INSTANCE.toBytes(2);
+
+ public final static PName SINGLE_COLUMN_NAME = PNameFactory.newNormalizedName("s");
+ public final static PName SINGLE_COLUMN_FAMILY_NAME = PNameFactory.newNormalizedName("s");
+ public final static byte[] SINGLE_COLUMN = PInteger.INSTANCE.toBytes(3);
+ public final static byte[] SINGLE_COLUMN_FAMILY = RESERVED_COLUMN_FAMILY_BYTES;
+ /** END Set of reserved column qualifiers **/
+
public static final byte[] TRUE = new byte[] {1};
/**
@@ -214,6 +220,13 @@ public interface QueryConstants {
public static final int NANOS_IN_SECOND = BigDecimal.valueOf(Math.pow(10, 9)).intValue();
public static final int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100;
public static final int BASE_TABLE_BASE_COLUMN_COUNT = -1;
+
+ //TODO: samarth think about this more.
+ /**
+ * We mark counter values 0 to 10 as reserved. Value 0 is used by {@link #ENCODED_EMPTY_COLUMN_NAME}. Values 1-10
+ * are reserved for special column qualifiers returned by Phoenix co-processors.
+ */
+ public static final int ENCODED_CQ_COUNTER_INITIAL_VALUE = 11;
public static final String CREATE_TABLE_METADATA =
// Do not use IF NOT EXISTS as we sometimes catch the TableAlreadyExists
// exception and add columns to the SYSTEM.TABLE dynamically.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index c49ad1d..8c7c882 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -235,7 +235,7 @@ public class QueryServicesOptions {
// doesn't depend on phoenix-core.
public static final String DEFAULT_QUERY_SERVER_SERIALIZATION = "PROTOBUF";
public static final int DEFAULT_QUERY_SERVER_HTTP_PORT = 8765;
- public static final boolean DEFAULT_RENEW_LEASE_ENABLED = true;
+ public static final boolean DEFAULT_RENEW_LEASE_ENABLED = false;
public static final int DEFAULT_RUN_RENEW_LEASE_FREQUENCY_INTERVAL_MILLISECONDS =
DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD / 2;
public static final int DEFAULT_RENEW_LEASE_THRESHOLD_MILLISECONDS =
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
index 1690bd3..e7672f2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/DelegateTable.java
@@ -18,7 +18,6 @@
package org.apache.phoenix.schema;
import java.util.List;
-import java.util.Map;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
@@ -289,7 +288,7 @@ public class DelegateTable implements PTable {
}
@Override
- public Map<String, Integer> getEncodedCQCounters() {
- return delegate.getEncodedCQCounters();
+ public EncodedCQCounter getEncodedCQCounter() {
+ return delegate.getEncodedCQCounter();
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9525c72f/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
index 1ab8c86..1bcf808 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/KeyValueSchema.java
@@ -109,6 +109,8 @@ public class KeyValueSchema extends ValueSchema {
Field field = fields.get(i);
PDataType type = field.getDataType();
for (int j = 0; j < field.getCount(); j++) {
+ //TODO: samarth it is at this point that we are looking up stuff in the result tuple to figure out
+ // where exactly the value is here.
if (expressions[index].evaluate(tuple, ptr) && ptr.getLength() > 0) { // Skip null values
if (index >= minNullableIndex) {
valueSet.set(index - minNullableIndex);