Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/11/07 07:46:27 UTC
[1/5] phoenix git commit: PHOENIX-1402 Don't recalculate stats on split
Repository: phoenix
Updated Branches:
refs/heads/3.0 d2cd2856b -> 84f634325
PHOENIX-1402 Don't recalculate stats on split
Conflicts:
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c9715571
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c9715571
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c9715571
Branch: refs/heads/3.0
Commit: c9715571a6bfb83c224459d0202a2e41e7cdde1a
Parents: d2cd285
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Nov 6 14:29:52 2014 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Nov 6 22:37:52 2014 -0800
----------------------------------------------------------------------
dev/eclipse_prefs_phoenix.epf | 4 -
.../phoenix/end2end/StatsCollectorIT.java | 103 ++++++++++++++++++
.../UngroupedAggregateRegionObserver.java | 44 ++++----
.../phoenix/iterate/ParallelIterators.java | 2 +-
.../phoenix/schema/stats/GuidePostsInfo.java | 3 +
.../schema/stats/StatisticsCollector.java | 105 ++++---------------
.../phoenix/schema/stats/StatisticsUtil.java | 11 ++
.../phoenix/schema/stats/StatisticsWriter.java | 70 ++++++++++++-
.../org/apache/phoenix/util/UpgradeUtil.java | 18 +++-
pom.xml | 4 +-
10 files changed, 239 insertions(+), 125 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/dev/eclipse_prefs_phoenix.epf
----------------------------------------------------------------------
diff --git a/dev/eclipse_prefs_phoenix.epf b/dev/eclipse_prefs_phoenix.epf
index e8c1a09..5a0d998 100644
--- a/dev/eclipse_prefs_phoenix.epf
+++ b/dev/eclipse_prefs_phoenix.epf
@@ -464,9 +464,7 @@ file_export_version=3.0
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.codeComplete.staticFieldPrefixes=
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.codeComplete.visibilityCheck=enabled
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
-/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
-/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.compliance=1.6
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.debug.lineNumber=generate
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.debug.localVariable=generate
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.debug.sourceFile=generate
@@ -516,7 +514,6 @@ file_export_version=3.0
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
-/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.compiler.source=1.6
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression=16
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant=16
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call=16
@@ -775,7 +772,6 @@ file_export_version=3.0
/instance/org.eclipse.jdt.core/org.eclipse.jdt.core.formatter.wrap_before_binary_operator=true
/instance/org.eclipse.jdt.debug.ui/org.eclipse.debug.ui.ExpressionView.org.eclipse.jdt.debug.ui.show_null_entries=true
/instance/org.eclipse.jdt.debug.ui/org.eclipse.debug.ui.VariableView.org.eclipse.jdt.debug.ui.show_null_entries=true
-/instance/org.eclipse.jdt.launching/org.eclipse.jdt.launching.PREF_DEFAULT_ENVIRONMENTS_XML=<?xml version\="1.0" encoding\="UTF-8"?>\n<defaultEnvironments>\n<defaultEnvironment environmentId\="J2SE-1.5" vmId\="57,org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType13,1217539956414"/>\n<defaultEnvironment environmentId\="JavaSE-1.6" vmId\="57,org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType13,1217443741223"/>\n</defaultEnvironments>\n
/instance/org.eclipse.jdt.ui/content_assist_disabled_computers=org.eclipse.jdt.ui.javaNoTypeProposalCategory\u0000org.eclipse.jdt.ui.textProposalCategory\u0000org.eclipse.jdt.ui.templateProposalCategory\u0000org.eclipse.jdt.ui.javaTypeProposalCategory\u0000
/instance/org.eclipse.jdt.ui/content_assist_lru_history=<?xml version\="1.0" encoding\="UTF-8"?><history maxLHS\="100" maxRHS\="10"/>
/instance/org.eclipse.jdt.ui/content_assist_number_of_computers=17
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index b9a0e88..71eaf42 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
import static org.apache.phoenix.util.TestUtil.getAllSplits;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
@@ -34,13 +35,16 @@ import java.util.Map;
import java.util.Properties;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -50,12 +54,15 @@ import com.google.common.collect.Maps;
@Category(NeedsOwnMiniClusterTest.class)
public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
private static final String STATS_TEST_TABLE_NAME = "S";
+ private static final byte[] STATS_TEST_TABLE_BYTES = Bytes.toBytes(STATS_TEST_TABLE_NAME);
@BeforeClass
public static void doSetup() throws Exception {
Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
// Must update config before starting server
props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
+ props.put(QueryServices.THREAD_POOL_SIZE_ATTRIB, Integer.toString(10));
+ props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1000));
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@@ -275,4 +282,100 @@ public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
assertEquals(nRows/2+1, keyRanges.size());
}
+
+
+ private void splitTable(Connection conn, byte[] splitPoint) throws IOException, InterruptedException, SQLException {
+ ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+ int nRegionsNow = services.getAllTableRegions(STATS_TEST_TABLE_BYTES).size();
+ HBaseAdmin admin = services.getAdmin();
+ try {
+ admin.split(STATS_TEST_TABLE_BYTES, splitPoint);
+ int nTries = 0;
+ int nRegions;
+ do {
+ Thread.sleep(2000);
+ services.clearTableRegionCache(STATS_TEST_TABLE_BYTES);
+ nRegions = services.getAllTableRegions(STATS_TEST_TABLE_BYTES).size();
+ nTries++;
+ } while (nRegions == nRegionsNow && nTries < 10);
+ if (nRegions == nRegionsNow) {
+ throw new IOException("Failed to complete split within allotted time");
+ }
+ // FIXME: I see the commit of the stats finishing before this with a lower timestamp than the scan timestamp,
+ // yet without this sleep, the query finds the old data. Seems like an HBase bug and a potentially serious one.
+ Thread.sleep(5000);
+ } finally {
+ admin.close();
+ }
+ }
+
+ @Test
+ public void testSplitUpdatesStats() throws Exception {
+ int nRows = 10;
+ Connection conn;
+ PreparedStatement stmt;
+ Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+ conn = DriverManager.getConnection(getUrl(), props);
+ conn.createStatement().execute("CREATE TABLE " + STATS_TEST_TABLE_NAME + "(k VARCHAR PRIMARY KEY, v INTEGER) " + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
+ stmt = conn.prepareStatement("UPSERT INTO " + STATS_TEST_TABLE_NAME + " VALUES(?,?)");
+ for (int i = 0; i < nRows; i++) {
+ stmt.setString(1, Character.toString((char) ('a' + i)));
+ stmt.setInt(2, i);
+ stmt.executeUpdate();
+ }
+ conn.commit();
+
+ TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME);
+ List<KeyRange>keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+ assertEquals(nRows+1, keyRanges.size());
+
+ ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+ List<HRegionLocation> regions = services.getAllTableRegions(STATS_TEST_TABLE_BYTES);
+ assertEquals(1, regions.size());
+
+ ResultSet rs = conn.createStatement().executeQuery("SELECT GUIDE_POSTS_COUNT, REGION_NAME FROM SYSTEM.STATS WHERE PHYSICAL_NAME='"+STATS_TEST_TABLE_NAME+"' AND REGION_NAME IS NOT NULL");
+ assertTrue(rs.next());
+ assertEquals(nRows, rs.getLong(1));
+ assertEquals(regions.get(0).getRegionInfo().getRegionNameAsString(), rs.getString(2));
+ assertFalse(rs.next());
+
+ byte[] midPoint = Bytes.toBytes(Character.toString((char) ('a' + (nRows/2))));
+ splitTable(conn, midPoint);
+
+ keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+ assertEquals(nRows+1, keyRanges.size()); // Same number as before because split was at guidepost
+
+ regions = services.getAllTableRegions(STATS_TEST_TABLE_BYTES);
+ assertEquals(2, regions.size());
+ rs = conn.createStatement().executeQuery("SELECT GUIDE_POSTS_COUNT, REGION_NAME FROM SYSTEM.STATS WHERE PHYSICAL_NAME='"+STATS_TEST_TABLE_NAME+"' AND REGION_NAME IS NOT NULL");
+ assertTrue(rs.next());
+ assertEquals(nRows/2, rs.getLong(1));
+ assertEquals(regions.get(0).getRegionInfo().getRegionNameAsString(), rs.getString(2));
+ assertTrue(rs.next());
+ assertEquals(nRows/2 - 1, rs.getLong(1));
+ assertEquals(regions.get(1).getRegionInfo().getRegionNameAsString(), rs.getString(2));
+ assertFalse(rs.next());
+
+ byte[] midPoint2 = Bytes.toBytes("cj");
+ splitTable(conn, midPoint2);
+
+ keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+ assertEquals(nRows+2, keyRanges.size()); // One extra due to split between guideposts
+
+ regions = services.getAllTableRegions(STATS_TEST_TABLE_BYTES);
+ assertEquals(3, regions.size());
+ rs = conn.createStatement().executeQuery("SELECT GUIDE_POSTS_COUNT, REGION_NAME FROM SYSTEM.STATS WHERE PHYSICAL_NAME='"+STATS_TEST_TABLE_NAME+"' AND REGION_NAME IS NOT NULL");
+ assertTrue(rs.next());
+ assertEquals(3, rs.getLong(1));
+ assertEquals(regions.get(0).getRegionInfo().getRegionNameAsString(), rs.getString(2));
+ assertTrue(rs.next());
+ assertEquals(2, rs.getLong(1));
+ assertEquals(regions.get(1).getRegionInfo().getRegionNameAsString(), rs.getString(2));
+ assertTrue(rs.next());
+ assertEquals(nRows/2 - 1, rs.getLong(1));
+ assertEquals(regions.get(2).getRegionInfo().getRegionNameAsString(), rs.getString(2));
+ assertFalse(rs.next());
+
+ conn.close();
+ }
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 7d7159b..4cf816a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -64,7 +64,6 @@ import org.apache.phoenix.expression.aggregator.ServerAggregators;
import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
import org.apache.phoenix.index.PhoenixIndexCodec;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.join.HashJoinInfo;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
@@ -194,8 +193,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
boolean hasMore;
boolean hasAny = false;
MultiKeyValueTuple result = new MultiKeyValueTuple();
- if (logger.isInfoEnabled()) {
- logger.info("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo());
+ if (logger.isDebugEnabled()) {
+ logger.debug("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo());
}
long rowCount = 0;
MultiVersionConsistencyControl.setThreadReadPoint(innerScanner.getMvccReadPoint());
@@ -320,8 +319,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
}
- if (logger.isInfoEnabled()) {
- logger.info("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan);
}
if (!mutations.isEmpty()) {
@@ -458,27 +457,24 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
throws IOException {
HRegion region = e.getEnvironment().getRegion();
String table = region.getRegionInfo().getTableNameAsString();
- if (!table.equals(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)) {
- StatisticsCollector stats = null;
- try {
- boolean useCurrentTime =
- e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
- QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
- // Provides a means of clients controlling their timestamps to not use current time
- // when background tasks are updating stats. Instead we track the max timestamp of
- // the cells and use that.
- long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
- stats = new StatisticsCollector(e.getEnvironment(), table, clientTimeStamp);
- stats.collectStatsDuringSplit(e.getEnvironment().getConfiguration(), l, r, region);
- } catch (IOException ioe) {
- if(logger.isDebugEnabled()) {
- logger.debug("Error while collecting stats during split ",ioe);
- }
- } finally {
- if (stats != null) stats.close();
+ StatisticsCollector stats = null;
+ try {
+ boolean useCurrentTime =
+ e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
+ QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+ // Provides a means of clients controlling their timestamps to not use current time
+ // when background tasks are updating stats. Instead we track the max timestamp of
+ // the cells and use that.
+ long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
+ stats = new StatisticsCollector(e.getEnvironment(), table, clientTimeStamp);
+ stats.splitStats(region, l, r);
+ } catch (IOException ioe) {
+ if(logger.isWarnEnabled()) {
+ logger.warn("Error while collecting stats during split for " + table,ioe);
}
+ } finally {
+ if (stats != null) stats.close();
}
-
}
public static byte[] serialize(List<Expression> selectExpressions) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index f5c4027..42890bb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -345,7 +345,7 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
for (int i = 0; i < gps.size(); i++) {
buf.append(Bytes.toStringBinary(gps.get(i)));
buf.append(",");
- if (i+1 < gps.size() && ((i+1) % 10) == 0) {
+ if (i > 0 && i < gps.size()-1 && (i % 10) == 0) {
buf.append("\n");
}
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
index 6484349..b4aa0b8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/GuidePostsInfo.java
@@ -4,6 +4,7 @@ import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
+import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
@@ -34,6 +35,8 @@ import com.google.common.collect.Lists;
* A simple POJO class that holds the information related to GuidePosts serDe.
*/
public class GuidePostsInfo {
+ public static final GuidePostsInfo EMPTY_GUIDE_POSTS_INFO = new GuidePostsInfo(0L, Collections.<byte[]>emptyList());
+
private long byteCount; // Number of bytes traversed in the region
private long keyByteSize; // Total number of bytes in keys stored in guidePosts
private List<byte[]> guidePosts;
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index a90c095..61fb7da 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -19,12 +19,10 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -57,7 +55,6 @@ public class StatisticsCollector {
private Map<String, byte[]> minMap = Maps.newHashMap();
private Map<String, byte[]> maxMap = Maps.newHashMap();
private long guidepostDepth;
- private boolean useCurrentTime;
private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
private Map<String, Pair<Long,GuidePostsInfo>> guidePostsMap = Maps.newHashMap();
// Tracks the bytecount per family if it has reached the guidePostsDepth
@@ -68,9 +65,6 @@ public class StatisticsCollector {
throws IOException {
Configuration config = env.getConfiguration();
HTableInterface statsHTable = env.getTable((PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES));
- useCurrentTime =
- config.getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB,
- QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
int guidepostPerRegion = config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, 0);
if (guidepostPerRegion > 0) {
long maxFileSize = statsHTable.getTableDescriptor().getMaxFileSize();
@@ -139,36 +133,6 @@ public class StatisticsCollector {
statsTable.commitStats(mutations);
}
- private void deleteStatsFromStatsTable(final HRegion region, List<Mutation> mutations, long currentTime)
- throws IOException {
- try {
- String regionName = region.getRegionInfo().getRegionNameAsString();
- // update the statistics table
- for (ImmutableBytesPtr fam : familyMap.keySet()) {
- statsTable.deleteStats(regionName, this, Bytes.toString(fam.copyBytesIfNecessary()),
- mutations);
- }
- } catch (IOException e) {
- logger.error("Failed to delete from statistics table!", e);
- throw e;
- }
- }
-
- private int scanRegion(RegionScanner scanner, int count) throws IOException {
- List<KeyValue> results = new ArrayList<KeyValue>();
- boolean hasMore = true;
- while (hasMore) {
- hasMore = scanner.next(results);
- collectStatistics(results);
- count += results.size();
- results.clear();
- while (!hasMore) {
- break;
- }
- }
- return count;
- }
-
public void collectStatistics(final List<KeyValue> results) {
for (KeyValue kv : results) {
updateStatistic(kv);
@@ -183,67 +147,24 @@ public class StatisticsCollector {
return getInternalScanner(region, store, s, store.getColumnFamilyName());
}
-
- public void collectStatsDuringSplit(Configuration conf, HRegion l, HRegion r, HRegion region)
- throws IOException {
- // Invoke collectStat here
+ public void splitStats(HRegion parent, HRegion left, HRegion right) {
try {
- // Create a delete operation on the parent region
- // Then write the new guide posts for individual regions
- List<Mutation> mutations = Lists.newArrayListWithExpectedSize(3);
-
- long currentTime = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : -1;
- deleteStatsFromStatsTable(region, mutations, currentTime);
if (logger.isDebugEnabled()) {
- logger.debug("Collecting stats for the daughter region " + l.getRegionInfo());
+ logger.debug("Collecting stats for split of " + parent.getRegionInfo() + " into " + left.getRegionInfo() + " and " + right.getRegionInfo());
}
- collectStatsForSplitRegions(conf, l, mutations, currentTime);
- if (logger.isDebugEnabled()) {
- logger.debug("Collecting stats for the daughter region " + r.getRegionInfo());
+ List<Mutation> mutations = Lists.newArrayListWithExpectedSize(3);
+ for (byte[] fam : parent.getStores().keySet()) {
+ statsTable.splitStats(parent, left, right, this, Bytes.toString(fam), mutations);
}
- collectStatsForSplitRegions(conf, r, mutations, currentTime);
if (logger.isDebugEnabled()) {
- logger.debug("Committing stats for the daughter regions as part of split " + r.getRegionInfo());
+ logger.debug("Committing stats for the daughter regions as part of split " + parent.getRegionInfo());
}
+ statsTable.commitStats(mutations);
} catch (IOException e) {
logger.error("Error while capturing stats after split of region "
- + region.getRegionInfo().getRegionNameAsString(), e);
- }
- }
-
- private void collectStatsForSplitRegions(Configuration conf, HRegion daughter,
- List<Mutation> mutations, long currentTime) throws IOException {
- IOException toThrow = null;
- clear();
- Scan scan = createScan(conf);
- RegionScanner scanner = null;
- int count = 0;
- try {
- scanner = daughter.getScanner(scan);
- count = scanRegion(scanner, count);
- writeStatsToStatsTable(daughter, false, mutations, currentTime);
- } catch (IOException e) {
- logger.error("Unable to collects stats during split", e);
- toThrow = e;
- } finally {
- try {
- if (scanner != null) scanner.close();
- } catch (IOException e) {
- logger.error("Unable to close scanner after split", e);
- if (toThrow != null) toThrow = e;
- } finally {
- if (toThrow != null) throw toThrow;
- }
+ + parent.getRegionInfo().getRegionNameAsString(), e);
}
}
-
- private Scan createScan(Configuration conf) {
- Scan scan = new Scan();
- scan.setCaching(conf.getInt(QueryServices.SCAN_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE));
- // do not cache the blocks here
- scan.setCacheBlocks(false);
- return scan;
- }
protected InternalScanner getInternalScanner(HRegion region, Store store,
InternalScanner internalScan, String family) {
@@ -259,6 +180,16 @@ public class StatisticsCollector {
maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
}
+ public void addGuidePost(String fam, GuidePostsInfo info, long byteSize, long timestamp) {
+ Pair<Long,GuidePostsInfo> newInfo = new Pair<Long,GuidePostsInfo>(byteSize,info);
+ Pair<Long,GuidePostsInfo> oldInfo = guidePostsMap.put(fam, newInfo);
+ if (oldInfo != null) {
+ info.combine(oldInfo.getSecond());
+ newInfo.setFirst(oldInfo.getFirst() + newInfo.getFirst());
+ }
+ maxTimeStamp = Math.max(maxTimeStamp, timestamp);
+ }
+
public void updateStatistic(KeyValue kv) {
byte[] cf = kv.getFamily();
familyMap.put(new ImmutableBytesPtr(cf), true);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index a48b04a..c47ad24 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.TreeMap;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -58,6 +59,16 @@ public class StatisticsUtil {
return rowKey;
}
+ public static Result readRegionStatistics(HTableInterface statsHTable, byte[] tableNameBytes, byte[] cf, byte[] regionName, long clientTimeStamp)
+ throws IOException {
+ byte[] prefix = StatisticsUtil.getRowKey(tableNameBytes, cf, regionName);
+ Get get = new Get(prefix);
+ get.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp);
+ get.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
+ get.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
+ return statsHTable.get(get);
+ }
+
public static PTableStats readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, long clientTimeStamp) throws IOException {
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
Scan s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp);
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 4118bb9..e28f805 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -20,17 +20,23 @@ package org.apache.phoenix.schema.stats;
import java.io.Closeable;
import java.io.IOException;
import java.sql.Date;
+import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationProtocol;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.TimeKeeper;
@@ -68,6 +74,56 @@ public class StatisticsWriter implements Closeable {
statisticsTable.close();
}
+ public void splitStats(HRegion p, HRegion l, HRegion r, StatisticsCollector tracker, String fam, List<Mutation> mutations) throws IOException {
+ if (tracker == null) { return; }
+ boolean useMaxTimeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP;
+ if (!useMaxTimeStamp) {
+ mutations.add(getLastStatsUpdatedTimePut(clientTimeStamp));
+ }
+ long readTimeStamp = useMaxTimeStamp ? HConstants.LATEST_TIMESTAMP : clientTimeStamp;
+ byte[] famBytes = PDataType.VARCHAR.toBytes(fam);
+ Result result = StatisticsUtil.readRegionStatistics(statisticsTable, tableName, famBytes, p.getRegionName(), readTimeStamp);
+ if (result != null && !result.isEmpty()) {
+ KeyValue cell = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
+
+ if (cell != null) {
+ long writeTimeStamp = useMaxTimeStamp ? cell.getTimestamp() : clientTimeStamp;
+
+ GuidePostsInfo guidePosts = GuidePostsInfo.fromBytes(cell.getBuffer(), cell.getValueOffset(), cell.getValueLength());
+ byte[] pPrefix = StatisticsUtil.getRowKey(tableName, famBytes, p.getRegionName());
+ mutations.add(new Delete(pPrefix, writeTimeStamp));
+
+ long byteSize = 0;
+ KeyValue byteSizeCell = result.getColumnLatest(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
+ if (byteSizeCell != null) {
+ byteSize = PDataType.LONG.getCodec().decodeLong(byteSizeCell.getBuffer(), byteSizeCell.getValueOffset(), SortOrder.getDefault()) / 2;
+ }
+ int midEndIndex, midStartIndex;
+ int index = Collections.binarySearch(guidePosts.getGuidePosts(), r.getStartKey(), Bytes.BYTES_COMPARATOR);
+ if (index < 0) {
+ midEndIndex = midStartIndex = -(index + 1);
+ } else {
+ // For an exact match, we want to get rid of the exact match guidepost,
+ // since it's replaced by the region boundary.
+ midEndIndex = index;
+ midStartIndex = index + 1;
+ }
+ if (midEndIndex > 0) {
+ GuidePostsInfo lguidePosts = new GuidePostsInfo(byteSize, guidePosts.getGuidePosts().subList(0, midEndIndex));
+ tracker.clear();
+ tracker.addGuidePost(fam, lguidePosts, byteSize, cell.getTimestamp());
+ addStats(l.getRegionNameAsString(), tracker, fam, mutations);
+ }
+ if (midStartIndex < guidePosts.getGuidePosts().size()) {
+ GuidePostsInfo rguidePosts = new GuidePostsInfo(byteSize, guidePosts.getGuidePosts().subList(midStartIndex, guidePosts.getGuidePosts().size()));
+ tracker.clear();
+ tracker.addGuidePost(fam, rguidePosts, byteSize, cell.getTimestamp());
+ addStats(r.getRegionNameAsString(), tracker, fam, mutations);
+ }
+ }
+ }
+ }
+
/**
* Update a list of statistics for a given region. If the ANALYZE <tablename> query is issued
* then we use Upsert queries to update the table
@@ -103,10 +159,16 @@ public class StatisticsWriter implements Closeable {
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES,
timeStamp, PDataType.LONG.toBytes(gp.getByteCount()));
}
- put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_KEY_BYTES,
- timeStamp, PDataType.VARBINARY.toBytes(tracker.getMinKey(fam)));
- put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_KEY_BYTES,
- timeStamp, PDataType.VARBINARY.toBytes(tracker.getMaxKey(fam)));
+ byte[] minKey = tracker.getMinKey(fam);
+ if (minKey != null) {
+ put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_KEY_BYTES,
+ timeStamp, PDataType.VARBINARY.toBytes(minKey));
+ }
+ byte[] maxKey = tracker.getMaxKey(fam);
+ if (maxKey != null) {
+ put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_KEY_BYTES,
+ timeStamp, PDataType.VARBINARY.toBytes(maxKey));
+ }
// Add our empty column value so queries behave correctly
put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
timeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
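The heart of the new splitStats() is the Collections.binarySearch call that decides which of the parent's guideposts each daughter region inherits. A minimal standalone sketch of that index arithmetic (plain Strings with natural ordering stand in for Phoenix's byte[] keys and Bytes.BYTES_COMPARATOR; the guidepost values are illustrative):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class GuidePostSplitSketch {
    public static void main(String[] args) {
        List<String> guidePosts = Arrays.asList("b", "c", "d", "e", "f");
        demo(guidePosts, "d");   // split point falls exactly on a guidepost
        demo(guidePosts, "cj");  // split point falls between guideposts
    }

    static void demo(List<String> guidePosts, String splitPoint) {
        int midEndIndex, midStartIndex;
        int index = Collections.binarySearch(guidePosts, splitPoint);
        if (index < 0) {
            // Not found: binarySearch returns -(insertionPoint) - 1, so the
            // guideposts partition cleanly at the insertion point.
            midEndIndex = midStartIndex = -(index + 1);
        } else {
            // Exact match: drop the matching guidepost, since the new region
            // boundary replaces it.
            midEndIndex = index;
            midStartIndex = index + 1;
        }
        System.out.println(splitPoint
                + " -> left gets " + guidePosts.subList(0, midEndIndex)
                + ", right gets " + guidePosts.subList(midStartIndex, guidePosts.size()));
    }
}

Splitting exactly on a guidepost removes it (the "d" case above), which is why testSplitUpdatesStats sees the same chunk count after the first split at a guidepost, and one extra chunk after the second split between guideposts.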
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index a61f39a..12d3938 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -55,6 +55,7 @@ public class UpgradeUtil {
private static void preSplitSequenceTable(PhoenixConnection conn, int nSaltBuckets) throws SQLException {
HBaseAdmin admin = conn.getQueryServices().getAdmin();
+ boolean snapshotCreated = false;
try {
if (nSaltBuckets <= 0) {
return;
@@ -62,6 +63,7 @@ public class UpgradeUtil {
logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
HTableDescriptor desc = admin.getTableDescriptor(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME_BYTES);
admin.snapshot(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME, PhoenixDatabaseMetaData.SEQUENCE_FULLNAME);
+ snapshotCreated = true;
admin.disableTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME);
admin.deleteTable(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME);
byte[][] splitPoints = SaltingUtil.getSalteByteSplitPoints(nSaltBuckets);
@@ -74,9 +76,19 @@ public class UpgradeUtil {
throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e);
} finally {
try {
- admin.close();
- } catch (IOException e) {
- logger.warn("Exception while closing admin during pre-split", e);
+ if (snapshotCreated) {
+ try {
+ admin.deleteSnapshot(PhoenixDatabaseMetaData.SEQUENCE_FULLNAME);
+ } catch (IOException e) {
+ logger.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
+ }
+ }
+ } finally {
+ try {
+ admin.close();
+ } catch (IOException e) {
+ logger.warn("Exception while closing admin during pre-split", e);
+ }
}
}
}
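The fix hinges on a nested try/finally: the snapshot is deleted only if it was actually created, and admin.close() still runs when the deletion fails. A hedged sketch of just that cleanup ordering (the Admin interface below is a hypothetical stand-in for the slice of HBaseAdmin the pattern touches, not Phoenix code):

import java.io.IOException;

public class SnapshotCleanupSketch {
    /** Hypothetical stand-in for HBaseAdmin. */
    interface Admin {
        void snapshot(String snapshotName, String tableName) throws IOException;
        void deleteSnapshot(String snapshotName) throws IOException;
        void close() throws IOException;
    }

    static void preSplit(Admin admin, String snapshotName, String tableName) throws IOException {
        boolean snapshotCreated = false;
        try {
            admin.snapshot(snapshotName, tableName);
            snapshotCreated = true;
            // ... disable, delete, and re-create the table with split points ...
        } finally {
            try {
                if (snapshotCreated) {
                    admin.deleteSnapshot(snapshotName); // only if snapshot() succeeded
                }
            } catch (IOException e) {
                System.err.println("Exception while deleting snapshot: " + e); // warn, don't mask
            } finally {
                try {
                    admin.close(); // always runs, even if deleteSnapshot() threw
                } catch (IOException e) {
                    System.err.println("Exception while closing admin: " + e);
                }
            }
        }
    }
}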
http://git-wip-us.apache.org/repos/asf/phoenix/blob/c9715571/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index dfce72f..141b61a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -129,8 +129,8 @@
<artifactId>maven-compiler-plugin</artifactId>
<version>3.0</version>
<configuration>
- <source>1.6</source>
- <target>1.6</target>
+ <source>1.7</source>
+ <target>1.7</target>
</configuration>
</plugin>
<!--This plugin's configuration is used to store Eclipse m2e settings
[4/5] phoenix git commit: PHOENIX-1394 getColumnDisplaySize() method returns incorrect value for varchar columns (Samarth Jain)
Posted by ja...@apache.org.
PHOENIX-1394 getColumnDisplaySize() method returns incorrect value for varchar columns (Samarth Jain)
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/efd5b217
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/efd5b217
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/efd5b217
Branch: refs/heads/3.0
Commit: efd5b217941fb6c6484f2cdc7b07ee3757139df2
Parents: 4f1df52
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Nov 6 19:22:08 2014 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Nov 6 22:41:36 2014 -0800
----------------------------------------------------------------------
.../phoenix/jdbc/PhoenixResultSetMetaData.java | 18 +++-----
.../jdbc/PhoenixResultSetMetadataTest.java | 45 ++++++++++++++++++++
2 files changed, 52 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/efd5b217/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java
index b58d5ad..30e9862 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSetMetaData.java
@@ -55,9 +55,7 @@ import org.apache.phoenix.schema.PDataType;
* @since 0.1
*/
public class PhoenixResultSetMetaData implements ResultSetMetaData {
- private static final int MIN_DISPLAY_WIDTH = 3;
- private static final int MAX_DISPLAY_WIDTH = 40;
- private static final int DEFAULT_DISPLAY_WIDTH = 10;
+ static final int DEFAULT_DISPLAY_WIDTH = 40;
private final RowProjector rowProjector;
private final PhoenixConnection connection;
@@ -85,21 +83,19 @@ public class PhoenixResultSetMetaData implements ResultSetMetaData {
@Override
public int getColumnDisplaySize(int column) throws SQLException {
ColumnProjector projector = rowProjector.getColumnProjector(column-1);
- int displaySize = Math.max(projector.getName().length(),MIN_DISPLAY_WIDTH);
PDataType type = projector.getExpression().getDataType();
if (type == null) {
- return Math.min(Math.max(displaySize, QueryConstants.NULL_DISPLAY_TEXT.length()), MAX_DISPLAY_WIDTH);
+ return QueryConstants.NULL_DISPLAY_TEXT.length();
}
if (type.isCoercibleTo(PDataType.DATE)) {
- return Math.min(Math.max(displaySize, connection.getDatePattern().length()), MAX_DISPLAY_WIDTH);
+ return connection.getDatePattern().length();
}
- if (type.isFixedWidth() && projector.getExpression().getMaxLength() != null) {
- return Math.min(Math.max(displaySize, projector.getExpression().getMaxLength()), MAX_DISPLAY_WIDTH);
+ if (projector.getExpression().getMaxLength() != null) {
+ return projector.getExpression().getMaxLength();
}
-
- return Math.min(Math.max(displaySize, DEFAULT_DISPLAY_WIDTH), MAX_DISPLAY_WIDTH);
+ return DEFAULT_DISPLAY_WIDTH;
}
-
+
@Override
public String getColumnLabel(int column) throws SQLException {
return rowProjector.getColumnProjector(column-1).getName();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/efd5b217/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java
new file mode 100644
index 0000000..9153595
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/jdbc/PhoenixResultSetMetadataTest.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.jdbc;
+
+import static org.junit.Assert.assertEquals;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+
+import org.apache.phoenix.query.BaseConnectionlessQueryTest;
+import org.apache.phoenix.query.QueryConstants;
+import org.junit.Test;
+
+public class PhoenixResultSetMetadataTest extends BaseConnectionlessQueryTest {
+
+ @Test
+ public void testColumnDisplaySize() throws Exception {
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(
+ "CREATE TABLE T (pk1 CHAR(15) not null, pk2 VARCHAR not null, v1 VARCHAR(15), v2 DATE, v3 VARCHAR " +
+ "CONSTRAINT pk PRIMARY KEY (pk1, pk2)) ");
+ ResultSet rs = conn.createStatement().executeQuery("SELECT pk1, pk2, v1, v2, NULL FROM T");
+ assertEquals(15, rs.getMetaData().getColumnDisplaySize(1));
+ assertEquals(PhoenixResultSetMetaData.DEFAULT_DISPLAY_WIDTH, rs.getMetaData().getColumnDisplaySize(2));
+ assertEquals(15, rs.getMetaData().getColumnDisplaySize(3));
+ assertEquals(conn.unwrap(PhoenixConnection.class).getDatePattern().length(), rs.getMetaData().getColumnDisplaySize(4));
+ assertEquals(QueryConstants.NULL_DISPLAY_TEXT.length(), rs.getMetaData().getColumnDisplaySize(5));
+ }
+}
[5/5] phoenix git commit: PHOENIX-1418 Optionally display number of parallel chunks in explain plan
Posted by ja...@apache.org.
PHOENIX-1418 Optionally display number of parallel chunks in explain plan
Conflicts:
phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/84f63432
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/84f63432
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/84f63432
Branch: refs/heads/3.0
Commit: 84f634325891949ea56002e1aa13d4852b60b604
Parents: efd5b21
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Nov 6 19:42:19 2014 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Nov 6 22:46:07 2014 -0800
----------------------------------------------------------------------
.../org/apache/phoenix/end2end/StatsCollectorIT.java | 12 +++++++++---
.../org/apache/phoenix/iterate/ParallelIterators.java | 6 +++++-
.../java/org/apache/phoenix/query/QueryServices.java | 1 +
.../org/apache/phoenix/query/QueryServicesOptions.java | 7 +++++++
.../org/apache/phoenix/query/QueryServicesTestImpl.java | 2 ++
5 files changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/84f63432/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 71eaf42..85b7c23 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -43,6 +43,7 @@ import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
import org.apache.phoenix.util.ReadOnlyProps;
import org.apache.phoenix.util.TestUtil;
import org.junit.BeforeClass;
@@ -58,11 +59,12 @@ public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
@BeforeClass
public static void doSetup() throws Exception {
- Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
+ Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
// Must update config before starting server
props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
props.put(QueryServices.THREAD_POOL_SIZE_ATTRIB, Integer.toString(10));
props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1000));
+ props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
@@ -303,6 +305,7 @@ public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
}
// FIXME: I see the commit of the stats finishing before this with a lower timestamp than the scan timestamp,
// yet without this sleep, the query finds the old data. Seems like an HBase bug and a potentially serious one.
+
Thread.sleep(5000);
} finally {
admin.close();
@@ -325,15 +328,18 @@ public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
}
conn.commit();
+ ResultSet rs;
TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME);
List<KeyRange>keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
assertEquals(nRows+1, keyRanges.size());
-
+ rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + STATS_TEST_TABLE_NAME);
+ assertEquals("CLIENT " + (nRows+1) + "-CHUNK " + "PARALLEL 1-WAY FULL SCAN OVER " + STATS_TEST_TABLE_NAME, QueryUtil.getExplainPlan(rs));
+
ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
List<HRegionLocation> regions = services.getAllTableRegions(STATS_TEST_TABLE_BYTES);
assertEquals(1, regions.size());
- ResultSet rs = conn.createStatement().executeQuery("SELECT GUIDE_POSTS_COUNT, REGION_NAME FROM SYSTEM.STATS WHERE PHYSICAL_NAME='"+STATS_TEST_TABLE_NAME+"' AND REGION_NAME IS NOT NULL");
+ rs = conn.createStatement().executeQuery("SELECT GUIDE_POSTS_COUNT, REGION_NAME FROM SYSTEM.STATS WHERE PHYSICAL_NAME='"+STATS_TEST_TABLE_NAME+"' AND REGION_NAME IS NOT NULL");
assertTrue(rs.next());
assertEquals(nRows, rs.getLong(1));
assertEquals(regions.get(0).getRegionInfo().getRegionNameAsString(), rs.getString(2));
http://git-wip-us.apache.org/repos/asf/phoenix/blob/84f63432/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 42890bb..d2fcc19 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -55,6 +55,7 @@ import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.PColumnFamily;
import org.apache.phoenix.schema.PTable;
@@ -619,8 +620,11 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
@Override
public void explain(List<String> planSteps) {
+ boolean displayChunkCount = context.getConnection().getQueryServices().getProps().getBoolean(
+ QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB,
+ QueryServicesOptions.DEFAULT_EXPLAIN_CHUNK_COUNT);
StringBuilder buf = new StringBuilder();
- buf.append("CLIENT PARALLEL " + size() + "-WAY ");
+ buf.append("CLIENT " + (displayChunkCount ? (this.splits.size() + "-CHUNK ") : "") + "PARALLEL " + size() + "-WAY ");
explain(buf.toString(),planSteps);
}
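On the client, the new flag surfaces as a chunk-count prefix on the first line of the EXPLAIN plan, as exercised in the StatsCollectorIT change above. A hedged usage sketch (the JDBC URL and table S are placeholders; it assumes phoenix.explain.displayChunkCount is honored when passed via connection properties, and it defaults to true anyway per QueryServicesOptions):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.util.Properties;

public class ExplainChunkCountSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // EXPLAIN_CHUNK_COUNT_ATTRIB, set explicitly here for illustration.
        props.setProperty("phoenix.explain.displayChunkCount", "true");
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM S");
            while (rs.next()) {
                // e.g. "CLIENT 11-CHUNK PARALLEL 1-WAY FULL SCAN OVER S"
                System.out.println(rs.getString(1));
            }
        }
    }
}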
http://git-wip-us.apache.org/repos/asf/phoenix/blob/84f63432/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 812879e..7ddebaf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -128,6 +128,7 @@ public interface QueryServices extends SQLCloseable {
public static final String SEQUENCE_SALT_BUCKETS_ATTRIB = "phoenix.sequence.saltBuckets";
public static final String COPROCESSOR_PRIORITY_ATTRIB = "phoenix.coprocessor.priority";
+ public static final String EXPLAIN_CHUNK_COUNT_ATTRIB = "phoenix.explain.displayChunkCount";
/**
* Get executor service used for parallel scans
http://git-wip-us.apache.org/repos/asf/phoenix/blob/84f63432/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 67eb690..cc7b238 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -21,6 +21,7 @@ import static org.apache.phoenix.query.QueryServices.CALL_QUEUE_PRODUCER_ATTRIB_
import static org.apache.phoenix.query.QueryServices.CALL_QUEUE_ROUND_ROBIN_ATTRIB;
import static org.apache.phoenix.query.QueryServices.DATE_FORMAT_ATTRIB;
import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB;
import static org.apache.phoenix.query.QueryServices.GROUPBY_MAX_CACHE_SIZE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.GROUPBY_SPILLABLE_ATTRIB;
import static org.apache.phoenix.query.QueryServices.GROUPBY_SPILL_FILES_ATTRIB;
@@ -146,6 +147,7 @@ public class QueryServicesOptions {
* Default value for coprocessor priority is between SYSTEM and USER priority.
*/
public static final int DEFAULT_COPROCESSOR_PRIORITY = Coprocessor.PRIORITY_SYSTEM/2 + Coprocessor.PRIORITY_USER/2; // Divide individually to prevent any overflow
+ public static final boolean DEFAULT_EXPLAIN_CHUNK_COUNT = true;
private final Configuration config;
@@ -442,4 +444,9 @@ public class QueryServicesOptions {
return this;
}
+ public QueryServicesOptions setExplainChunkCount(boolean showChunkCount) {
+ config.setBoolean(EXPLAIN_CHUNK_COUNT_ATTRIB, showChunkCount);
+ return this;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/84f63432/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index a9b4c7d..d9c8589 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -52,6 +52,7 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE = 1024L*1024L*4L; // 4 Mb
public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE = 1024L*1024L*2L; // 2 Mb
public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0;
+ public static final boolean DEFAULT_EXPLAIN_CHUNK_COUNT = false; // TODO: update explain plans in test and set to true
/**
* Set number of salt buckets lower for sequence table during testing, as a high
@@ -65,6 +66,7 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
private static QueryServicesOptions getDefaultServicesOptions() {
return withDefaults()
+ .setExplainChunkCount(DEFAULT_EXPLAIN_CHUNK_COUNT)
.setSequenceSaltBuckets(DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS)
.setMinStatsUpdateFrequencyMs(DEFAULT_MIN_STATS_UPDATE_FREQ_MS)
.setThreadPoolSize(DEFAULT_THREAD_POOL_SIZE)
[3/5] phoenix git commit: PHOENIX-1413 Add Phoenix coprocessors with configurable priority
Posted by ja...@apache.org.
PHOENIX-1413 Add Phoenix coprocessors with configurable priority
Conflicts:
phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4f1df52b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4f1df52b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4f1df52b
Branch: refs/heads/3.0
Commit: 4f1df52b78edbdae7950c26f5b7afd6677e4e319
Parents: 6abe4df
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Nov 6 18:24:54 2014 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Nov 6 22:41:10 2014 -0800
----------------------------------------------------------------------
.../EndToEndCoveredColumnsIndexBuilderIT.java | 3 ++-
.../org/apache/phoenix/hbase/index/Indexer.java | 10 +++++-----
.../CoveredColumnIndexSpecifierBuilder.java | 4 ++--
.../query/ConnectionQueryServicesImpl.java | 19 ++++++++++---------
.../org/apache/phoenix/query/QueryServices.java | 2 ++
.../phoenix/query/QueryServicesOptions.java | 5 +++++
6 files changed, 26 insertions(+), 17 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f1df52b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
index d7b9099..75ed0bf 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
@@ -32,6 +32,7 @@ import java.util.Map;
import java.util.Queue;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -301,7 +302,7 @@ public class EndToEndCoveredColumnsIndexBuilderIT {
// initializer blows up.
indexerOpts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY,
CoveredIndexCodecForTesting.class.getName());
- Indexer.enableIndexing(desc, CoveredColumnsIndexBuilder.class, indexerOpts);
+ Indexer.enableIndexing(desc, CoveredColumnsIndexBuilder.class, indexerOpts, Coprocessor.PRIORITY_USER);
// create the table
HBaseAdmin admin = UTIL.getHBaseAdmin();
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f1df52b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 3ab400a..8531361 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -32,7 +32,6 @@ import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -639,17 +638,18 @@ public class Indexer extends BaseRegionObserver {
/**
* Enable indexing on the given table
* @param desc {@link HTableDescriptor} for the table on which indexing should be enabled
- * @param builder class to use when building the index for this table
- * @param properties map of custom configuration options to make available to your
+ * @param builder class to use when building the index for this table
+ * @param properties map of custom configuration options to make available to your
* {@link IndexBuilder} on the server-side
+ * @param priority TODO
* @throws IOException the Indexer coprocessor cannot be added
*/
public static void enableIndexing(HTableDescriptor desc, Class<? extends IndexBuilder> builder,
- Map<String, String> properties) throws IOException {
+ Map<String, String> properties, int priority) throws IOException {
if (properties == null) {
properties = new HashMap<String, String>();
}
properties.put(Indexer.INDEX_BUILDER_CONF_KEY, builder.getName());
- desc.addCoprocessor(Indexer.class.getName(), null, Coprocessor.PRIORITY_USER, properties);
+ desc.addCoprocessor(Indexer.class.getName(), null, priority, properties);
}
}
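For reference, a minimal caller sketch of the new four-argument overload (the wrapper class is illustrative; existing callers keep the old behavior by passing Coprocessor.PRIORITY_USER explicitly, as the two call-site updates in this commit do):
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.phoenix.hbase.index.Indexer;
import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
public class EnableIndexingExample {
    // Registers the Indexer coprocessor on the given table descriptor at an
    // explicit priority instead of the previously hard-coded PRIORITY_USER.
    public static void enable(HTableDescriptor desc) throws IOException {
        Map<String, String> opts = new HashMap<String, String>();
        Indexer.enableIndexing(desc, CoveredColumnsIndexBuilder.class, opts, Coprocessor.PRIORITY_USER);
    }
}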
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f1df52b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
index 9fcd5f3..6ac89d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexSpecifierBuilder.java
@@ -26,8 +26,8 @@ import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HTableDescriptor;
-
import org.apache.phoenix.hbase.index.Indexer;
import org.apache.phoenix.hbase.index.covered.CoveredColumnsIndexBuilder;
import org.apache.phoenix.hbase.index.covered.IndexCodec;
@@ -137,7 +137,7 @@ public class CoveredColumnIndexSpecifierBuilder {
// add the codec for the index to the map of options
Map<String, String> opts = this.convertToMap();
opts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY, clazz.getName());
- Indexer.enableIndexing(desc, CoveredColumnIndexer.class, opts);
+ Indexer.enableIndexing(desc, CoveredColumnIndexer.class, opts, Coprocessor.PRIORITY_USER);
}
static List<ColumnGroup> getColumns(Configuration conf) {
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f1df52b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 31d46e0..a65898f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -530,18 +530,19 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private void addCoprocessors(byte[] tableName, HTableDescriptor descriptor, PTableType tableType) throws SQLException {
// The phoenix jar must be available on HBase classpath
+ int priority = props.getInt(QueryServices.COPROCESSOR_PRIORITY_ATTRIB, QueryServicesOptions.DEFAULT_COPROCESSOR_PRIORITY);
try {
if (!descriptor.hasCoprocessor(ScanRegionObserver.class.getName())) {
- descriptor.addCoprocessor(ScanRegionObserver.class.getName(), null, 1, null);
+ descriptor.addCoprocessor(ScanRegionObserver.class.getName(), null, priority, null);
}
if (!descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) {
- descriptor.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, 1, null);
+ descriptor.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, priority, null);
}
if (!descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) {
- descriptor.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, 1, null);
+ descriptor.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, priority, null);
}
if (!descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) {
- descriptor.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, 1, null);
+ descriptor.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, priority, null);
}
// TODO: better encapsulation for this
@@ -554,25 +555,25 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
&& !descriptor.hasCoprocessor(Indexer.class.getName())) {
Map<String, String> opts = Maps.newHashMapWithExpectedSize(1);
opts.put(CoveredColumnsIndexBuilder.CODEC_CLASS_NAME_KEY, PhoenixIndexCodec.class.getName());
- Indexer.enableIndexing(descriptor, PhoenixIndexBuilder.class, opts);
+ Indexer.enableIndexing(descriptor, PhoenixIndexBuilder.class, opts, priority);
}
if (SchemaUtil.isStatsTable(tableName) && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
descriptor.addCoprocessor(MultiRowMutationEndpoint.class.getName(),
- null, 1, null);
+ null, priority, null);
}
// Setup split policy on Phoenix metadata table to ensure that the key values of a Phoenix table
// stay on the same region.
if (SchemaUtil.isMetaTable(tableName)) {
if (!descriptor.hasCoprocessor(MetaDataEndpointImpl.class.getName())) {
- descriptor.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, 1, null);
+ descriptor.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, priority, null);
}
if (!descriptor.hasCoprocessor(MetaDataRegionObserver.class.getName())) {
- descriptor.addCoprocessor(MetaDataRegionObserver.class.getName(), null, 2, null);
+ descriptor.addCoprocessor(MetaDataRegionObserver.class.getName(), null, priority + 1, null);
}
} else if (SchemaUtil.isSequenceTable(tableName)) {
if (!descriptor.hasCoprocessor(SequenceRegionObserver.class.getName())) {
- descriptor.addCoprocessor(SequenceRegionObserver.class.getName(), null, 1, null);
+ descriptor.addCoprocessor(SequenceRegionObserver.class.getName(), null, priority, null);
}
}
} catch (IOException e) {
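With this change every Phoenix coprocessor is registered at a single configurable priority, with MetaDataRegionObserver at priority + 1 to preserve its ordering after MetaDataEndpointImpl (previously 2 versus 1). A sketch of overriding the default, assuming the key is honored from connection properties like other phoenix.* settings; the JDBC URL is illustrative:
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import org.apache.hadoop.hbase.Coprocessor;
public class CoprocessorPriorityExample {
    public static Connection connect() throws Exception {
        Properties props = new Properties();
        // "phoenix.coprocessor.priority" is QueryServices.COPROCESSOR_PRIORITY_ATTRIB;
        // lower numeric values mean earlier loading (higher priority) in HBase.
        props.setProperty("phoenix.coprocessor.priority",
                Integer.toString(Coprocessor.PRIORITY_USER));
        return DriverManager.getConnection("jdbc:phoenix:localhost", props);
    }
}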
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f1df52b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index d3faf2e..812879e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -127,6 +127,8 @@ public interface QueryServices extends SQLCloseable {
public static final String STATS_USE_CURRENT_TIME_ATTRIB = "phoenix.stats.useCurrentTime";
public static final String SEQUENCE_SALT_BUCKETS_ATTRIB = "phoenix.sequence.saltBuckets";
+ public static final String COPROCESSOR_PRIORITY_ATTRIB = "phoenix.coprocessor.priority";
+
/**
* Get executor service used for parallel scans
*/
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f1df52b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 117f285..67eb690 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -60,6 +60,7 @@ import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB;
import java.util.Map.Entry;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.regionserver.wal.WALEditCodec;
import org.apache.phoenix.schema.SaltingUtil;
import org.apache.phoenix.util.DateUtil;
@@ -141,6 +142,10 @@ public class QueryServicesOptions {
* Use only first time SYSTEM.SEQUENCE table is created.
*/
public static final int DEFAULT_SEQUENCE_TABLE_SALT_BUCKETS = SaltingUtil.MAX_BUCKET_NUM;
+ /**
+ * Default value for coprocessor priority is between SYSTEM and USER priority.
+ */
+ public static final int DEFAULT_COPROCESSOR_PRIORITY = Coprocessor.PRIORITY_SYSTEM/2 + Coprocessor.PRIORITY_USER/2; // Divide individually to prevent any overflow
private final Configuration config;
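The "divide individually" comment guards against int overflow in the naive midpoint (a + b) / 2. With HBase's actual constants (PRIORITY_SYSTEM is Integer.MAX_VALUE/4 and PRIORITY_USER is Integer.MAX_VALUE/2) the sum happens to fit in an int, so the split is defensive; a self-contained illustration of the failure mode it avoids:
public class MidpointOverflowDemo {
    public static void main(String[] args) {
        // Two large ints whose sum exceeds Integer.MAX_VALUE:
        int a = Integer.MAX_VALUE / 2;
        int b = Integer.MAX_VALUE / 2 + 2;
        System.out.println((a + b) / 2);   // negative: the intermediate sum wrapped around
        System.out.println(a / 2 + b / 2); // correct midpoint (modulo integer rounding)
    }
}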
[2/5] phoenix git commit: PHOENIX-1416 Given a schema name, DatabaseMetaData.getTables and getColumns calls erroneously match tables without schema
Posted by ja...@apache.org.
PHOENIX-1416 Given a schema name, DatabaseMetaData.getTables and getColumns calls erroneously match tables without schema
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6abe4df0
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6abe4df0
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6abe4df0
Branch: refs/heads/3.0
Commit: 6abe4df01e4c298a16f029c40e3b8cb120702f18
Parents: c971557
Author: James Taylor <jt...@salesforce.com>
Authored: Thu Nov 6 17:57:57 2014 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Thu Nov 6 22:38:19 2014 -0800
----------------------------------------------------------------------
.../end2end/QueryDatabaseMetaDataIT.java | 43 ++++++++++++++++++
.../org/apache/phoenix/compile/ScanRanges.java | 8 +++-
.../phoenix/expression/AndExpression.java | 4 +-
.../phoenix/expression/AndOrExpression.java | 11 +----
.../apache/phoenix/expression/OrExpression.java | 4 +-
.../phoenix/compile/WhereOptimizerTest.java | 47 ++++++++++++++++++++
6 files changed, 102 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6abe4df0/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index 9bf2b0c..83bb91d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -1058,4 +1058,47 @@ public class QueryDatabaseMetaDataIT extends BaseClientManagedTimeIT {
}
conn5.close();
}
+
+
+ @Test
+ public void testTableWithSchemaMetadataScan() throws SQLException {
+ long ts = nextTimestamp();
+ Properties props = new Properties();
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
+ Connection conn = DriverManager.getConnection(getUrl(), props);
+
+ conn.createStatement().execute("create table foo.bar(k varchar primary key)");
+ conn.createStatement().execute("create table bar(k varchar primary key)");
+ conn.close();
+
+ props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
+ conn = DriverManager.getConnection(getUrl(), props);
+
+ DatabaseMetaData metaData = conn.getMetaData();
+ ResultSet rs;
+
+ // Tricky case that requires returning false for null AND true expression
+ rs = metaData.getTables(null, "FOO", "BAR", null);
+ assertTrue(rs.next());
+ assertEquals("FOO",rs.getString("TABLE_SCHEM"));
+ assertEquals("BAR", rs.getString("TABLE_NAME"));
+ assertFalse(rs.next());
+
+ // Tricky case that requires end key to maintain trailing nulls
+ rs = metaData.getTables("", "FOO", "BAR", null);
+ assertTrue(rs.next());
+ assertEquals("FOO",rs.getString("TABLE_SCHEM"));
+ assertEquals("BAR", rs.getString("TABLE_NAME"));
+ assertFalse(rs.next());
+
+ rs = metaData.getTables("", null, "BAR", null);
+ assertTrue(rs.next());
+ assertEquals(null,rs.getString("TABLE_SCHEM"));
+ assertEquals("BAR", rs.getString("TABLE_NAME"));
+ assertTrue(rs.next());
+ assertEquals("FOO",rs.getString("TABLE_SCHEM"));
+ assertEquals("BAR", rs.getString("TABLE_NAME"));
+ assertFalse(rs.next());
+ }
+
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6abe4df0/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index d60a288..61b6451 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -440,9 +440,13 @@ public class ScanRanges {
if (ScanUtil.getTotalSpan(ranges, slotSpan) < schema.getMaxFields()) {
return false;
}
- for (List<KeyRange> orRanges : ranges) {
+ int lastIndex = ranges.size()-1;
+ for (int i = lastIndex; i >= 0; i--) {
+ List<KeyRange> orRanges = ranges.get(i);
for (KeyRange keyRange : orRanges) {
- if (!keyRange.isSingleKey()) {
+ // Special case for single trailing IS NULL. We cannot consider this as a point key because
+ // we strip trailing nulls when we form the key.
+ if (!keyRange.isSingleKey() || (i == lastIndex && keyRange == KeyRange.IS_NULL_RANGE)) {
return false;
}
}
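The key shape behind the new special case: Phoenix strips trailing nulls when it forms a row key, so for a (a, b) primary key the stored key for ('a', null) is just "a". A point get built from 'a' plus a separator would therefore miss the row; the scan must cover the half-open range ["a", "a" 0x00 0x00), which is exactly what the new testTrailingIsNull further down asserts. A small sketch using the same helpers as that test:
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.util.ByteUtil;
public class TrailingNullKeyShape {
    public static void main(String[] args) {
        byte[] startRow = Bytes.toBytes("a");
        // Exclusive stop row: 'a' followed by two separator bytes, so the range
        // covers the trailing-null-stripped key "a" but no key where b has a value.
        byte[] stopRow = ByteUtil.concat(Bytes.toBytes("a"),
                QueryConstants.SEPARATOR_BYTE_ARRAY,
                QueryConstants.SEPARATOR_BYTE_ARRAY);
        System.out.println(Bytes.toStringBinary(startRow) + " .. " + Bytes.toStringBinary(stopRow));
    }
}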
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6abe4df0/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
index e9c2740..bb2dc7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndExpression.java
@@ -79,8 +79,8 @@ public class AndExpression extends AndOrExpression {
}
@Override
- protected boolean getStopValue() {
- return Boolean.FALSE;
+ protected boolean isStopValue(Boolean value) {
+ return !Boolean.TRUE.equals(value);
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6abe4df0/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
index eebcd34..89ad02e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/AndOrExpression.java
@@ -21,7 +21,6 @@ import java.util.BitSet;
import java.util.List;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
import org.apache.phoenix.schema.PDataType;
import org.apache.phoenix.schema.tuple.Tuple;
@@ -45,11 +44,6 @@ public abstract class AndOrExpression extends BaseCompoundExpression {
}
@Override
- public int hashCode() {
- return 31 * super.hashCode() + Boolean.valueOf(this.getStopValue()).hashCode();
- }
-
- @Override
public PDataType getDataType() {
return PDataType.BOOLEAN;
}
@@ -67,7 +61,6 @@ public abstract class AndOrExpression extends BaseCompoundExpression {
@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
boolean isNull = false;
- boolean stopValue = getStopValue();
for (int i = 0; i < children.size(); i++) {
Expression child = children.get(i);
// If partial state is available, then use that to know we've already evaluated this
@@ -77,7 +70,7 @@ public abstract class AndOrExpression extends BaseCompoundExpression {
// evaluate versus getValue code path.
if (child.evaluate(tuple, ptr)) {
// Short circuit if we see our stop value
- if (Boolean.valueOf(stopValue).equals(PDataType.BOOLEAN.toObject(ptr, child.getDataType()))) {
+ if (isStopValue((Boolean)PDataType.BOOLEAN.toObject(ptr, child.getDataType()))) {
return true;
} else if (partialEvalState != null) {
partialEvalState.set(i);
@@ -93,5 +86,5 @@ public abstract class AndOrExpression extends BaseCompoundExpression {
return true;
}
- protected abstract boolean getStopValue();
+ protected abstract boolean isStopValue(Boolean value);
}
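The refactor replaces a single stop constant with a stop predicate so the short-circuit respects SQL three-valued logic: for AND, a null child result (as in the metadata scan's "null AND true" case above) can never become TRUE and now stops evaluation just like FALSE, while OR still stops only on TRUE. A self-contained sketch of the two predicates as implemented in AndExpression above and OrExpression below:
public class StopValueDemo {
    // AND stops on anything that is not TRUE (i.e. FALSE or null).
    static boolean andStops(Boolean value) { return !Boolean.TRUE.equals(value); }
    // OR stops only on TRUE.
    static boolean orStops(Boolean value) { return Boolean.TRUE.equals(value); }
    public static void main(String[] args) {
        System.out.println(andStops(null));          // true: null AND x is never TRUE
        System.out.println(andStops(Boolean.FALSE)); // true
        System.out.println(andStops(Boolean.TRUE));  // false: keep evaluating
        System.out.println(orStops(null));           // false: null OR x may still be TRUE
        System.out.println(orStops(Boolean.TRUE));   // true
    }
}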
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6abe4df0/phoenix-core/src/main/java/org/apache/phoenix/expression/OrExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/OrExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/OrExpression.java
index e8565c5..5b1b62e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/OrExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/OrExpression.java
@@ -38,8 +38,8 @@ public class OrExpression extends AndOrExpression {
}
@Override
- protected boolean getStopValue() {
- return Boolean.TRUE;
+ protected boolean isStopValue(Boolean value) {
+ return Boolean.TRUE.equals(value);
}
@Override
http://git-wip-us.apache.org/repos/asf/phoenix/blob/6abe4df0/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
index 032768b..1ce6c02 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereOptimizerTest.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.filter.RowKeyComparisonFilter;
import org.apache.phoenix.filter.SingleKeyValueComparisonFilter;
import org.apache.phoenix.filter.SkipScanFilter;
@@ -1734,4 +1735,50 @@ public class WhereOptimizerTest extends BaseConnectionlessQueryTest {
return conn;
}
+ @Test
+ public void testTrailingIsNull() throws Exception {
+ String baseTableDDL = "CREATE TABLE t(\n " +
+ " a VARCHAR,\n" +
+ " b VARCHAR,\n" +
+ " CONSTRAINT pk PRIMARY KEY (a, b))";
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(baseTableDDL);
+ conn.close();
+
+ String query = "SELECT * FROM t WHERE a = 'a' and b is null";
+ StatementContext context = compileStatement(query, Collections.<Object>emptyList());
+ Scan scan = context.getScan();
+ Filter filter = scan.getFilter();
+ assertNull(filter);
+ assertArrayEquals(Bytes.toBytes("a"), scan.getStartRow());
+ assertArrayEquals(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
+ }
+
+
+ @Test
+ public void testTrailingIsNullWithOr() throws Exception {
+ String baseTableDDL = "CREATE TABLE t(\n " +
+ " a VARCHAR,\n" +
+ " b VARCHAR,\n" +
+ " CONSTRAINT pk PRIMARY KEY (a, b))";
+ Connection conn = DriverManager.getConnection(getUrl());
+ conn.createStatement().execute(baseTableDDL);
+ conn.close();
+
+ String query = "SELECT * FROM t WHERE a = 'a' and (b is null or b = 'b')";
+ StatementContext context = compileStatement(query, Collections.<Object>emptyList());
+ Scan scan = context.getScan();
+ Filter filter = scan.getFilter();
+ assertTrue(filter instanceof SkipScanFilter);
+ SkipScanFilter skipScan = (SkipScanFilter)filter;
+ List<List<KeyRange>> slots = skipScan.getSlots();
+ assertEquals(2,slots.size());
+ assertEquals(1,slots.get(0).size());
+ assertEquals(2,slots.get(1).size());
+ assertEquals(KeyRange.getKeyRange(Bytes.toBytes("a")), slots.get(0).get(0));
+ assertTrue(KeyRange.IS_NULL_RANGE == slots.get(1).get(0));
+ assertEquals(KeyRange.getKeyRange(Bytes.toBytes("b")), slots.get(1).get(1));
+ assertArrayEquals(Bytes.toBytes("a"), scan.getStartRow());
+ assertArrayEquals(ByteUtil.concat(Bytes.toBytes("a"), QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
+ }
}