Posted to commits@phoenix.apache.org by ja...@apache.org on 2018/05/19 00:08:46 UTC

[1/2] phoenix git commit: PHOENIX-4742 DistinctPrefixFilter potentially seeks to lesser key when descending or null value

Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.3 52304092f -> 9066ce395


PHOENIX-4742 DistinctPrefixFilter potentially seeks to lesser key when descending or null value


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d7533f70
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d7533f70
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d7533f70

Branch: refs/heads/4.x-HBase-1.3
Commit: d7533f70212ec9cdb664b8f7d6d3814e3ec6e7f5
Parents: 5230409
Author: James Taylor <jt...@salesforce.com>
Authored: Fri May 18 08:46:38 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri May 18 17:07:03 2018 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/OrderByIT.java   | 45 +++++++++-----------
 .../GroupedAggregateRegionObserver.java         |  4 +-
 .../phoenix/filter/DistinctPrefixFilter.java    | 31 ++++++++++----
 .../apache/phoenix/filter/SkipScanFilter.java   |  4 +-
 .../org/apache/phoenix/schema/RowKeySchema.java | 20 +++++----
 5 files changed, 61 insertions(+), 43 deletions(-)
----------------------------------------------------------------------
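
A note on what PHOENIX-4742 is guarding against, since the one-line summary is terse:
DistinctPrefixFilter remembers the last distinct row key prefix it returned and, in
getNextCellHint(), builds the seek key for the next distinct prefix by appending a 0x00
separator to the remembered prefix and incrementing the result with ByteUtil.nextKey().
That is fine for an ascending variable-width column, but a DESC column uses 0xFF as its
separator, and a trailing null prefix column leaves the remembered prefix short, so the
old hint could sort before the row the scanner is already positioned on, i.e. a backwards
seek. The patch below pads one separator per missing null column and drops the trailing
separator in the DESC/trailing-null cases before incrementing. The following sketch is
illustrative only -- plain Java rather than Phoenix code, with made-up byte values -- and
just demonstrates the ordering problem:

    import java.util.Arrays;

    // Illustrative only -- not Phoenix code. Shows why nextKey(prefix + 0x00)
    // can sort *before* the current row when the prefix column is DESC
    // (separator 0xFF), while nextKey(prefix) does not.
    public class SeekHintSketch {

        // Unsigned lexicographic comparison, the ordering HBase uses for row keys.
        static int compare(byte[] a, byte[] b) {
            int len = Math.min(a.length, b.length);
            for (int i = 0; i < len; i++) {
                int d = (a[i] & 0xFF) - (b[i] & 0xFF);
                if (d != 0) return d;
            }
            return a.length - b.length;
        }

        // Simplified stand-in for ByteUtil.nextKey: increment the last byte.
        static byte[] nextKey(byte[] key) {
            byte[] next = Arrays.copyOf(key, key.length);
            next[next.length - 1]++;
            return next;
        }

        static byte[] concat(byte[] a, byte... b) {
            byte[] r = Arrays.copyOf(a, a.length + b.length);
            System.arraycopy(b, 0, r, a.length, b.length);
            return r;
        }

        public static void main(String[] args) {
            byte[] prefix  = { 0x61, 0x62 };                            // encoded prefix "ab"
            byte[] rowAsc  = concat(prefix, (byte) 0x00, (byte) 0x01);  // ASC: 0x00 separator
            byte[] rowDesc = concat(prefix, (byte) 0xFF, (byte) 0x01);  // DESC: 0xFF separator

            byte[] oldHint = nextKey(concat(prefix, (byte) 0x00));      // old code: pad 0x00, increment
            byte[] newHint = nextKey(prefix);                           // what the fix yields for DESC

            System.out.println(compare(oldHint, rowAsc)  > 0);  // true: forward seek, fine
            System.out.println(compare(oldHint, rowDesc) > 0);  // false: hint is a lesser key
            System.out.println(compare(newHint, rowDesc) > 0);  // true: forward seek again
        }
    }

Run as-is it prints true, false, true; the false line is the "seeks to lesser key" of the
JIRA title.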


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d7533f70/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
index 9d6a450..578a3af 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/OrderByIT.java
@@ -27,10 +27,10 @@ import static org.apache.phoenix.util.TestUtil.ROW7;
 import static org.apache.phoenix.util.TestUtil.ROW8;
 import static org.apache.phoenix.util.TestUtil.ROW9;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.assertResultSet;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.apache.phoenix.util.TestUtil.assertResultSet;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -663,7 +663,6 @@ public class OrderByIT extends ParallelStatsDisabledIT {
             conn = DriverManager.getConnection(getUrl(), props);
 
             String tableName=generateUniqueName();
-            conn.createStatement().execute("DROP TABLE if exists "+tableName);
             String sql="CREATE TABLE "+tableName+" ( "+
                 "ORGANIZATION_ID VARCHAR,"+
                 "CONTAINER_ID VARCHAR,"+
@@ -871,26 +870,25 @@ public class OrderByIT extends ParallelStatsDisabledIT {
     }
 
     @Test
-    public void testOrderByReverseOptimizationBug3491() throws Exception {
+    public void testOrderByReverseOptimization() throws Exception {
         for(boolean salted: new boolean[]{true,false}) {
-            doTestOrderByReverseOptimizationBug3491(salted,true,true,true);
-            doTestOrderByReverseOptimizationBug3491(salted,true,true,false);
-            doTestOrderByReverseOptimizationBug3491(salted,true,false,true);
-            doTestOrderByReverseOptimizationBug3491(salted,true,false,false);
-            doTestOrderByReverseOptimizationBug3491(salted,false,true,true);
-            doTestOrderByReverseOptimizationBug3491(salted,false,true,false);
-            doTestOrderByReverseOptimizationBug3491(salted,false,false,true);
-            doTestOrderByReverseOptimizationBug3491(salted,false,false,false);
+            doTestOrderByReverseOptimization(salted,true,true,true);
+            doTestOrderByReverseOptimization(salted,true,true,false);
+            doTestOrderByReverseOptimization(salted,true,false,true);
+            doTestOrderByReverseOptimization(salted,true,false,false);
+            doTestOrderByReverseOptimization(salted,false,true,true);
+            doTestOrderByReverseOptimization(salted,false,true,false);
+            doTestOrderByReverseOptimization(salted,false,false,true);
+            doTestOrderByReverseOptimization(salted,false,false,false);
         }
     }
 
-    private void doTestOrderByReverseOptimizationBug3491(boolean salted,boolean desc1,boolean desc2,boolean desc3) throws Exception {
+    private void doTestOrderByReverseOptimization(boolean salted,boolean desc1,boolean desc2,boolean desc3) throws Exception {
         Connection conn = null;
         try {
             Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
             conn = DriverManager.getConnection(getUrl(), props);
             String tableName=generateUniqueName();
-            conn.createStatement().execute("DROP TABLE if exists "+tableName);
             String sql="CREATE TABLE "+tableName+" ( "+
                     "ORGANIZATION_ID INTEGER NOT NULL,"+
                     "CONTAINER_ID INTEGER NOT NULL,"+
@@ -965,26 +963,25 @@ public class OrderByIT extends ParallelStatsDisabledIT {
     }
 
     @Test
-    public void testOrderByReverseOptimizationWithNUllsLastBug3491() throws Exception{
+    public void testOrderByReverseOptimizationWithNullsLast() throws Exception{
         for(boolean salted: new boolean[]{true,false}) {
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,true,true);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,true,false);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,false,true);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,true,false,false);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,true,true);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,true,false);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,false,true);
-            doTestOrderByReverseOptimizationWithNUllsLastBug3491(salted,false,false,false);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,true,true,true);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,true,true,false);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,true,false,true);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,true,false,false);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,false,true,true);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,false,true,false);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,false,false,true);
+            doTestOrderByReverseOptimizationWithNullsLast(salted,false,false,false);
         }
     }
 
-    private void doTestOrderByReverseOptimizationWithNUllsLastBug3491(boolean salted,boolean desc1,boolean desc2,boolean desc3) throws Exception {
+    private void doTestOrderByReverseOptimizationWithNullsLast(boolean salted,boolean desc1,boolean desc2,boolean desc3) throws Exception {
         Connection conn = null;
         try {
             Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
             conn = DriverManager.getConnection(getUrl(), props);
             String tableName=generateUniqueName();
-            conn.createStatement().execute("DROP TABLE if exists "+tableName);
             String sql="CREATE TABLE "+tableName+" ( "+
                     "ORGANIZATION_ID VARCHAR,"+
                     "CONTAINER_ID VARCHAR,"+

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d7533f70/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index a6fa6a5..86ab275 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -534,8 +534,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                                 currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN,
                                 AGG_TIMESTAMP, value, 0, value.length);
                     results.add(keyValue);
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate row: "
+                    if (logger.isInfoEnabled()) {
+                        logger.info(LogUtil.addCustomAnnotations("Adding new aggregate row: "
                                 + keyValue
                                 + ",for current key "
                                 + Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d7533f70/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
index 1280cb5..fd376c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/DistinctPrefixFilter.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Writable;
 import org.apache.phoenix.schema.RowKeySchema;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.ByteUtil;
 
@@ -38,8 +40,9 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
 
     private int offset;
     private RowKeySchema schema;
-    private int prefixLengh;
+    private int prefixLength;
     private boolean filterAll = false;
+    private int lastPosition;
     private final ImmutableBytesWritable lastKey = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY, -1, -1);
 
     public DistinctPrefixFilter() {
@@ -47,7 +50,7 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
 
     public DistinctPrefixFilter(RowKeySchema schema, int prefixLength) {
         this.schema = schema;
-        this.prefixLengh = prefixLength;
+        this.prefixLength = prefixLength;
     }
 
     public void setOffset(int offset) {
@@ -60,13 +63,14 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
 
         // First determine the prefix based on the schema
         int maxOffset = schema.iterator(v.getRowArray(), v.getRowOffset()+offset, v.getRowLength()-offset, ptr);
-        schema.next(ptr, 0, maxOffset, prefixLengh - 1);
+        int position = schema.next(ptr, 0, maxOffset, prefixLength - 1);
 
         // now check whether we have seen this prefix before
         if (lastKey.getLength() != ptr.getLength() || !Bytes.equals(ptr.get(), ptr.getOffset(),
                 ptr.getLength(), lastKey.get(), lastKey.getOffset(), ptr.getLength())) {
             // if we haven't seen this prefix, include the row and remember this prefix
             lastKey.set(ptr.get(), ptr.getOffset(), ptr.getLength());
+            lastPosition = position - 1;
             return ReturnCode.INCLUDE;
         }
         // we've seen this prefix already, seek to the next
@@ -75,7 +79,8 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
 
     @Override
     public Cell getNextCellHint(Cell v) throws IOException {
-        PDataType<?> type = schema.getField(prefixLengh-1).getDataType();
+        Field field = schema.getField(prefixLength - 1);
+        PDataType<?> type = field.getDataType();
 
         ImmutableBytesWritable tmp;
         // In the following we make sure we copy the key at most once
@@ -83,8 +88,10 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
         if (offset > 0) {
             // make space to copy the missing offset, also 0-pad here if needed
             // (since we're making a copy anyway)
+            // We need to pad all null columns, otherwise we'll potentially
+            // skip rows.
             byte[] tmpKey = new byte[offset + lastKey.getLength() + 
-                                     (reversed || type.isFixedWidth() ? 0 : 1)];
+                                     (reversed || type.isFixedWidth() || field.getSortOrder() == SortOrder.DESC ? 0 : 1) + (prefixLength - 1 - lastPosition)];
             System.arraycopy(v.getRowArray(), v.getRowOffset(), tmpKey, 0, offset);
             System.arraycopy(lastKey.get(), lastKey.getOffset(), tmpKey, offset, lastKey.getLength());
             tmp = new ImmutableBytesWritable(tmpKey);
@@ -105,7 +112,15 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
                 } else {
                     // pad with a 0x00 byte (makes a copy)
                     tmp = new ImmutableBytesWritable(lastKey);
-                    ByteUtil.nullPad(tmp, tmp.getLength() + 1);
+                    ByteUtil.nullPad(tmp, tmp.getLength() + prefixLength - lastPosition);
+                    // Trim back length if:
+                    // 1) field is descending since the separator byte is 0xFF
+                    // 2) last key has trailing null 
+                    // Otherwise, in both cases we'd potentially be seeking to a row before
+                    // our current key.
+                    if (field.getSortOrder() == SortOrder.DESC || prefixLength - lastPosition > 1) {
+                        tmp.set(tmp.get(),tmp.getOffset(),tmp.getLength()-1);
+                    }
                 }
                 // calculate the next key
                 if (!ByteUtil.nextKey(tmp.get(), tmp.getOffset(), tmp.getLength())) {
@@ -126,7 +141,7 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
     public void write(DataOutput out) throws IOException {
         out.writeByte(VERSION);
         schema.write(out);
-        out.writeInt(prefixLengh);
+        out.writeInt(prefixLength);
     }
 
     @Override
@@ -134,7 +149,7 @@ public class DistinctPrefixFilter extends FilterBase implements Writable {
         in.readByte(); // ignore
         schema = new RowKeySchema();
         schema.readFields(in);
-        prefixLengh = in.readInt();
+        prefixLength = in.readInt();
     }
 
     @Override

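To make the code path above concrete: DistinctPrefixFilter is the server-side filter used
for DISTINCT / GROUP BY over a leading prefix of the primary key, and the fix matters when
the last prefix column is DESC and/or null. Below is a hedged, standalone JDBC sketch of
that shape of query; it is not taken from the patch, the connection URL, table and column
names are assumptions, and it expects the Phoenix client driver on the classpath:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    // Illustrative client; table/column names and the JDBC URL are invented.
    public class DistinctPrefixExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS T_DESC_PREFIX (" +
                        "ORGANIZATION_ID VARCHAR NOT NULL, " +
                        "CONTAINER_ID VARCHAR, " +        // nullable, sorted DESC below
                        "SCORE DOUBLE NOT NULL, " +
                        "CONSTRAINT PK PRIMARY KEY (ORGANIZATION_ID, CONTAINER_ID DESC, SCORE))");
                stmt.execute("UPSERT INTO T_DESC_PREFIX VALUES ('org1', 'c1', 1.0)");
                stmt.execute("UPSERT INTO T_DESC_PREFIX VALUES ('org1', 'c2', 2.0)");
                stmt.execute("UPSERT INTO T_DESC_PREFIX VALUES ('org2', null, 3.0)");
                conn.commit();
                // DISTINCT over a PK prefix whose last column is DESC and nullable --
                // the situation in which the old seek hint could point at a lesser key.
                try (ResultSet rs = stmt.executeQuery(
                        "SELECT DISTINCT ORGANIZATION_ID, CONTAINER_ID FROM T_DESC_PREFIX")) {
                    while (rs.next()) {
                        System.out.println(rs.getString(1) + " / " + rs.getString(2));
                    }
                }
            }
        }
    }
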
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d7533f70/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index c9d951c..47da151 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -489,7 +489,9 @@ public class SkipScanFilter extends FilterBase implements Writable {
                 }
                 i++;
                 // If we run out of slots in our key, it means we have a partial key.
-                if (schema.next(ptr, ScanUtil.getRowKeyPosition(slotSpan, i), maxOffset, slotSpan[i]) == null) {
+                int rowKeyPos = ScanUtil.getRowKeyPosition(slotSpan, i);
+                int slotSpans = slotSpan[i];
+                if (schema.next(ptr, rowKeyPos, maxOffset, slotSpans) < rowKeyPos + slotSpans) {
                     // If the rest of the slots are checking for IS NULL, then break because
                     // that's the case (since we don't store trailing nulls).
                     if (allTrailingNulls(i)) {

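The SkipScanFilter change above and the RowKeySchema change below are two sides of the same
contract change: next(ptr, position, maxOffset, extraSpan) now returns an int -- roughly the
position of the last field it could read a value for -- instead of a Boolean, so callers
detect a partial key by checking whether the returned position falls short of the requested
one rather than by checking for null. A toy stand-in, purely to show the caller-side pattern
(the Schema interface and the numbers are invented, not the Phoenix API):

    // Toy model of the caller-side pattern; not the Phoenix API.
    public class PartialKeyCheckSketch {

        // Invented single-method stand-in for RowKeySchema.next(ptr, pos, maxOffset, extraSpan).
        interface Schema {
            int next(int position, int extraSpan);
        }

        public static void main(String[] args) {
            // Pretend only fields 0..2 of the row key are present; anything past
            // that is missing, i.e. the key is partial.
            Schema schema = (position, extraSpan) -> Math.min(position + extraSpan, 2);

            int position = 1;
            int slotSpan = 3;
            // Old style: schema.next(...) == null meant we ran out of key.
            // New style: compare the returned position against what was asked for.
            if (schema.next(position, slotSpan) < position + slotSpan) {
                System.out.println("partial key: trailing fields are missing");
            }
        }
    }

The real method also expands ptr across the extra fields it reads; the stub only models the
return value.
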
http://git-wip-us.apache.org/repos/asf/phoenix/blob/d7533f70/phoenix-core/src/main/java/org/apache/phoenix/schema/RowKeySchema.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/RowKeySchema.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/RowKeySchema.java
index 72ebddd..1a44ce1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/RowKeySchema.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/RowKeySchema.java
@@ -197,10 +197,11 @@ public class RowKeySchema extends ValueSchema {
      * @return true if a value was found and ptr was set, false if the value is null and ptr was not
      * set, and null if the value is null and there are no more values
      */
-    public Boolean next(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) {
-      Boolean returnValue = next(ptr, position, maxOffset);
-        readExtraFields(ptr, position + 1, maxOffset, extraSpan);
-        return returnValue;
+    public int next(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) {
+        if (next(ptr, position, maxOffset) == null) {
+            return position-1;
+        }
+        return readExtraFields(ptr, position + 1, maxOffset, extraSpan);
     }
     
     @edu.umd.cs.findbugs.annotations.SuppressWarnings(
@@ -337,18 +338,21 @@ public class RowKeySchema extends ValueSchema {
      * @param maxOffset  the maximum offset into the bytes pointer to allow
      * @param extraSpan  the number of extra fields to expand the ptr to contain.
      */
-    private void readExtraFields(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) {
+    private int readExtraFields(ImmutableBytesWritable ptr, int position, int maxOffset, int extraSpan) {
         int initialOffset = ptr.getOffset();
 
-        for(int i = 0; i < extraSpan; i++) {
-            Boolean returnValue = next(ptr, position + i, maxOffset);
+        int i = 0;
+        Boolean hasValue = Boolean.FALSE;
+        for(i = 0; i < extraSpan; i++) {
+            hasValue = next(ptr, position + i, maxOffset);
 
-            if(returnValue == null) {
+            if(hasValue == null) {
                 break;
             }
         }
 
         int finalLength = ptr.getOffset() - initialOffset + ptr.getLength();
         ptr.set(ptr.get(), initialOffset, finalLength);
+        return position + i - (Boolean.FALSE.equals(hasValue) ? 1 : 0);
     }
 }


[2/2] phoenix git commit: PHOENIX-4744 Reduce parallelism in integration test runs

Posted by ja...@apache.org.
PHOENIX-4744 Reduce parallelism in integration test runs


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9066ce39
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9066ce39
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9066ce39

Branch: refs/heads/4.x-HBase-1.3
Commit: 9066ce39508efcb5eda118012b82ad3b4e7bdc46
Parents: d7533f7
Author: James Taylor <jt...@salesforce.com>
Authored: Fri May 18 08:50:38 2018 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri May 18 17:07:17 2018 -0700

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9066ce39/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 96eb1ac..95654b0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -122,7 +122,7 @@
 
     <!-- Plugin options -->
     <numForkedUT>8</numForkedUT>
-    <numForkedIT>8</numForkedIT>
+    <numForkedIT>4</numForkedIT>
     <it.failIfNoSpecifiedTests>false</it.failIfNoSpecifiedTests>
     <surefire.failIfNoSpecifiedTests>false</surefire.failIfNoSpecifiedTests>