Posted to commits@phoenix.apache.org by st...@apache.org on 2020/10/23 08:02:37 UTC

[phoenix] branch master updated: PHOENIX-6197 AggregateIT and StoreNullsIT hangs

This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 3d5cb8d  PHOENIX-6197 AggregateIT and StoreNullsIT hangs
3d5cb8d is described below

commit 3d5cb8db1fd3ce192c721a1c66dacaafb11e553b
Author: Richard Antal <an...@gmail.com>
AuthorDate: Wed Oct 21 16:24:35 2020 +0200

    PHOENIX-6197 AggregateIT and StoreNullsIT hangs
---
 .../it/java/org/apache/phoenix/end2end/StoreNullsIT.java   | 14 ++++++++++++++
 .../src/test/java/org/apache/phoenix/util/TestUtil.java    | 10 ++++++++--
 2 files changed, 22 insertions(+), 2 deletions(-)
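
For context, the fix below works by shrinking Phoenix's max lookback window before the test driver is set up, so that major compaction can actually purge deleted rows instead of retaining them for the lookback period, which appears to be what left these tests waiting. A minimal sketch of that pattern, using a hypothetical IT class name (everything else is taken from the StoreNullsIT change in the diff):

    package org.apache.phoenix.end2end;

    import java.util.Map;

    import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
    import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
    import org.apache.phoenix.util.ReadOnlyProps;
    import org.junit.BeforeClass;

    // Hypothetical class name; StoreNullsIT applies exactly this pattern in the diff below.
    public class CompactionDependentIT extends ParallelStatsDisabledIT {

        // These tests rely on major compaction physically removing deleted rows,
        // so the max lookback age is overridden to 0 before the test driver starts.
        @BeforeClass
        public static synchronized void doSetup() throws Exception {
            Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
            props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
                    Integer.toString(0));
            setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
        }
    }

The TestUtil change complements this: its marker Put/Delete pair is now written with an explicit timestamp of 0, so the delete marker does not fall inside a lookback window that would otherwise make the helper wait.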

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
index eaf36a4..5e72888 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
@@ -31,6 +31,7 @@ import java.sql.Statement;
 import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.TableName;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
 import org.apache.phoenix.expression.SingleCellColumnExpression;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -52,12 +54,15 @@ import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -78,6 +83,15 @@ public class StoreNullsIT extends ParallelStatsDisabledIT {
     private final String ddlFormat;
     
     private String dataTableName;
+
+    // In this class we depend on the major compaction to remove every deleted row
+    // so we are overriding the PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY to 0
+    @BeforeClass
+    public static synchronized void doSetup() throws Exception {
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+        props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY, Integer.toString(0));
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    }
     
     public StoreNullsIT(boolean mutable, boolean columnEncoded, boolean storeNulls) {
         this.mutable = mutable;
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 1f72bdf..cc60444 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -840,10 +840,16 @@ public class TestUtil {
             byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
            
             Put put = new Put(markerRowKey);
-            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+            long timestamp = 0L;
+            // We do not want to wait an hour because of PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY
+            // So set the timestamp of the put and delete as early as possible
+            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
+                    QueryConstants.EMPTY_COLUMN_VALUE_BYTES, timestamp,
+                    QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
             htable.put(put);
             Delete delete = new Delete(markerRowKey);
-            delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
+            delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
+                    QueryConstants.EMPTY_COLUMN_VALUE_BYTES, timestamp);
             htable.delete(delete);
             htable.close();
             if (table.isTransactional()) {