Posted to commits@phoenix.apache.org by td...@apache.org on 2019/03/18 21:01:14 UTC

[phoenix] branch 4.14-HBase-1.3 updated: PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning autocommit on for deletes

This is an automated email from the ASF dual-hosted git repository.

tdsilva pushed a commit to branch 4.14-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.14-HBase-1.3 by this push:
     new 5827f75  PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning autocommit on for deletes
5827f75 is described below

commit 5827f755f89c08df197e00cae0496bba88dff69e
Author: Xinyi Yan <xy...@salesforce.com>
AuthorDate: Tue Mar 5 14:58:23 2019 -0800

    PHOENIX-4900 Modify MAX_MUTATION_SIZE_EXCEEDED and MAX_MUTATION_SIZE_BYTES_EXCEEDED exception message to recommend turning autocommit on for deletes
---
 .../apache/phoenix/end2end/MutationStateIT.java    | 50 +++++++++++++++++++++-
 .../apache/phoenix/exception/SQLExceptionCode.java |  8 +++-
 2 files changed, 55 insertions(+), 3 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
index 36782c1..6030caa 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
@@ -53,7 +53,51 @@ public class MutationStateIT extends ParallelStatsDisabledIT {
     }
 
     @Test
-    public void testMaxMutationSize() throws Exception {
+    public void testDeleteMaxMutationSize() throws SQLException {
+        String tableName = generateUniqueName();
+        int NUMBER_OF_ROWS = 20;
+        String ddl = "CREATE TABLE " + tableName + " (V BIGINT PRIMARY KEY, K BIGINT)";
+        PhoenixConnection conn = (PhoenixConnection) DriverManager.getConnection(getUrl());
+        conn.createStatement().execute(ddl);
+
+        for(int i = 0; i < NUMBER_OF_ROWS; i++) {
+            conn.createStatement().execute(
+                    "UPSERT INTO " + tableName + " VALUES (" + i + ", "+ i + ")");
+            conn.commit();
+        }
+
+        Properties props = new Properties();
+        props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
+                String.valueOf(NUMBER_OF_ROWS / 2));
+        PhoenixConnection connection =
+                (PhoenixConnection) DriverManager.getConnection(getUrl(), props);
+        connection.setAutoCommit(false);
+
+        try {
+            for(int i = 0; i < NUMBER_OF_ROWS; i++) {
+                connection.createStatement().execute(
+                        "DELETE FROM " + tableName + " WHERE K = " + i );
+            }
+        } catch (SQLException e) {
+            assertTrue(e.getMessage().contains(
+                    SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getMessage()));
+        }
+
+        props.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "10");
+        props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "10000");
+        connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), props);
+        connection.setAutoCommit(false);
+
+        try {
+            connection.createStatement().execute("DELETE FROM " + tableName );
+        } catch (SQLException e) {
+            assertTrue(e.getMessage().contains(
+                    SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getMessage()));
+        }
+    }
+
+    @Test
+    public void testUpsertMaxMutationSize() throws Exception {
         Properties connectionProperties = new Properties();
         connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
         connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "1000000");
@@ -70,6 +114,8 @@ public class MutationStateIT extends ParallelStatsDisabledIT {
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(),
                 e.getErrorCode());
+            assertTrue(e.getMessage().contains(
+                    SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getMessage()));
         }
 
         // set the max mutation size (bytes) to a low value
@@ -83,6 +129,8 @@ public class MutationStateIT extends ParallelStatsDisabledIT {
         } catch (SQLException e) {
             assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCode(),
                 e.getErrorCode());
+            assertTrue(e.getMessage().contains(
+                    SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getMessage()));
         }
     }
 
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index d6a70f2..94b9c39 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -444,8 +444,12 @@ public enum SQLExceptionCode {
         "because this client already has the maximum number" +
         " of connections to the target cluster."),
     
-    MAX_MUTATION_SIZE_EXCEEDED(729, "LIM01", "MutationState size is bigger than maximum allowed number of rows"),
-    MAX_MUTATION_SIZE_BYTES_EXCEEDED(730, "LIM02", "MutationState size is bigger than maximum allowed number of bytes"), 
+    MAX_MUTATION_SIZE_EXCEEDED(729, "LIM01", "MutationState size is bigger" +
+            " than maximum allowed number of rows, try upserting rows in smaller batches or " +
+            "using autocommit on for deletes."),
+    MAX_MUTATION_SIZE_BYTES_EXCEEDED(730, "LIM02", "MutationState size is " +
+            "bigger than maximum allowed number of bytes, try upserting rows in smaller batches " +
+            "or using autocommit on for deletes."),
     INSUFFICIENT_MEMORY(999, "50M01", "Unable to allocate enough memory."),
     HASH_JOIN_CACHE_NOT_FOUND(900, "HJ01", "Hash Join cache not found");
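
For readers who hit these LIM01 / LIM02 errors, here is a minimal sketch of the two workarounds the updated messages recommend. It assumes a hypothetical MY_TABLE with columns V and K (mirroring the test's DDL), a local ZooKeeper quorum in the JDBC URL, and illustrative limit values; the property names correspond to QueryServices.MAX_MUTATION_SIZE_ATTRIB and MAX_MUTATION_SIZE_BYTES_ATTRIB.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.util.Properties;

    public class MutationSizeWorkarounds {
        public static void main(String[] args) throws Exception {
            // Hypothetical JDBC URL and table; adjust for your cluster and schema.
            String url = "jdbc:phoenix:localhost:2181";

            Properties props = new Properties();
            // Client-side limits whose breach raises LIM01 / LIM02 (values are illustrative).
            props.setProperty("phoenix.mutate.maxSize", "500000");          // QueryServices.MAX_MUTATION_SIZE_ATTRIB
            props.setProperty("phoenix.mutate.maxSizeBytes", "104857600");  // QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB

            // Workaround 1: turn autocommit on for large DELETEs, so rows are not
            // buffered in the client-side MutationState before being flushed.
            try (Connection conn = DriverManager.getConnection(url, props)) {
                conn.setAutoCommit(true);
                conn.createStatement().execute("DELETE FROM MY_TABLE WHERE V < 1000000");
            }

            // Workaround 2: upsert in smaller batches, committing periodically so the
            // MutationState never exceeds the configured row or byte limits.
            try (Connection conn = DriverManager.getConnection(url, props)) {
                conn.setAutoCommit(false);
                try (PreparedStatement ps =
                             conn.prepareStatement("UPSERT INTO MY_TABLE (V, K) VALUES (?, ?)")) {
                    for (long i = 0; i < 1_000_000L; i++) {
                        ps.setLong(1, i);
                        ps.setLong(2, i);
                        ps.executeUpdate();
                        if (i % 10_000 == 0) {
                            conn.commit();
                        }
                    }
                }
                conn.commit();
            }
        }
    }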