Posted to commits@phoenix.apache.org by ja...@apache.org on 2017/11/18 19:54:18 UTC

[1/3] phoenix git commit: Revert "PHOENIX-4386 Calculate the estimatedSize of MutationState using Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations"

Repository: phoenix
Updated Branches:
  refs/heads/4.13-HBase-0.98 a561da9d0 -> 7699a030a


Revert "PHOENIX-4386 Calculate the estimatedSize of MutationState using Map<TableRef, Map<ImmutableBytesPtr,RowMutationState>> mutations"

This reverts commit a561da9d0f59a2c5e956fb687f51498a2d374a5d.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/8fc92cb8
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/8fc92cb8
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/8fc92cb8

Branch: refs/heads/4.13-HBase-0.98
Commit: 8fc92cb8390b78c42b13cd5fd38bbcb148d8648f
Parents: a561da9
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Nov 18 11:52:19 2017 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Nov 18 11:52:19 2017 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/MutationStateIT.java | 144 -------------------
 .../org/apache/phoenix/end2end/QueryMoreIT.java |  42 ++++++
 .../apache/phoenix/compile/DeleteCompiler.java  |   6 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   4 +-
 .../apache/phoenix/execute/MutationState.java   |  50 ++-----
 .../org/apache/phoenix/util/KeyValueUtil.java   |  51 +++++--
 6 files changed, 96 insertions(+), 201 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/8fc92cb8/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
deleted file mode 100644
index 2d5f360..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MutationStateIT.java
+++ /dev/null
@@ -1,144 +0,0 @@
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Properties;
-
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.execute.MutationState;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
-import org.junit.Test;
-
-public class MutationStateIT extends ParallelStatsDisabledIT {
-
-    private static final String DDL =
-            " (ORGANIZATION_ID CHAR(15) NOT NULL, SCORE DOUBLE, "
-            + "ENTITY_ID CHAR(15) NOT NULL, TAGS VARCHAR, CONSTRAINT PAGE_SNAPSHOT_PK "
-            + "PRIMARY KEY (ORGANIZATION_ID, ENTITY_ID DESC)) MULTI_TENANT=TRUE";
-
-    private void upsertRows(PhoenixConnection conn, String fullTableName) throws SQLException {
-        PreparedStatement stmt =
-                conn.prepareStatement("upsert into " + fullTableName
-                        + " (organization_id, entity_id, score) values (?,?,?)");
-        for (int i = 0; i < 10000; i++) {
-            stmt.setString(1, "AAAA" + i);
-            stmt.setString(2, "BBBB" + i);
-            stmt.setInt(3, 1);
-            stmt.execute();
-        }
-    }
-
-    @Test
-    public void testMaxMutationSize() throws Exception {
-        Properties connectionProperties = new Properties();
-        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
-        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "1000000");
-        PhoenixConnection connection =
-                (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
-        String fullTableName = generateUniqueName();
-        try (Statement stmt = connection.createStatement()) {
-            stmt.execute(
-                "CREATE TABLE " + fullTableName + DDL);
-        }
-        try {
-            upsertRows(connection, fullTableName);
-            fail();
-        } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(),
-                e.getErrorCode());
-        }
-
-        // set the max mutation size (bytes) to a low value
-        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "1000");
-        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "4");
-        connection =
-                (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
-        try {
-            upsertRows(connection, fullTableName);
-            fail();
-        } catch (SQLException e) {
-            assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCode(),
-                e.getErrorCode());
-        }
-    }
-
-    @Test
-    public void testMutationEstimatedSize() throws Exception {
-        PhoenixConnection conn = (PhoenixConnection) DriverManager.getConnection(getUrl());
-        conn.setAutoCommit(false);
-        String fullTableName = generateUniqueName();
-        try (Statement stmt = conn.createStatement()) {
-            stmt.execute(
-                "CREATE TABLE " + fullTableName + DDL);
-        }
-
-        // upserting rows should increase the mutation state size
-        MutationState state = conn.unwrap(PhoenixConnection.class).getMutationState();
-        long prevEstimatedSize = state.getEstimatedSize();
-        upsertRows(conn, fullTableName);
-        assertTrue("Mutation state size should have increased",
-            state.getEstimatedSize() > prevEstimatedSize);
-        
-        
-        // after commit or rollback the size should be zero
-        conn.commit();
-        assertEquals("Mutation state size should be zero after commit", 0,
-            state.getEstimatedSize());
-        upsertRows(conn, fullTableName);
-        conn.rollback();
-        assertEquals("Mutation state size should be zero after rollback", 0,
-            state.getEstimatedSize());
-
-        // upsert one row
-        PreparedStatement stmt =
-                conn.prepareStatement("upsert into " + fullTableName
-                        + " (organization_id, entity_id, score) values (?,?,?)");
-        stmt.setString(1, "ZZZZ");
-        stmt.setString(2, "YYYY");
-        stmt.setInt(3, 1);
-        stmt.execute();
-        assertTrue("Mutation state size should be greater than zero ", state.getEstimatedSize()>0);
-
-        prevEstimatedSize = state.getEstimatedSize();
-        // upserting the same row twice should not increase the size
-        stmt.setString(1, "ZZZZ");
-        stmt.setString(2, "YYYY");
-        stmt.setInt(3, 1);
-        stmt.execute();
-        assertEquals(
-            "Mutation state size should only increase 4 bytes (size of the new statement index)",
-            prevEstimatedSize + 4, state.getEstimatedSize());
-        
-        prevEstimatedSize = state.getEstimatedSize();
-        // changing the value of one column of a row to a larger value should increase the estimated size 
-        stmt =
-                conn.prepareStatement("upsert into " + fullTableName
-                        + " (organization_id, entity_id, score, tags) values (?,?,?,?)");
-        stmt.setString(1, "ZZZZ");
-        stmt.setString(2, "YYYY");
-        stmt.setInt(3, 1);
-        stmt.setString(4, "random text string random text string random text string");
-        stmt.execute();
-        assertTrue("Mutation state size should increase", prevEstimatedSize+4 < state.getEstimatedSize());
-        
-        prevEstimatedSize = state.getEstimatedSize();
-        // changing the value of one column of a row to a smaller value should decrease the estimated size 
-        stmt =
-                conn.prepareStatement("upsert into " + fullTableName
-                        + " (organization_id, entity_id, score, tags) values (?,?,?,?)");
-        stmt.setString(1, "ZZZZ");
-        stmt.setString(2, "YYYY");
-        stmt.setInt(3, 1);
-        stmt.setString(4, "");
-        stmt.execute();
-        assertTrue("Mutation state size should decrease", prevEstimatedSize+4 > state.getEstimatedSize());
-    }
-    
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8fc92cb8/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 9109c12..77cb19f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.Date;
@@ -38,6 +39,7 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -508,6 +510,46 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
         assertEquals(4L, connection.getMutationState().getBatchCount());
     }
     
+    @Test
+    public void testMaxMutationSize() throws Exception {
+        Properties connectionProperties = new Properties();
+        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
+        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "1000000");
+        PhoenixConnection connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
+        String fullTableName = generateUniqueName();
+        try (Statement stmt = connection.createStatement()) {
+            stmt.execute("CREATE TABLE " + fullTableName + "(\n" +
+                "    ORGANIZATION_ID CHAR(15) NOT NULL,\n" +
+                "    SCORE DOUBLE NOT NULL,\n" +
+                "    ENTITY_ID CHAR(15) NOT NULL\n" +
+                "    CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" +
+                "        ORGANIZATION_ID,\n" +
+                "        SCORE DESC,\n" +
+                "        ENTITY_ID DESC\n" +
+                "    )\n" +
+                ") MULTI_TENANT=TRUE");
+        }
+        try {
+            upsertRows(connection, fullTableName);
+            fail();
+        }
+        catch(SQLException e) {
+            assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_EXCEEDED.getErrorCode(), e.getErrorCode());
+        }
+        
+        // set the max mutation size (bytes) to a low value
+        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "1000");
+        connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "4");
+        connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
+        try {
+            upsertRows(connection, fullTableName);
+            fail();
+        }
+        catch(SQLException e) {
+            assertEquals(SQLExceptionCode.MAX_MUTATION_SIZE_BYTES_EXCEEDED.getErrorCode(), e.getErrorCode());
+        }
+    }
+
     private void upsertRows(PhoenixConnection conn, String fullTableName) throws SQLException {
         PreparedStatement stmt = conn.prepareStatement("upsert into " + fullTableName +
                 " (organization_id, entity_id, score) values (?,?,?)");

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8fc92cb8/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index 4218c59..f038cda 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -207,7 +207,7 @@ public class DeleteCompiler {
                 // row key will already have its value.
                 // Check for otherTableRefs being empty required when deleting directly from the index
                 if (otherTableRefs.isEmpty() || table.getIndexType() != IndexType.LOCAL) {
-                    mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
+                    mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                 }
                 for (int i = 0; i < otherTableRefs.size(); i++) {
                     PTable otherTable = otherTableRefs.get(i).getTable();
@@ -221,7 +221,7 @@ public class DeleteCompiler {
                     } else {
                         indexPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null, HConstants.LATEST_TIMESTAMP));
                     }
-                    indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, 0, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
+                    indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
                 }
                 if (mutations.size() > maxSize) {
                     throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
@@ -835,4 +835,4 @@ public class DeleteCompiler {
             };
         }
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8fc92cb8/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index dcae16f..f5e2ae0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -119,7 +119,6 @@ public class UpsertCompiler {
             PTable table, Map<ImmutableBytesPtr, RowMutationState> mutation,
             PhoenixStatement statement, boolean useServerTimestamp, IndexMaintainer maintainer,
             byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns) throws SQLException {
-        long columnValueSize = 0;
         Map<PColumn,byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
         byte[][] pkValues = new byte[table.getPKColumns().size()][];
         // If the table uses salting, the first byte is the salting byte, set to an empty array
@@ -149,7 +148,6 @@ public class UpsertCompiler {
                 }
             } else {
                 columnValues.put(column, value);
-                columnValueSize += (column.getEstimatedSize() + value.length);
             }
         }
         ImmutableBytesPtr ptr = new ImmutableBytesPtr();
@@ -167,7 +165,7 @@ public class UpsertCompiler {
                 ptr.set(ScanRanges.prefixKey(ptr.get(), 0, regionPrefix, regionPrefix.length));
             }
         } 
-        mutation.put(ptr, new RowMutationState(columnValues, columnValueSize, statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
+        mutation.put(ptr, new RowMutationState(columnValues, statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
     }
     
     public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8fc92cb8/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index c342e4a..1e5a8ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -94,7 +94,6 @@ import org.apache.phoenix.util.SQLCloseable;
 import org.apache.phoenix.util.SQLCloseables;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.phoenix.util.SizedUtil;
 import org.apache.phoenix.util.TransactionUtil;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.TraceScope;
@@ -196,13 +195,9 @@ public class MutationState implements SQLCloseable {
             this.mutations.put(table, mutations);
         }
         this.numRows = mutations.size();
-        this.estimatedSize = KeyValueUtil.getEstimatedRowMutationSize(this.mutations);
+        this.estimatedSize = KeyValueUtil.getEstimatedRowSize(table, mutations);
         throwIfTooBig();
     }
-    
-    public long getEstimatedSize() {
-        return estimatedSize;
-    }
 
     public long getMaxSize() {
         return maxSize;
@@ -442,16 +437,9 @@ public class MutationState implements SQLCloseable {
         this.sizeOffset += newMutationState.sizeOffset;
         int oldNumRows = this.numRows;
         joinMutationState(newMutationState.mutations, this.mutations);
+        // here we increment the estimated size by the fraction of new rows we added from the newMutationState 
         if (newMutationState.numRows>0) {
-            // if we added all the rows from newMutationState we can just increment the
-            // estimatedSize by newMutationState.estimatedSize
-            if (newMutationState.numRows == this.numRows-oldNumRows) {
-                this.estimatedSize +=  newMutationState.estimatedSize;
-            }
-            // we merged the two mutation states so we need to recalculate the size
-            else {
-                this.estimatedSize = KeyValueUtil.getEstimatedRowMutationSize(this.mutations);
-            }
+            this.estimatedSize += ((double)(this.numRows-oldNumRows)/newMutationState.numRows) * newMutationState.estimatedSize;
         }
         if (!newMutationState.txMutations.isEmpty()) {
             if (txMutations.isEmpty()) {
@@ -987,6 +975,8 @@ public class MutationState implements SQLCloseable {
                 long mutationCommitTime = 0;
                 long numFailedMutations = 0;;
                 long startTime = 0;
+                long startNumRows = numRows;
+                long startEstimatedSize = estimatedSize;
                 do {
                     TableRef origTableRef = tableInfo.getOrigTableRef();
                     PTable table = origTableRef.getTable();
@@ -1032,13 +1022,13 @@ public class MutationState implements SQLCloseable {
                         GLOBAL_MUTATION_COMMIT_TIME.update(mutationCommitTime);
                         numFailedMutations = 0;
                         
-                        // Remove batches as we process them
-                        mutations.remove(origTableRef);
                         if (tableInfo.isDataTable()) {
                             numRows -= numMutations;
-                            // recalculate the estimated size
-                            estimatedSize = KeyValueUtil.getEstimatedRowMutationSize(mutations);
+                            // decrement estimated size by the fraction of rows we sent to hbase
+                            estimatedSize -= ((double)numMutations/startNumRows)*startEstimatedSize;
                         }
+                        // Remove batches as we process them
+                        mutations.remove(origTableRef);
                     } catch (Exception e) {
                     	mutationCommitTime = System.currentTimeMillis() - startTime;
                         serverTimestamp = ServerUtil.parseServerTimestamp(e);
@@ -1437,9 +1427,8 @@ public class MutationState implements SQLCloseable {
         private int[] statementIndexes;
         @Nonnull private final RowTimestampColInfo rowTsColInfo;
         private byte[] onDupKeyBytes;
-        private long colValuesSize;
         
-        public RowMutationState(@Nonnull Map<PColumn,byte[]> columnValues, long colValuesSize, int statementIndex, @Nonnull RowTimestampColInfo rowTsColInfo,
+        public RowMutationState(@Nonnull Map<PColumn,byte[]> columnValues, int statementIndex, @Nonnull RowTimestampColInfo rowTsColInfo,
                 byte[] onDupKeyBytes) {
             checkNotNull(columnValues);
             checkNotNull(rowTsColInfo);
@@ -1447,12 +1436,6 @@ public class MutationState implements SQLCloseable {
             this.statementIndexes = new int[] {statementIndex};
             this.rowTsColInfo = rowTsColInfo;
             this.onDupKeyBytes = onDupKeyBytes;
-            this.colValuesSize = colValuesSize;
-        }
-
-        public long calculateEstimatedSize() {
-            return colValuesSize + statementIndexes.length * SizedUtil.INT_SIZE + SizedUtil.LONG_SIZE
-                    + (onDupKeyBytes != null ? onDupKeyBytes.length : 0);
         }
 
         byte[] getOnDupKeyBytes() {
@@ -1471,16 +1454,7 @@ public class MutationState implements SQLCloseable {
             // If we already have a row and the new row has an ON DUPLICATE KEY clause
             // ignore the new values (as that's what the server will do).
             if (newRow.onDupKeyBytes == null) {
-                // increment the column value size by the new row column value size
-                colValuesSize+=newRow.colValuesSize;
-                for (Map.Entry<PColumn,byte[]> entry : newRow.columnValues.entrySet()) {
-                    PColumn col = entry.getKey();
-                    byte[] oldValue = columnValues.put(col, entry.getValue());
-                    if (oldValue!=null) {
-                        // decrement column value size by the size of all column values that were replaced
-                        colValuesSize-=(col.getEstimatedSize() + oldValue.length);
-                    }
-                }
+                getColumnValues().putAll(newRow.getColumnValues());
             }
             // Concatenate ON DUPLICATE KEY bytes to allow multiple
             // increments of the same row in the same commit batch.
@@ -1492,7 +1466,7 @@ public class MutationState implements SQLCloseable {
         RowTimestampColInfo getRowTimestampColInfo() {
             return rowTsColInfo;
         }
-
+       
     }
     
     public ReadMetricQueue getReadMetricQueue() {
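
For reference, the fraction-based bookkeeping restored in join() and send() above can be illustrated with made-up numbers: if a joined MutationState contributed 200 rows with an estimated 1,000,000 bytes and 150 of those rows were actually new to this state, the estimate grows by 150/200 of 1,000,000 = 750,000 bytes; flushing 40 of the 100 rows buffered at the start of a send decrements the estimate by 40% of the starting estimate. A self-contained sketch of that arithmetic (variable names mirror the MutationState fields):

    public class EstimatedSizeMath {
        public static void main(String[] args) {
            // join(): the new state had 200 rows / 1,000,000 estimated bytes; 150 rows were new here
            long estimatedSize = 5_000_000L;
            int oldNumRows = 300, numRows = 450;            // 450 - 300 = 150 rows actually added
            int newStateNumRows = 200;
            long newStateEstimatedSize = 1_000_000L;
            estimatedSize += (long) (((double) (numRows - oldNumRows) / newStateNumRows) * newStateEstimatedSize);
            System.out.println(estimatedSize);              // 5750000

            // send(): 40 of the 100 rows buffered at the start of the flush were sent to HBase
            long startEstimatedSize = estimatedSize;
            long startNumRows = 100, numMutations = 40;
            estimatedSize -= (long) (((double) numMutations / startNumRows) * startEstimatedSize);
            System.out.println(estimatedSize);              // 3450000, i.e. 60% of the starting estimate
        }
    }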

http://git-wip-us.apache.org/repos/asf/phoenix/blob/8fc92cb8/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
index 318c9d6..2dfe1b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/KeyValueUtil.java
@@ -30,10 +30,14 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.types.PArrayDataTypeEncoder;
 
 /**
  * 
@@ -183,26 +187,47 @@ public class KeyValueUtil {
     }
 
     /**
-     * Estimates the size of rows stored in RowMutationState (in memory)
+     * Estimates the storage size of a row
      * @param mutations map from table to row to RowMutationState
      * @return estimated row size
      */
     public static long
-            getEstimatedRowMutationSize(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableMutationMap) {
+            getEstimatedRowSize(TableRef tableRef, Map<ImmutableBytesPtr, RowMutationState> mutations) {
         long size = 0;
-        // iterate over table
-        for (Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> tableEntry : tableMutationMap.entrySet()) {
-            // iterate over rows
-            for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : tableEntry.getValue().entrySet()) {
-                size += calculateRowMutationSize(rowEntry);
+        PTable table = tableRef.getTable();
+        // iterate over rows
+        for (Entry<ImmutableBytesPtr, RowMutationState> rowEntry : mutations.entrySet()) {
+            int rowLength = rowEntry.getKey().getLength();
+            Map<PColumn, byte[]> colValueMap = rowEntry.getValue().getColumnValues();
+            switch (table.getImmutableStorageScheme()) {
+            case ONE_CELL_PER_COLUMN:
+                // iterate over columns
+                for (Entry<PColumn, byte[]> colValueEntry : colValueMap.entrySet()) {
+                    PColumn pColumn = colValueEntry.getKey();
+                    size +=
+                            KeyValue.getKeyValueDataStructureSize(rowLength,
+                                pColumn.getFamilyName().getBytes().length,
+                                pColumn.getColumnQualifierBytes().length,
+                                colValueEntry.getValue().length);
+                }
+                break;
+            case SINGLE_CELL_ARRAY_WITH_OFFSETS:
+                // we store all the column values in a single key value that contains all the
+                // column values followed by an offset array
+                size +=
+                        PArrayDataTypeEncoder.getEstimatedByteSize(table, rowLength,
+                            colValueMap);
+                break;
             }
+            // count the empty key value
+            Pair<byte[], byte[]> emptyKeyValueInfo =
+                    EncodedColumnsUtil.getEmptyKeyValueInfo(table);
+            size +=
+                    KeyValue.getKeyValueDataStructureSize(rowLength,
+                        SchemaUtil.getEmptyColumnFamilyPtr(table).getLength(),
+                        emptyKeyValueInfo.getFirst().length,
+                        emptyKeyValueInfo.getSecond().length);
         }
         return size;
     }
-
-    private static long calculateRowMutationSize(Entry<ImmutableBytesPtr, RowMutationState> rowEntry) {
-        int rowLength = rowEntry.getKey().getLength();
-        long colValuesLength = rowEntry.getValue().calculateEstimatedSize();
-        return (rowLength + colValuesLength);
-    }
 }
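
For reference, a minimal sketch (not part of this commit) of the per-cell arithmetic the restored getEstimatedRowSize performs for the ONE_CELL_PER_COLUMN storage scheme. KeyValue.getKeyValueDataStructureSize comes from HBase and accounts for the full KeyValue layout (key length, row, family, qualifier, timestamp, type, and value); the row, family, qualifier, and value lengths below are illustrative only.

    import org.apache.hadoop.hbase.KeyValue;

    public class RowSizeSketch {
        public static void main(String[] args) {
            int rowLength = 15;                              // e.g. CHAR(15) org id as the row key
            long size = 0;
            // one column in the default family "0" (1 byte), 2-byte encoded qualifier, 8-byte DOUBLE value
            size += KeyValue.getKeyValueDataStructureSize(rowLength, 1, 2, 8);
            // every Phoenix row also carries an empty key value in the empty column family
            size += KeyValue.getKeyValueDataStructureSize(rowLength, 1, 2, 0 /* empty value, illustrative */);
            System.out.println("estimated row size: " + size + " bytes");
        }
    }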


[3/3] phoenix git commit: PHOENIX-4387 DefaultColumnValueIT failing in non-US build environments (Pedro Boado)

Posted by ja...@apache.org.
PHOENIX-4387 DefaultColumnValueIT failing in non-US build environments (Pedro Boado)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7699a030
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7699a030
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7699a030

Branch: refs/heads/4.13-HBase-0.98
Commit: 7699a030a030e1462b009583462581c8fbc8b1d4
Parents: cd77306
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Nov 18 11:36:53 2017 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Nov 18 11:53:10 2017 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/DefaultColumnValueIT.java    | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7699a030/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultColumnValueIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultColumnValueIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultColumnValueIT.java
index 62d79bc..59b15d8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultColumnValueIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultColumnValueIT.java
@@ -32,6 +32,7 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Time;
 import java.sql.Timestamp;
+import java.text.DecimalFormatSymbols;
 
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.util.ByteUtil;
@@ -44,6 +45,8 @@ public class DefaultColumnValueIT extends ParallelStatsDisabledIT {
     private String sharedTable1;
     private String sharedTable2;
 
+    private String DEFAULT_CURRENCY_SYMBOL = DecimalFormatSymbols.getInstance().getCurrencySymbol();
+
     @Before
     public void init() {
         sharedTable1 = generateUniqueName();
@@ -583,7 +586,7 @@ public class DefaultColumnValueIT extends ParallelStatsDisabledIT {
                 "pk INTEGER PRIMARY KEY,"
                 + "c1 INTEGER DEFAULT 1 + 9,"
                 + "c2 DOUBLE DEFAULT SQRT(91506.25),"
-                + "c3 DECIMAL DEFAULT TO_NUMBER('$123.33', '\u00A4###.##'),"
+                + "c3 DECIMAL DEFAULT TO_NUMBER('" + DEFAULT_CURRENCY_SYMBOL + "123.33', '\u00A4###.##'),"
                 + "c4 VARCHAR DEFAULT 'AB' || 'CD',"
                 + "c5 CHAR(5) DEFAULT 'E' || 'F',"
                 + "c6 INTEGER DEFAULT \"MONTH\"(TO_TIMESTAMP('2015-6-05'))"
@@ -599,7 +602,7 @@ public class DefaultColumnValueIT extends ParallelStatsDisabledIT {
                 "pk INTEGER NOT NULL,"
                 + "c1 INTEGER NOT NULL DEFAULT 1 + 9,"
                 + "c2 DOUBLE NOT NULL DEFAULT SQRT(91506.25),"
-                + "c3 DECIMAL NOT NULL DEFAULT TO_NUMBER('$123.33', '\u00A4###.##'),"
+                + "c3 DECIMAL NOT NULL DEFAULT TO_NUMBER('" + DEFAULT_CURRENCY_SYMBOL + "123.33', '\u00A4###.##'),"
                 + "c4 VARCHAR NOT NULL DEFAULT 'AB' || 'CD',"
                 + "c5 CHAR(5) NOT NULL DEFAULT 'E' || 'F',"
                 + "c6 INTEGER NOT NULL DEFAULT \"MONTH\"(TO_TIMESTAMP('2015-6-05')),"
@@ -1045,7 +1048,7 @@ public class DefaultColumnValueIT extends ParallelStatsDisabledIT {
                 "pk INTEGER PRIMARY KEY,"
                 + "c1 INTEGER DEFAULT 1 + 9,"
                 + "c2 DOUBLE DEFAULT SQRT(91506.25),"
-                + "c3 DECIMAL DEFAULT TO_NUMBER('$123.33', '\u00A4###.##'),"
+                + "c3 DECIMAL DEFAULT TO_NUMBER('" + DEFAULT_CURRENCY_SYMBOL + "123.33', '\u00A4###.##'),"
                 + "c4 VARCHAR DEFAULT 'AB' || 'CD',"
                 + "c5 CHAR(5) DEFAULT 'E' || 'F',"
                 + "c6 INTEGER DEFAULT \"MONTH\"(TO_TIMESTAMP('2015-6-05'))"


[2/3] phoenix git commit: PHOENIX-4384 Phoenix server jar doesn't include icu4j jars (Shehzaad Nakhoda)

Posted by ja...@apache.org.
PHOENIX-4384 Phoenix server jar doesn't include icu4j jars (Shehzaad Nakhoda)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd773064
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd773064
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd773064

Branch: refs/heads/4.13-HBase-0.98
Commit: cd773064360cc64bae95ec15e0d5371312d1e02b
Parents: 8fc92cb
Author: James Taylor <jt...@salesforce.com>
Authored: Sat Nov 18 11:31:46 2017 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Sat Nov 18 11:53:05 2017 -0800

----------------------------------------------------------------------
 phoenix-core/pom.xml   | 1 +
 phoenix-server/pom.xml | 3 +++
 pom.xml                | 3 ++-
 3 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd773064/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 9fb11b3..553cdd9 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -473,6 +473,7 @@
     <dependency>
       <groupId>com.salesforce.i18n</groupId>
       <artifactId>i18n-util</artifactId>
+      <version>${i18n-util.version}</version>
     </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd773064/phoenix-server/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-server/pom.xml b/phoenix-server/pom.xml
index ff67cbd..b1194f7 100644
--- a/phoenix-server/pom.xml
+++ b/phoenix-server/pom.xml
@@ -135,6 +135,9 @@
                   <include>org.apache.thrift:libthrift</include>
                   <include>com.clearspring.analytics:stream</include>
                   <include>com.salesforce.i18n:i18n-util</include>
+                  <include>com.ibm.icu:icu4j</include>
+                  <include>com.ibm.icu:icu4j-charset</include>
+                  <include>com.ibm.icu:icu4j-localespi</include>
                 </includes>
                   <excludes>
                     <exclude>org.apache.phoenix:phoenix-server</exclude>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd773064/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 4b92cd8..0b6dad5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -106,6 +106,7 @@
     <scala.version>2.11.8</scala.version>
     <scala.binary.version>2.11</scala.binary.version>
     <stream.version>2.9.5</stream.version>
+    <i18n-util.version>1.0.1</i18n-util.version>
     <!-- Test Dependencies -->
     <mockito-all.version>1.8.5</mockito-all.version>
     <junit.version>4.12</junit.version>
@@ -910,7 +911,7 @@
       <dependency>
         <groupId>com.salesforce.i18n</groupId>
         <artifactId>i18n-util</artifactId>
-        <version>1.0.1</version>
+        <version>${i18n-util.version}</version>
       </dependency>
     </dependencies>
   </dependencyManagement>
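
For reference, a small sketch (not part of this commit) for checking that a built phoenix-server jar now bundles the ICU4J classes that i18n-util needs, assuming the shade configuration does not relocate the ICU packages; the default jar path and the representative class name are illustrative.

    import java.util.jar.JarFile;

    public class ShadedJarCheck {
        public static void main(String[] args) throws Exception {
            // pass the actual phoenix-server jar as the first argument
            String jarPath = args.length > 0 ? args[0] : "phoenix-server/target/phoenix-server.jar";
            try (JarFile jar = new JarFile(jarPath)) {
                // com.ibm.icu.text.Collator is a representative class from the icu4j artifact
                boolean present = jar.getEntry("com/ibm/icu/text/Collator.class") != null;
                System.out.println("icu4j bundled: " + present);
            }
        }
    }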