You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by sa...@apache.org on 2016/11/22 02:54:15 UTC
[11/36] phoenix git commit: PHOENIX-3482 Provide a work around for HBASE-17096
PHOENIX-3482 Provide a work around for HBASE-17096
Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a19089b5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a19089b5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a19089b5
Branch: refs/heads/encodecolumns2
Commit: a19089b57e2b9dc29d8b43cbbb3b193116811362
Parents: 62da42a
Author: Samarth <sa...@salesforce.com>
Authored: Wed Nov 16 17:42:30 2016 -0800
Committer: Samarth <sa...@salesforce.com>
Committed: Wed Nov 16 17:42:30 2016 -0800
----------------------------------------------------------------------
.../query/ConnectionQueryServicesImpl.java | 27 +++++++++++---------
pom.xml | 2 +-
2 files changed, 16 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a19089b5/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index f458f09..1203c81 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -79,7 +79,6 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTableInterface;
@@ -87,10 +86,8 @@ import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
@@ -280,7 +277,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
private final boolean isAutoUpgradeEnabled;
private final AtomicBoolean upgradeRequired = new AtomicBoolean(false);
private static final byte[] UPGRADE_MUTEX = "UPGRADE_MUTEX".getBytes();
- private static final byte[] UPGRADE_MUTEX_VALUE = UPGRADE_MUTEX;
+ private static final byte[] UPGRADE_MUTEX_LOCKED = "UPGRADE_MUTEX_LOCKED".getBytes();
+ private static final byte[] UPGRADE_MUTEX_UNLOCKED = "UPGRADE_MUTEX_UNLOCKED".getBytes();
private static interface FeatureSupported {
boolean isSupported(ConnectionQueryServices services);
@@ -2982,6 +2980,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
columnDesc.setTimeToLive(TTL_FOR_MUTEX); // Let mutex expire after some time
tableDesc.addFamily(columnDesc);
admin.createTable(tableDesc);
+ try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
+ Put put = new Put(rowToLock);
+ put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
+ sysMutexTable.put(put);
+ }
} catch (TableExistsException e) {
// Ignore
}
@@ -2989,11 +2992,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
- byte[] oldValue = null;
- byte[] newValue = UPGRADE_MUTEX_VALUE;
+ byte[] oldValue = UPGRADE_MUTEX_UNLOCKED;
+ byte[] newValue = UPGRADE_MUTEX_LOCKED;
Put put = new Put(rowToLock);
put.add(family, qualifier, newValue);
- boolean acquired = sysMutexTable.checkAndPut(rowToLock, family, qualifier, oldValue, put);
+ boolean acquired = sysMutexTable.checkAndPut(rowToLock, family, qualifier, oldValue, put);
if (!acquired) { throw new UpgradeInProgressException(getVersion(currentServerSideTableTimestamp),
getVersion(MIN_SYSTEM_TABLE_TIMESTAMP)); }
return true;
@@ -3006,11 +3009,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try (HTableInterface sysMutexTable = getTable(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME_BYTES)) {
byte[] family = PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES;
byte[] qualifier = UPGRADE_MUTEX;
- byte[] expectedValue = UPGRADE_MUTEX_VALUE;
- Delete delete = new Delete(mutexRowKey);
- RowMutations mutations = new RowMutations(mutexRowKey);
- mutations.add(delete);
- released = sysMutexTable.checkAndMutate(mutexRowKey, family, qualifier, CompareOp.EQUAL, expectedValue, mutations);
+ byte[] expectedValue = UPGRADE_MUTEX_LOCKED;
+ byte[] newValue = UPGRADE_MUTEX_UNLOCKED;
+ Put put = new Put(mutexRowKey);
+ put.add(family, qualifier, newValue);
+ released = sysMutexTable.checkAndPut(mutexRowKey, family, qualifier, expectedValue, put);
} catch (Exception e) {
logger.warn("Release of upgrade mutex failed", e);
}
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a19089b5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 4959463..a1b248c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -64,7 +64,7 @@
<top.dir>${project.basedir}</top.dir>
<!-- Hadoop Versions -->
- <hbase.version>0.98.17-hadoop2</hbase.version>
+ <hbase.version>0.98.23-hadoop2</hbase.version>
<hadoop-two.version>2.7.1</hadoop-two.version>
<!-- Dependency versions -->