Posted to commits@phoenix.apache.org by ch...@apache.org on 2019/07/11 21:21:09 UTC
[phoenix] branch 4.x-HBase-1.5 updated: PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
This is an automated email from the ASF dual-hosted git repository.
chinmayskulkarni pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/4.x-HBase-1.5 by this push:
new f9d6285 PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
f9d6285 is described below
commit f9d6285a349fcffee3cae4f147d8699580f2879a
Author: Xinyi <xy...@salesforce.com>
AuthorDate: Sat Jun 15 16:40:32 2019 -0700
PHOENIX-5228 use slf4j for logging in phoenix project (addendum)
Signed-off-by: Chinmay Kulkarni <ch...@apache.org>
---
...WALReplayWithIndexWritesAndCompressedWALIT.java | 3 +-
.../org/apache/phoenix/end2end/BaseQueryIT.java | 6 +-
.../end2end/ConnectionQueryServicesTestImpl.java | 5 +-
.../end2end/PartialScannerResultsDisabledIT.java | 5 +-
.../apache/phoenix/end2end/PermissionsCacheIT.java | 4 +-
.../end2end/TableSnapshotReadsMapReduceIT.java | 4 +-
.../index/IndexRebuildIncrementDisableCountIT.java | 3 +-
.../index/InvalidIndexStateClientSideIT.java | 3 +-
.../phoenix/end2end/index/MutableIndexIT.java | 2 -
.../execute/UpsertSelectOverlappingBatchesIT.java | 19 ++--
.../index/FailForUnsupportedHBaseVersionsIT.java | 3 +-
.../phoenix/monitoring/PhoenixMetricsIT.java | 3 +-
.../util/CoprocessorHConnectionTableFactoryIT.java | 3 +-
.../hbase/ipc/PhoenixRpcSchedulerFactory.java | 6 +-
.../IndexHalfStoreFileReaderGenerator.java | 3 +-
.../java/org/apache/phoenix/cache/GlobalCache.java | 14 ++-
.../apache/phoenix/cache/ServerCacheClient.java | 28 +++--
.../org/apache/phoenix/cache/TenantCacheImpl.java | 10 +-
.../apache/phoenix/cache/aggcache/SpillFile.java | 12 +-
.../cache/aggcache/SpillableGroupByCache.java | 22 ++--
.../org/apache/phoenix/compile/FromCompiler.java | 17 ++-
.../GroupedAggregateRegionObserver.java | 36 +++---
.../phoenix/coprocessor/MetaDataEndpointImpl.java | 92 +++++++--------
.../coprocessor/MetaDataRegionObserver.java | 75 +++++++-----
.../coprocessor/PhoenixAccessController.java | 6 +-
.../UngroupedAggregateRegionObserver.java | 40 +++----
.../coprocessor/tasks/DropChildViewsTask.java | 3 +-
.../coprocessor/tasks/IndexRebuildTask.java | 3 +-
.../org/apache/phoenix/execute/AggregatePlan.java | 4 +-
.../org/apache/phoenix/execute/BaseQueryPlan.java | 6 +-
.../org/apache/phoenix/execute/HashJoinPlan.java | 6 +-
.../org/apache/phoenix/execute/MutationState.java | 28 ++---
.../java/org/apache/phoenix/execute/ScanPlan.java | 4 +-
.../apache/phoenix/expression/LikeExpression.java | 20 ++--
.../aggregator/FirstLastValueServerAggregator.java | 6 +-
.../aggregator/SizeTrackingServerAggregators.java | 4 +-
.../expression/function/CollationKeyFunction.java | 6 +-
.../phoenix/filter/RowKeyComparisonFilter.java | 6 +-
.../org/apache/phoenix/hbase/index/Indexer.java | 18 ++-
.../hbase/index/util/IndexManagementUtil.java | 3 +-
.../index/write/ParallelWriterIndexCommitter.java | 1 -
.../hbase/index/write/RecoveryIndexWriter.java | 3 +-
.../TrackingParallelWriterIndexCommitter.java | 10 +-
.../phoenix/index/PhoenixIndexFailurePolicy.java | 19 ++--
.../phoenix/iterate/BaseResultIterators.java | 10 +-
.../phoenix/iterate/ChunkedResultIterator.java | 8 +-
.../apache/phoenix/iterate/ParallelIterators.java | 6 +-
.../phoenix/iterate/RoundRobinResultIterator.java | 6 +-
.../phoenix/iterate/TableResultIterator.java | 4 +-
.../org/apache/phoenix/jdbc/PhoenixDriver.java | 16 +--
.../apache/phoenix/jdbc/PhoenixEmbeddedDriver.java | 11 +-
.../org/apache/phoenix/jdbc/PhoenixStatement.java | 26 +++--
.../java/org/apache/phoenix/log/QueryLogger.java | 2 +-
.../apache/phoenix/log/QueryLoggerDisruptor.java | 5 +-
.../mapreduce/FormatToBytesWritableMapper.java | 3 +-
.../phoenix/mapreduce/MultiHfileOutputFormat.java | 3 +-
.../apache/phoenix/mapreduce/OrphanViewTool.java | 3 +-
.../phoenix/mapreduce/PhoenixRecordReader.java | 9 +-
.../PhoenixServerBuildIndexInputFormat.java | 3 +-
.../phoenix/mapreduce/RegexToKeyValueMapper.java | 4 +-
.../apache/phoenix/mapreduce/index/IndexTool.java | 7 +-
.../index/PhoenixIndexImportDirectReducer.java | 3 +-
.../index/PhoenixIndexPartialBuildMapper.java | 3 +-
.../index/PhoenixServerBuildIndexMapper.java | 4 -
.../index/automation/PhoenixMRJobSubmitter.java | 3 +-
.../mapreduce/util/PhoenixConfigurationUtil.java | 8 +-
.../apache/phoenix/memory/GlobalMemoryManager.java | 4 +-
.../monitoring/GlobalMetricRegistriesAdapter.java | 9 +-
.../phoenix/query/ConnectionQueryServicesImpl.java | 111 ++++++++++--------
.../phoenix/query/PhoenixStatsCacheLoader.java | 4 +-
.../org/apache/phoenix/schema/MetaDataClient.java | 28 ++---
.../schema/stats/DefaultStatisticsCollector.java | 6 +-
.../phoenix/schema/stats/StatisticsScanner.java | 12 +-
.../java/org/apache/phoenix/trace/TraceReader.java | 4 +-
.../transaction/OmidTransactionContext.java | 6 +-
.../transaction/TephraTransactionContext.java | 6 +-
.../phoenix/util/EquiDepthStreamHistogram.java | 3 +-
.../java/org/apache/phoenix/util/MetaDataUtil.java | 8 +-
.../org/apache/phoenix/util/ReadOnlyProps.java | 4 +-
.../java/org/apache/phoenix/util/UpgradeUtil.java | 126 +++++++++++----------
.../phoenix/util/json/JsonUpsertExecutor.java | 3 +-
.../java/org/apache/phoenix/query/BaseTest.java | 22 ++--
.../tool/ParameterizedPhoenixCanaryToolIT.java | 4 +-
.../main/java/org/apache/phoenix/pherf/Pherf.java | 28 ++---
.../pherf/configuration/XMLConfigParser.java | 6 +-
.../apache/phoenix/pherf/result/ResultManager.java | 4 +-
.../apache/phoenix/pherf/rules/RulesApplier.java | 14 +--
.../apache/phoenix/pherf/schema/SchemaReader.java | 8 +-
.../org/apache/phoenix/pherf/util/PhoenixUtil.java | 22 ++--
.../apache/phoenix/pherf/util/ResourceList.java | 18 +--
.../pherf/workload/MultiThreadedRunner.java | 10 +-
.../pherf/workload/MultithreadedDiffer.java | 1 -
.../phoenix/pherf/workload/QueryExecutor.java | 8 +-
.../phoenix/pherf/workload/QueryVerifier.java | 6 +-
.../phoenix/pherf/workload/WorkloadExecutor.java | 4 +-
.../phoenix/pherf/workload/WriteWorkload.java | 24 ++--
.../phoenix/pherf/ConfigurationParserTest.java | 4 +-
97 files changed, 685 insertions(+), 553 deletions(-)
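
Before the per-file diff, here is a minimal, self-contained sketch distilling the pattern this addendum applies across the codebase (the class name LoggingExample and its methods are hypothetical illustrations, not code from the commit): an SLF4J logger held in a static final field named LOGGER rather than logger, debug-level messages built by string concatenation guarded behind isDebugEnabled(), throwables passed as the final argument so stack traces are preserved, and long statements wrapped across lines.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingExample {

    // Constant-style naming (LOGGER, not logger) is the convention this addendum enforces.
    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingExample.class);

    public void evictEntries(long bytesNeeded, long bytesAvailable) {
        // Guard concatenated DEBUG messages so the string is only built
        // when debug logging is actually enabled.
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Trying to evict inactive cache entries to free up "
                    + bytesNeeded + " bytes; " + bytesAvailable + " bytes available");
        }
        // SLF4J's parameterized form defers formatting without an explicit
        // guard; shown here as the idiomatic alternative.
        LOGGER.debug("Freeing {} bytes; {} bytes available", bytesNeeded, bytesAvailable);
    }

    public void closeQuietly(java.io.Closeable closeable) {
        try {
            closeable.close();
        } catch (java.io.IOException e) {
            // Pass the Throwable as the last argument so the stack trace is logged.
            LOGGER.warn(e.getMessage(), e);
        }
    }
}

The isDebugEnabled() guard matters because the concatenated message is otherwise constructed on every call even when DEBUG is disabled; SLF4J's {} placeholder form is the usual alternative, though this commit keeps concatenation to minimize churn.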
diff --git a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
index 10e5b80..fa248a5 100644
--- a/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
+++ b/phoenix-core/src/it/java/org/apache/hadoop/hbase/regionserver/wal/WALReplayWithIndexWritesAndCompressedWALIT.java
@@ -86,7 +86,8 @@ import org.slf4j.LoggerFactory;
@Category(NeedsOwnMiniClusterTest.class)
public class WALReplayWithIndexWritesAndCompressedWALIT {
- public static final Logger LOGGER = LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
+ public static final Logger LOGGER =
+ LoggerFactory.getLogger(WALReplayWithIndexWritesAndCompressedWALIT.class);
@Rule
public TableName table = new TableName();
private String INDEX_TABLE_NAME = table.getTableNameString() + "_INDEX";
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index e88dc57..e7f3ad9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -84,7 +84,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
protected String tableName;
protected String indexName;
- private static final Logger logger = LoggerFactory.getLogger(BaseQueryIT.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(BaseQueryIT.class);
public BaseQueryIT(String idxDdl, boolean columnEncoded, boolean keepDeletedCells) throws Exception {
StringBuilder optionBuilder = new StringBuilder();
@@ -102,7 +102,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
date = new Date(System.currentTimeMillis()), null, getUrl(),
tableDDLOptions);
} catch (Exception e) {
- logger.error("Exception when creating aTable ", e);
+ LOGGER.error("Exception when creating aTable ", e);
throw e;
}
this.indexName = generateUniqueName();
@@ -113,7 +113,7 @@ public abstract class BaseQueryIT extends ParallelStatsDisabledIT {
try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
conn.createStatement().execute(this.indexDDL);
} catch (Exception e) {
- logger.error("Exception while creating index: " + indexDDL, e);
+ LOGGER.error("Exception while creating index: " + indexDDL, e);
throw e;
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
index 969e0f4..3bb99f6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
@@ -50,7 +50,8 @@ import com.google.common.collect.Sets;
* @since 0.1
*/
public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl {
- private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesTestImpl.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(ConnectionQueryServicesTestImpl.class);
protected int NUM_SLAVES_BASE = 1; // number of slaves for the cluster
// Track open connections to free them on close as unit tests don't always do this.
private Set<PhoenixConnection> connections = Sets.newHashSet();
@@ -85,7 +86,7 @@ public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl
try {
service.close();
} catch (IOException e) {
- logger.warn(e.getMessage(), e);
+ LOGGER.warn(e.getMessage(), e);
}
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
index 59471dd..6de1c35 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PartialScannerResultsDisabledIT.java
@@ -78,7 +78,8 @@ public class PartialScannerResultsDisabledIT extends ParallelStatsDisabledIT {
private String schemaName;
private String dataTableFullName;
private static String indexTableFullName;
- private static final Logger logger = LoggerFactory.getLogger(PartialScannerResultsDisabledIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(PartialScannerResultsDisabledIT.class);
private static Random random = new Random(1);
// background writer threads
private static Random sourceOfRandomness = new Random(0);
@@ -99,7 +100,7 @@ public class PartialScannerResultsDisabledIT extends ParallelStatsDisabledIT {
// TODO: it's likely that less data could be written if whatever
// config parameters decide this are lowered.
writeSingleBatch(conn, 100, 20, dataTableFullName);
- logger.info("Running scrutiny");
+ LOGGER.info("Running scrutiny");
// Scutunize index to see if partial results are silently returned
// In that case we'll get a false positive on the scrutiny run.
long rowCount = IndexScrutiny.scrutinizeIndex(conn, dataTableFullName, indexTableFullName);
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
index ed36e63..030c03f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PermissionsCacheIT.java
@@ -43,12 +43,12 @@ public class PermissionsCacheIT extends BasePermissionsIT {
public PermissionsCacheIT() throws Exception {
super(true);
}
-
+
@BeforeClass
public static void doSetup() throws Exception {
BasePermissionsIT.initCluster(true);
}
-
+
@Test
public void testPermissionsCachedWithAccessChecker() throws Throwable {
if (!isNamespaceMapped) {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
index e35e159..2d8c475 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TableSnapshotReadsMapReduceIT.java
@@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory;
public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
- private static final Logger logger = LoggerFactory.getLogger(TableSnapshotReadsMapReduceIT.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(TableSnapshotReadsMapReduceIT.class);
private final static String SNAPSHOT_NAME = "FOO";
private static final String FIELD1 = "FIELD1";
@@ -222,7 +222,7 @@ public class TableSnapshotReadsMapReduceIT extends BaseUniqueNamesOwnClusterIT {
if (hRegionInfoList.size() >= expectedRegions) {
break;
}
- logger.info("Sleeping for 1000 ms while waiting for " + hbaseTableName.getNameAsString() + " to split");
+ LOGGER.info("Sleeping for 1000 ms while waiting for " + hbaseTableName.getNameAsString() + " to split");
Thread.sleep(1000);
}
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
index cf48f5f..bdeb735 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexRebuildIncrementDisableCountIT.java
@@ -56,7 +56,8 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Maps;
public class IndexRebuildIncrementDisableCountIT extends BaseUniqueNamesOwnClusterIT {
- private static final Logger LOGGER = LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(IndexRebuildIncrementDisableCountIT.class);
private static long pendingDisableCount = 0;
private static String ORG_PREFIX = "ORG";
private static Result pendingDisableCountResult = null;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
index 2b39acb..aea9759 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/InvalidIndexStateClientSideIT.java
@@ -54,7 +54,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class InvalidIndexStateClientSideIT extends ParallelStatsDisabledIT {
- private static final Logger LOGGER = LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(InvalidIndexStateClientSideIT.class);
@Test
public void testCachedConnections() throws Throwable {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index d6e467a..4e3a309 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -635,8 +635,6 @@ public class MutableIndexIT extends ParallelStatsDisabledIT {
"CREATE " + (localIndex ? "LOCAL" : "")+" INDEX " + indexName + " ON " + tableName + "(v1"+(isReverse?" DESC":"")+") include (k3)");
}
-
-
private List<HRegionInfo> splitDuringScan(Connection conn1, String tableName, String indexName, String[] strings, HBaseAdmin admin, boolean isReverse)
throws SQLException, IOException, InterruptedException {
ResultSet rs;
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
index dc9de81..3c81879 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/execute/UpsertSelectOverlappingBatchesIT.java
@@ -66,7 +66,8 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterIT {
- private static final Logger logger = LoggerFactory.getLogger(UpsertSelectOverlappingBatchesIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(UpsertSelectOverlappingBatchesIT.class);
private Properties props;
private static volatile String dataTable;
private String index;
@@ -129,11 +130,11 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
}
catch (Exception e) {
if (ExceptionUtils.indexOfThrowable(e, InterruptedException.class) != -1) {
- logger.info("Interrupted, exiting", e);
+ LOGGER.info("Interrupted, exiting", e);
Thread.currentThread().interrupt();
return;
}
- logger.error("Hit exception while writing", e);
+ LOGGER.error("Hit exception while writing", e);
}
}
}};
@@ -214,17 +215,17 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
try {
List<HRegionInfo> regions = admin.getTableRegions(dataTN);
if (regions.size() > 1) {
- logger.info("Found region was split");
+ LOGGER.info("Found region was split");
return true;
}
if (regions.size() == 0) {
// This happens when region in transition or closed
- logger.info("No region returned");
+ LOGGER.info("No region returned");
return false;
}
;
HRegionInfo hRegion = regions.get(0);
- logger.info("Attempting to split region");
+ LOGGER.info("Attempting to split region");
admin.splitRegion(hRegion.getRegionName(), Bytes.toBytes(2));
return false;
} catch (NotServingRegionException nsre) {
@@ -263,7 +264,7 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
final HBaseAdmin admin = utility.getHBaseAdmin();
final HRegionInfo dataRegion =
admin.getTableRegions(TableName.valueOf(dataTable)).get(0);
- logger.info("Closing data table region");
+ LOGGER.info("Closing data table region");
admin.closeRegion(dataRs.getServerName(), dataRegion);
// make sure the region is offline
utility.waitFor(60000L, 1000, new Waiter.Predicate<Exception>() {
@@ -273,11 +274,11 @@ public class UpsertSelectOverlappingBatchesIT extends BaseUniqueNamesOwnClusterI
admin.getOnlineRegions(dataRs.getServerName());
for (HRegionInfo onlineRegion : onlineRegions) {
if (onlineRegion.equals(dataRegion)) {
- logger.info("Data region still online");
+ LOGGER.info("Data region still online");
return false;
}
}
- logger.info("Region is no longer online");
+ LOGGER.info("Region is no longer online");
return true;
}
});
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
index b920bf4..aaf533e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/FailForUnsupportedHBaseVersionsIT.java
@@ -46,7 +46,8 @@ import org.slf4j.LoggerFactory;
*/
@Category(NeedsOwnMiniClusterTest.class)
public class FailForUnsupportedHBaseVersionsIT {
- private static final Logger LOGGER = LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(FailForUnsupportedHBaseVersionsIT.class);
/**
* We don't support WAL Compression for HBase < 0.94.9, so we shouldn't even allow the server
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 8d5754f..48a02e6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -231,7 +231,8 @@ public class PhoenixMetricsIT extends BasePhoenixMetricsIT {
long expectedValue = value;
long actualValue = metric.value().longValue();
if (expectedValue != actualValue) {
- LOGGER.warn("Metric from Hadoop Sink: " + metric.name() + " didn't match expected.");
+ LOGGER.warn("Metric from Hadoop Sink: "
+ + metric.name() + " didn't match expected.");
return false;
}
expectedMetrics.remove(metric.name());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java
index 723502b..0d5d784 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/util/CoprocessorHConnectionTableFactoryIT.java
@@ -41,7 +41,8 @@ import org.slf4j.LoggerFactory;
*/
public class CoprocessorHConnectionTableFactoryIT extends BaseUniqueNamesOwnClusterIT {
private static String ORG_PREFIX = "ORG";
- private static final Logger LOGGER = LoggerFactory.getLogger(CoprocessorHConnectionTableFactoryIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(CoprocessorHConnectionTableFactoryIT.class);
@BeforeClass
public static final void doSetup() throws Exception {
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
index bf33992..16e15a7 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/ipc/PhoenixRpcSchedulerFactory.java
@@ -38,7 +38,8 @@ import com.google.common.base.Preconditions;
*/
public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
- private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(PhoenixRpcSchedulerFactory.class);
private static final String VERSION_TOO_OLD_FOR_INDEX_RPC =
"Running an older version of HBase (less than 0.98.4), Phoenix index RPC handling cannot be enabled.";
@@ -64,7 +65,8 @@ public class PhoenixRpcSchedulerFactory implements RpcSchedulerFactory {
// validate index and metadata priorities are not the same
Preconditions.checkArgument(indexPriority != metadataPriority, "Index and Metadata priority must not be same "+ indexPriority);
- LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority " + indexPriority + " and metadata rpc priority " + metadataPriority);
+ LOGGER.info("Using custom Phoenix Index RPC Handling with index rpc priority "
+ + indexPriority + " and metadata rpc priority " + metadataPriority);
PhoenixRpcScheduler scheduler =
new PhoenixRpcScheduler(conf, delegate, indexPriority, metadataPriority);
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 6f97cba..786eeee 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -67,7 +67,8 @@ import com.google.common.collect.Lists;
public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
private static final String LOCAL_INDEX_AUTOMATIC_REPAIR = "local.index.automatic.repair";
- public static final Logger LOGGER = LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
+ public static final Logger LOGGER =
+ LoggerFactory.getLogger(IndexHalfStoreFileReaderGenerator.class);
@Override
public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
index 5f3e29b..a18ec6d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/GlobalCache.java
@@ -54,7 +54,7 @@ import com.google.common.cache.Weigher;
* @since 0.1
*/
public class GlobalCache extends TenantCacheImpl {
- private static final Logger logger = LoggerFactory.getLogger(GlobalCache.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(GlobalCache.class);
private static volatile GlobalCache INSTANCE;
private final Configuration config;
@@ -65,16 +65,20 @@ public class GlobalCache extends TenantCacheImpl {
public long clearTenantCache() {
long unfreedBytes = getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory();
- if (unfreedBytes != 0 && logger.isDebugEnabled()) {
- logger.debug("Found " + (getMemoryManager().getMaxMemory() - getMemoryManager().getAvailableMemory()) + " bytes not freed from global cache");
+ if (unfreedBytes != 0 && LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Found " + (getMemoryManager().getMaxMemory() -
+ getMemoryManager().getAvailableMemory()) +
+ " bytes not freed from global cache");
}
removeAllServerCache();
for (Map.Entry<ImmutableBytesWritable, TenantCache> entry : perTenantCacheMap.entrySet()) {
TenantCache cache = entry.getValue();
long unfreedTenantBytes = cache.getMemoryManager().getMaxMemory() - cache.getMemoryManager().getAvailableMemory();
- if (unfreedTenantBytes != 0 && logger.isDebugEnabled()) {
+ if (unfreedTenantBytes != 0 && LOGGER.isDebugEnabled()) {
ImmutableBytesWritable cacheId = entry.getKey();
- logger.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " + Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(), cacheId.getLength()));
+ LOGGER.debug("Found " + unfreedTenantBytes + " bytes not freed for tenant " +
+ Bytes.toStringBinary(cacheId.get(), cacheId.getOffset(),
+ cacheId.getLength()));
}
unfreedBytes += unfreedTenantBytes;
cache.removeAllServerCache();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
index 2a0ecf6..d333377 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/ServerCacheClient.java
@@ -284,7 +284,10 @@ public class ServerCacheClient {
cacheUsingTable.getIndexType() == IndexType.LOCAL)) {
// Call RPC once per server
servers.add(entry);
- if (LOGGER.isDebugEnabled()) {LOGGER.debug(addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));}
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(addCustomAnnotations(
+ "Adding cache entry to be sent for " + entry, connection));
+ }
final byte[] key = getKeyInRegion(entry.getRegionInfo().getStartKey());
final HTableInterface htable = services.getTable(cacheUsingTable.getPhysicalName().getBytes());
closeables.add(htable);
@@ -311,10 +314,13 @@ public class ServerCacheClient {
}
}));
} else {
- if (LOGGER.isDebugEnabled()) {LOGGER.debug(addCustomAnnotations("NOT adding cache entry to be sent for " + entry + " since one already exists for that entry", connection));}
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(addCustomAnnotations(
+ "NOT adding cache entry to be sent for " + entry +
+ " since one already exists for that entry", connection));
+ }
}
}
-
hashCacheSpec = new ServerCache(cacheId,servers,cachePtr, services, storeCacheOnClient);
// Execute in parallel
int timeoutMs = services.getProps().getInt(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
@@ -350,7 +356,10 @@ public class ServerCacheClient {
}
}
}
- if (LOGGER.isDebugEnabled()) {LOGGER.debug(addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));}
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(addCustomAnnotations("Cache " + cacheId +
+ " successfully added to servers.", connection));
+ }
return hashCacheSpec;
}
@@ -377,7 +386,8 @@ public class ServerCacheClient {
* to.
*/
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));
+ LOGGER.debug(addCustomAnnotations("Removing Cache " +
+ cacheId + " from servers.", connection));
}
for (HRegionLocation entry : locations) {
// Call once per server
@@ -420,13 +430,15 @@ public class ServerCacheClient {
remainingOnServers.remove(entry);
} catch (Throwable t) {
lastThrowable = t;
- LOGGER.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection),
- t);
+ LOGGER.error(addCustomAnnotations(
+ "Error trying to remove hash cache for " + entry,
+ connection), t);
}
}
}
if (!remainingOnServers.isEmpty()) {
- LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection),
+ LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for "
+ + remainingOnServers, connection),
lastThrowable);
}
} finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
index dc4c9e3..22a7050 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/TenantCacheImpl.java
@@ -50,7 +50,7 @@ import com.google.common.cache.RemovalNotification;
* @since 0.1
*/
public class TenantCacheImpl implements TenantCache {
- private static final Logger logger = LoggerFactory.getLogger(TenantCacheImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(TenantCacheImpl.class);
private final int maxTimeToLiveMs;
private final int maxPersistenceTimeToLiveMs;
private final MemoryManager memoryManager;
@@ -199,7 +199,7 @@ public class TenantCacheImpl implements TenantCache {
}
synchronized private void evictInactiveEntries(long bytesNeeded) {
- logger.debug("Trying to evict inactive cache entries to free up " + bytesNeeded + " bytes");
+ LOGGER.debug("Trying to evict inactive cache entries to free up " + bytesNeeded + " bytes");
CacheEntry[] entries = getPersistentServerCaches().asMap().values().toArray(new CacheEntry[]{});
Arrays.sort(entries);
long available = this.getMemoryManager().getAvailableMemory();
@@ -208,7 +208,8 @@ public class TenantCacheImpl implements TenantCache {
ImmutableBytesPtr cacheId = entry.getCacheId();
getPersistentServerCaches().invalidate(cacheId);
available = this.getMemoryManager().getAvailableMemory();
- logger.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have " + available + " bytes available");
+ LOGGER.debug("Evicted cache ID " + Bytes.toLong(cacheId.get()) + ", we now have "
+ + available + " bytes available");
}
}
@@ -273,7 +274,8 @@ public class TenantCacheImpl implements TenantCache {
}
entry.decrementLiveQueryCount();
if (!entry.isLive()) {
- logger.debug("Cache ID " + Bytes.toLong(cacheId.get()) + " is no longer live, invalidate it");
+ LOGGER.debug("Cache ID " + Bytes.toLong(cacheId.get())
+ + " is no longer live, invalidate it");
getServerCaches().invalidate(cacheId);
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
index a47cfdf..ec08ac3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillFile.java
@@ -42,7 +42,7 @@ import java.util.UUID;
*/
public class SpillFile implements Closeable {
- private static final Logger logger = LoggerFactory.getLogger(SpillFile.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SpillFile.class);
// Default size for a single spillFile 2GB
private static final int SPILL_FILE_SIZE = Integer.MAX_VALUE;
// Page size for a spill file 4K
@@ -68,13 +68,13 @@ public class SpillFile implements Closeable {
Closeables.closeQuietly(rndFile);
if (file != null) {
- if (logger.isDebugEnabled()) {
- logger.debug("Deleting tempFile: " + file.getAbsolutePath());
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Deleting tempFile: " + file.getAbsolutePath());
}
try {
file.delete();
} catch (SecurityException e) {
- logger.warn("IOException thrown while closing Closeable." + e);
+ LOGGER.warn("IOException thrown while closing Closeable." + e);
}
}
}
@@ -104,8 +104,8 @@ public class SpillFile implements Closeable {
// Create temp file in temp dir or custom dir if provided
File tempFile = File.createTempFile(UUID.randomUUID().toString(),
null, spillFilesDirectory);
- if (logger.isDebugEnabled()) {
- logger.debug("Creating new SpillFile: " + tempFile.getAbsolutePath());
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Creating new SpillFile: " + tempFile.getAbsolutePath());
}
RandomAccessFile file = new RandomAccessFile(tempFile, "rw");
file.setLength(SPILL_FILE_SIZE);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
index dc0ae21..821dc6b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/cache/aggcache/SpillableGroupByCache.java
@@ -93,7 +93,7 @@ import org.slf4j.LoggerFactory;
public class SpillableGroupByCache implements GroupByCache {
- private static final Logger logger = LoggerFactory.getLogger(SpillableGroupByCache.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SpillableGroupByCache.class);
// Min size of 1st level main memory cache in bytes --> lower bound
private static final int SPGBY_CACHE_MIN_SIZE = 4096; // 4K
@@ -148,13 +148,14 @@ public class SpillableGroupByCache implements GroupByCache {
try {
this.chunk = tenantCache.getMemoryManager().allocate(estSize);
} catch (InsufficientMemoryException ime) {
- logger.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
+ LOGGER.error("Requested Map size exceeds memory limit, " +
+ "please decrease max size via config paramter: "
+ GROUPBY_MAX_CACHE_SIZE_ATTRIB);
throw ime;
}
- if (logger.isDebugEnabled()) {
- logger.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
}
// LRU cache implemented as LinkedHashMap with access order
@@ -240,8 +241,8 @@ public class SpillableGroupByCache implements GroupByCache {
if (rowAggregators == null) {
// No, key never spilled before, create a new tuple
rowAggregators = aggregators.newAggregators(env.getConfiguration());
- if (logger.isDebugEnabled()) {
- logger.debug("Adding new aggregate bucket for row key "
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Adding new aggregate bucket for row key "
+ Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()));
}
}
@@ -359,10 +360,11 @@ public class SpillableGroupByCache implements GroupByCache {
ImmutableBytesWritable key = ce.getKey();
Aggregator[] aggs = ce.getValue();
byte[] value = aggregators.toBytes(aggs);
- if (logger.isDebugEnabled()) {
- logger.debug("Adding new distinct group: "
- + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) + " with aggregators "
- + aggs.toString() + " value = " + Bytes.toStringBinary(value));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Adding new distinct group: "
+ + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength()) +
+ " with aggregators " + aggs.toString() + " value = " +
+ Bytes.toStringBinary(value));
}
results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(), SINGLE_COLUMN_FAMILY,
SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 3bc15fd..97ac0f6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -107,7 +107,7 @@ import com.google.common.collect.Lists;
* @since 0.1
*/
public class FromCompiler {
- private static final Logger logger = LoggerFactory.getLogger(FromCompiler.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FromCompiler.class);
public static final ColumnResolver EMPTY_TABLE_RESOLVER = new ColumnResolver() {
@@ -646,8 +646,13 @@ public class FromCompiler {
timeStamp += tsAddition;
}
TableRef tableRef = new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
- if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
- logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale table " + fullTableName + " with seqNum " + tableRef.getTable().getSequenceNumber() + " at timestamp " + tableRef.getTable().getTimeStamp() + " with " + tableRef.getTable().getColumns().size() + " columns: " + tableRef.getTable().getColumns(), connection));
+ if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Re-resolved stale table " + fullTableName + " with seqNum "
+ + tableRef.getTable().getSequenceNumber() + " at timestamp "
+ + tableRef.getTable().getTimeStamp() + " with "
+ + tableRef.getTable().getColumns().size() + " columns: "
+ + tableRef.getTable().getColumns(), connection));
}
return tableRef;
}
@@ -695,8 +700,10 @@ public class FromCompiler {
timeStamp += tsAddition;
}
- if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
- logger.debug(LogUtil.addCustomAnnotations("Re-resolved stale function " + functionNames.toString() + "at timestamp " + timeStamp, connection));
+ if (LOGGER.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Re-resolved stale function " + functionNames.toString() +
+ "at timestamp " + timeStamp, connection));
}
return functionsFound;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index aefe916..3ea04a9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -88,7 +88,7 @@ import com.google.common.collect.Maps;
* @since 0.1
*/
public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOGGER = LoggerFactory
.getLogger(GroupedAggregateRegionObserver.class);
public static final int MIN_DISTINCT_VALUES = 100;
@@ -278,8 +278,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
// If Aggregators not found for this distinct
// value, clone our original one (we need one
// per distinct value)
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key "
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Adding new aggregate bucket for row key "
+ Bytes.toStringBinary(key.get(), key.getOffset(),
key.getLength()), customAnnotations));
}
@@ -313,8 +313,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
// Generate byte array of Aggregators and set as value of row
byte[] value = aggregators.toBytes(rowAggregators);
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
+ Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
+ " with aggregators " + Arrays.asList(rowAggregators).toString()
+ " value = " + Bytes.toStringBinary(value), customAnnotations));
@@ -382,9 +382,11 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
final RegionScanner scanner, final List<Expression> expressions,
final ServerAggregators aggregators, long limit) throws IOException {
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
- + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Grouped aggregation over unordered rows with scan " + scan
+ + ", group by " + expressions + ", aggregators " + aggregators,
+ ScanUtil.getCustomAnnotations(scan)));
}
RegionCoprocessorEnvironment env = c.getEnvironment();
Configuration conf = env.getConfiguration();
@@ -410,8 +412,10 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
try {
boolean hasMore;
Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Spillable groupby enabled: " + spillableEnabled,
+ ScanUtil.getCustomAnnotations(scan)));
}
Region region = c.getEnvironment().getRegion();
boolean acquiredLock = false;
@@ -466,9 +470,11 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
final Scan scan, final RegionScanner scanner, final List<Expression> expressions,
final ServerAggregators aggregators, final long limit) throws IOException {
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by "
- + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Grouped aggregation over ordered rows with scan " + scan + ", group by "
+ + expressions + ", aggregators " + aggregators,
+ ScanUtil.getCustomAnnotations(scan)));
}
final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
final boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers);
@@ -508,8 +514,8 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
if (!aggBoundary) {
aggregators.aggregate(rowAggregators, result);
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations(
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
"Row passed filters: " + kvs
+ ", aggregated values: "
+ Arrays.asList(rowAggregators),
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index cc24511..a49beb8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -297,7 +297,7 @@ import com.google.protobuf.Service;
*/
@SuppressWarnings("deprecation")
public class MetaDataEndpointImpl extends MetaDataProtocol implements CoprocessorService, Coprocessor {
- private static final Logger logger = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataEndpointImpl.class);
// Column to track tables that have been upgraded based on PHOENIX-2067
public static final String ROW_KEY_ORDER_OPTIMIZABLE = "ROW_KEY_ORDER_OPTIMIZABLE";
@@ -571,7 +571,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
this.allowSystemCatalogRollback = config.getBoolean(QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK,
QueryServicesOptions.DEFAULT_ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK);
- logger.info("Starting Tracing-Metrics Systems");
+ LOGGER.info("Starting Tracing-Metrics Systems");
// Start the phoenix trace collection
Tracing.addTraceMetricsSource();
Metrics.ensureConfigured();
@@ -651,7 +651,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
done.run(builder.build());
} catch (Throwable t) {
- logger.error("getTable failed", t);
+ LOGGER.error("getTable failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
@@ -1025,8 +1025,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// the PTable added to the cache doesn't include parent columns as we always call
// combine columns after looking up the PTable from the cache
&& !skipAddingIndexes) {
- if (logger.isDebugEnabled()) {
- logger.debug("Caching table "
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Caching table "
+ Bytes.toStringBinary(cacheKey.get(), cacheKey.getOffset(),
cacheKey.getLength())
+ " at seqNum " + newTable.getSequenceNumber()
@@ -2222,7 +2222,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (tableType == PTableType.INDEX || allowSystemCatalogRollback) {
result = checkTableKeyInRegion(parentTableKey, region);
if (result != null) {
- logger.error("Unable to lock parentTableKey "+Bytes.toStringBinary(parentTableKey));
+ LOGGER.error("Unable to lock parentTableKey "+Bytes.toStringBinary(parentTableKey));
// if allowSystemCatalogRollback is true and we can't lock the parentTableKey (because
// SYSTEM.CATALOG already split) return UNALLOWED_TABLE_MUTATION so that the client
// knows the create statement failed
@@ -2423,10 +2423,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
else {
String msg = "Found unexpected mutations while creating "+fullTableName;
- logger.error(msg);
+ LOGGER.error(msg);
for (Mutation m : remoteMutations) {
- logger.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
- logger.debug("Mutation family cell map : " + m.getFamilyCellMap());
+ LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
+ LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap());
}
throw new IllegalStateException(msg);
}
@@ -2461,7 +2461,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
releaseRowLocks(region,locks);
}
} catch (Throwable t) {
- logger.error("createTable failed", t);
+ LOGGER.error("createTable failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(fullTableName, t));
}
@@ -2510,8 +2510,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
byte[] viewTenantId = viewInfo.getTenantId();
byte[] viewSchemaName = viewInfo.getSchemaName();
byte[] viewName = viewInfo.getTableName();
- if (logger.isDebugEnabled()) {
- logger.debug("dropChildViews :" + Bytes.toString(schemaName) + "." + Bytes.toString(tableName) +
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("dropChildViews :" + Bytes.toString(schemaName) + "." + Bytes.toString(tableName) +
" -> " + Bytes.toString(viewSchemaName) + "." + Bytes.toString(viewName) +
"with tenant id :" + Bytes.toString(viewTenantId));
}
@@ -2528,7 +2528,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
new DropTableStatement(viewTableName, PTableType.VIEW, true, true, true));
}
catch (TableNotFoundException e) {
- logger.info("Ignoring view "+viewTableName+" as it has already been dropped");
+ LOGGER.info("Ignoring view "+viewTableName+" as it has already been dropped");
}
}
}
@@ -2689,10 +2689,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
if (!remoteMutations.isEmpty()) {
// while dropping a table all the mutations should be local
String msg = "Found unexpected mutations while dropping table "+SchemaUtil.getTableName(schemaName, tableName);
- logger.error(msg);
+ LOGGER.error(msg);
for (Mutation m : remoteMutations) {
- logger.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
- logger.debug("Mutation family cell map : " + m.getFamilyCellMap());
+ LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
+ LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap());
}
throw new IllegalStateException(msg);
}
@@ -2726,7 +2726,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
releaseRowLocks(region, locks);
}
} catch (Throwable t) {
- logger.error("dropTable failed", t);
+ LOGGER.error("dropTable failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
@@ -2759,7 +2759,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
SchemaUtil.getPhysicalTableName(systemTableName, env.getConfiguration()))) {
hTable.batch(remoteMutations);
} catch (Throwable t) {
- logger.error("Unable to write mutations to " + Bytes.toString(systemTableName), t);
+ LOGGER.error("Unable to write mutations to " + Bytes.toString(systemTableName), t);
builder.setReturnCode(mutationCode);
builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
return builder.build();
@@ -2836,7 +2836,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Task.addTask(conn, PTable.TaskType.DROP_CHILD_VIEWS, Bytes.toString(tenantId),
Bytes.toString(schemaName), Bytes.toString(tableName), this.accessCheckEnabled);
} catch (Throwable t) {
- logger.error("Adding a task to drop child views failed!", t);
+ LOGGER.error("Adding a task to drop child views failed!", t);
}
}
}
@@ -2927,12 +2927,12 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
invalidateList.add(cacheKey);
long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
PTable table = getTableFromCache(cacheKey, clientTimeStamp, clientVersion, false, false, null);
- if (logger.isDebugEnabled()) {
+ if (LOGGER.isDebugEnabled()) {
if (table == null) {
- logger.debug("Table " + Bytes.toStringBinary(key)
+ LOGGER.debug("Table " + Bytes.toStringBinary(key)
+ " not found in cache. Will build through scan");
} else {
- logger.debug("Table " + Bytes.toStringBinary(key)
+ LOGGER.debug("Table " + Bytes.toStringBinary(key)
+ " found in cache with timestamp " + table.getTimeStamp()
+ " seqNum " + table.getSequenceNumber());
}
@@ -2944,7 +2944,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// found
table = buildDeletedTable(key, cacheKey, region, clientTimeStamp);
if (table != null) {
- logger.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
+ LOGGER.info("Found newer table deleted as of " + table.getTimeStamp() + " versus client timestamp of " + clientTimeStamp);
return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
EnvironmentEdgeManager.currentTimeMillis(), null);
}
@@ -2952,7 +2952,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
EnvironmentEdgeManager.currentTimeMillis(), null);
}
if (table.getTimeStamp() >= clientTimeStamp) {
- logger.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
+ LOGGER.info("Found newer table as of " + table.getTimeStamp() + " versus client timestamp of "
+ clientTimeStamp);
return new MetaDataMutationResult(MutationCode.NEWER_TABLE_FOUND,
EnvironmentEdgeManager.currentTimeMillis(), table);
@@ -2961,15 +2961,15 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
long expectedSeqNum = MetaDataUtil.getSequenceNumber(tableMetadata) - 1; // lookup TABLE_SEQ_NUM in
// tableMetaData
- if (logger.isDebugEnabled()) {
- logger.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("For table " + Bytes.toStringBinary(key) + " expecting seqNum "
+ expectedSeqNum + " and found seqNum " + table.getSequenceNumber()
+ " with " + table.getColumns().size() + " columns: "
+ table.getColumns());
}
if (expectedSeqNum != table.getSequenceNumber()) {
- if (logger.isDebugEnabled()) {
- logger.debug("For table " + Bytes.toStringBinary(key)
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("For table " + Bytes.toStringBinary(key)
+ " returning CONCURRENT_TABLE_MUTATION due to unexpected seqNum");
}
return new MetaDataMutationResult(MutationCode.CONCURRENT_TABLE_MUTATION,
@@ -3028,10 +3028,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
}
else {
String msg = "Found unexpected mutations while adding or dropping column to "+fullTableName;
- logger.error(msg);
+ LOGGER.error(msg);
for (Mutation m : remoteMutations) {
- logger.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
- logger.debug("Mutation family cell map : " + m.getFamilyCellMap());
+ LOGGER.debug("Mutation rowkey : " + Bytes.toStringBinary(m.getRow()));
+ LOGGER.debug("Mutation family cell map : " + m.getFamilyCellMap());
}
throw new IllegalStateException(msg);
}
@@ -3169,7 +3169,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
// which will deadlock)
// we also don't need to include indexes
if (view == null) {
- logger.warn("Found invalid tenant view row in SYSTEM.CATALOG with tenantId:"
+ LOGGER.warn("Found invalid tenant view row in SYSTEM.CATALOG with tenantId:"
+ Bytes.toString(tenantId) + ", schema:" + Bytes.toString(schema)
+ ", table:" + Bytes.toString(table));
continue;
@@ -3477,14 +3477,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
EnvironmentEdgeManager.currentTimeMillis(), null);
}
else if (request.getClientVersion()< MIN_SPLITTABLE_SYSTEM_CATALOG ) {
- logger.error(
+ LOGGER.error(
"Unable to add a column as the client is older than "
+ MIN_SPLITTABLE_SYSTEM_CATALOG);
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
EnvironmentEdgeManager.currentTimeMillis(), null);
}
else if (allowSystemCatalogRollback) {
- logger.error("Unable to add a column as the "
+ LOGGER.error("Unable to add a column as the "
+ QueryServices.ALLOW_SPLITTABLE_SYSTEM_CATALOG_ROLLBACK
+ " config is set to true");
return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
@@ -3612,7 +3612,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
}
} catch (Throwable e) {
- logger.error("Add column failed: ", e);
+ LOGGER.error("Add column failed: ", e);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException("Error when adding column: ", e));
}
@@ -3938,7 +3938,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(MetaDataMutationResult.toProto(result));
}
} catch (Throwable e) {
- logger.error("Drop column failed: ", e);
+ LOGGER.error("Drop column failed: ", e);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException("Error when dropping column: ", e));
}
@@ -4048,7 +4048,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
Configuration config = env.getConfiguration();
if (isTablesMappingEnabled
&& MetaDataProtocol.MIN_NAMESPACE_MAPPED_PHOENIX_VERSION > request.getClientVersion()) {
- logger.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");
+ LOGGER.error("Old client is not compatible when" + " system tables are upgraded to map to namespace");
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
@@ -4065,7 +4065,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES, HConstants.LATEST_TIMESTAMP, null,
request.getClientVersion(), false, false, null);
} catch (Throwable t) {
- logger.error("loading system catalog table inside getVersion failed", t);
+ LOGGER.error("loading system catalog table inside getVersion failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(
SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
@@ -4339,7 +4339,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
rowLock.release();
}
} catch (Throwable t) {
- logger.error("updateIndexState failed", t);
+ LOGGER.error("updateIndexState failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
@@ -4353,7 +4353,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
private static MetaDataMutationResult checkTableKeyInRegion(byte[] key, Region region) {
MetaDataMutationResult result = checkKeyInRegion(key, region, MutationCode.TABLE_NOT_IN_REGION);
if (result!=null) {
- logger.error("Table rowkey " + Bytes.toStringBinary(key)
+ LOGGER.error("Table rowkey " + Bytes.toStringBinary(key)
+ " is not in the current region " + region.getRegionInfo());
}
return result;
@@ -4407,7 +4407,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
GlobalCache.getInstance(this.env).getMetaDataCache();
metaDataCache.invalidate(cacheKey);
} catch (Throwable t) {
- logger.error("clearTableFromCache failed", t);
+ LOGGER.error("clearTableFromCache failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
}
@@ -4504,7 +4504,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
done.run(builder.build());
return;
} catch (Throwable t) {
- logger.error("getFunctions failed", t);
+ LOGGER.error("getFunctions failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(functionNames.toString(), t));
}
@@ -4578,7 +4578,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
releaseRowLocks(region,locks);
}
} catch (Throwable t) {
- logger.error("createFunction failed", t);
+ LOGGER.error("createFunction failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(Bytes.toString(functionName), t));
}
@@ -4630,7 +4630,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
releaseRowLocks(region,locks);
}
} catch (Throwable t) {
- logger.error("dropFunction failed", t);
+ LOGGER.error("dropFunction failed", t);
ProtobufUtil.setControllerException(controller,
ServerUtil.createIOException(Bytes.toString(functionName), t));
}
@@ -4745,7 +4745,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
releaseRowLocks(region,locks);
}
} catch (Throwable t) {
- logger.error("Creating the schema" + schemaName + "failed", t);
+ LOGGER.error("Creating the schema" + schemaName + "failed", t);
ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
}
}
@@ -4789,7 +4789,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
releaseRowLocks(region,locks);
}
} catch (Throwable t) {
- logger.error("drop schema failed:", t);
+ LOGGER.error("drop schema failed:", t);
ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(schemaName, t));
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index 6340f73..4477451 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -181,8 +181,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
});
} catch (Exception exception) {
- LOGGER.warn("Exception while truncate stats..,"
- + " please check and delete stats manually inorder to get proper result with old client!!");
+ LOGGER.warn("Exception while truncate stats.., please check and delete stats " +
+ "manually inorder to get proper result with old client!!");
LOGGER.warn(exception.getStackTrace().toString());
} finally {
try {
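
One unchanged line in this hunk, LOGGER.warn(exception.getStackTrace().toString()), logs the default array toString (a class name and hash code) rather than the trace itself. A sketch of the usual SLF4J idiom, passing the Throwable as the final argument so the full stack trace is rendered:

    // Sketch only: SLF4J prints the complete stack trace when the Throwable
    // is passed as the last argument.
    LOGGER.warn("Exception while truncating stats; please check and delete stats"
            + " manually in order to get proper results with an old client!", exception);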
@@ -204,7 +204,6 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
LOGGER.info("Failure Index Rebuild is skipped by configuration.");
return;
}
-
// Ensure we only run one of the index rebuilder tasks
if (ServerUtil.isKeyInRegion(SYSTEM_CATALOG_KEY, e.getEnvironment().getRegion())) {
try {
@@ -261,7 +260,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
IndexUtil.incrementCounterForIndex(conn, indexName, -PENDING_DISABLE_INACTIVE_STATE_COUNT);
indexesIncremented.add(index);
}catch(Exception e) {
- LOGGER.warn("Decrement of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT +" for index :" + index.getName().getString() + "of table: " + dataPTable.getName().getString(), e);
+ LOGGER.warn("Decrement of -" + PENDING_DISABLE_INACTIVE_STATE_COUNT
+ + " for index :" + index.getName().getString() + "of table: "
+ + dataPTable.getName().getString(), e);
}
}
return indexesIncremented;
@@ -339,7 +340,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
String dataTableFullName = SchemaUtil.getTableName(schemaName, dataTable);
if (onlyTheseTables != null && !onlyTheseTables.contains(dataTableFullName)) {
- LOGGER.debug("Could not find " + dataTableFullName + " in " + onlyTheseTables);
+ LOGGER.debug("Could not find " + dataTableFullName +
+ " in " + onlyTheseTables);
continue;
}
@@ -353,7 +355,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
PTable indexPTable = PhoenixRuntime.getTableNoCache(conn, indexTableFullName);
// Sanity check in case index was removed from table
if (!dataPTable.getIndexes().contains(indexPTable)) {
- LOGGER.debug(dataTableFullName + " does not contain " + indexPTable.getName().getString());
+ LOGGER.debug(dataTableFullName + " does not contain " +
+ indexPTable.getName().getString());
continue;
}
@@ -375,8 +378,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
// an index write fails.
if ((indexState == PIndexState.DISABLE || indexState == PIndexState.PENDING_ACTIVE)
&& !MetaDataUtil.tableRegionsOnline(this.env.getConfiguration(), indexPTable)) {
- LOGGER.debug("Index rebuild has been skipped because not all regions of index table="
- + indexPTable.getName() + " are online.");
+ LOGGER.debug("Index rebuild has been skipped because not all regions of" +
+ " index table=" + indexPTable.getName() + " are online.");
continue;
}
@@ -390,9 +393,10 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
try {
IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.DISABLE, 0l);
LOGGER.error("Unable to rebuild index " + indexTableFullName
- + ". Won't attempt again since index disable timestamp is older than current time by "
- + indexDisableTimestampThreshold
- + " milliseconds. Manual intervention needed to re-build the index");
+ + ". Won't attempt again since index disable timestamp is" +
+ " older than current time by " + indexDisableTimestampThreshold
+ + " milliseconds. Manual intervention needed to re-build" +
+ " the index");
} catch (Throwable ex) {
LOGGER.error(
"Unable to mark index " + indexTableFullName + " as disabled.", ex);
@@ -413,7 +417,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, null);
continue; // Must wait until clients start to do index maintenance again
} else if (indexState != PIndexState.INACTIVE && indexState != PIndexState.ACTIVE) {
- LOGGER.warn("Unexpected index state of " + indexTableFullName + "=" + indexState + ". Skipping partial rebuild attempt.");
+ LOGGER.warn("Unexpected index state of " + indexTableFullName + "="
+ + indexState + ". Skipping partial rebuild attempt.");
continue;
}
long currentTime = EnvironmentEdgeManager.currentTimeMillis();
@@ -422,7 +427,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_FORWARD_TIME);
// Wait until no failures have occurred in at least forwardOverlapDurationMs
if (indexStateCell.getTimestamp() + forwardOverlapDurationMs > currentTime) {
- LOGGER.debug("Still must wait " + (indexStateCell.getTimestamp() + forwardOverlapDurationMs - currentTime) + " before starting rebuild for " + indexTableFullName);
+ LOGGER.debug("Still must wait " + (indexStateCell.getTimestamp() +
+ forwardOverlapDurationMs - currentTime) +
+ " before starting rebuild for " + indexTableFullName);
continue; // Haven't waited long enough yet
}
Long upperBoundOfRebuild = indexStateCell.getTimestamp() + forwardOverlapDurationMs;
@@ -433,8 +440,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild);
}
- LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" + indexPTable.getName()
- + " on data table:" + dataPTable.getName() + " which failed to be updated at "
+ LOGGER.debug("We have found " + indexPTable.getIndexState() + " Index:" +
+ indexPTable.getName() + " on data table:" + dataPTable.getName() +
+ " which failed to be updated at "
+ indexPTable.getIndexDisableTimestamp());
indexesToPartiallyRebuild.add(new Pair<PTable,Long>(indexPTable,upperBoundOfRebuild));
} while (hasMore);
@@ -469,7 +477,10 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
long disabledTimeStampVal = index.getIndexDisableTimestamp();
if (disabledTimeStampVal != 0) {
if (signOfDisableTimeStamp != 0 && signOfDisableTimeStamp != Long.signum(disabledTimeStampVal)) {
- LOGGER.warn("Found unexpected mix of signs with INDEX_DISABLE_TIMESTAMP for " + dataPTable.getName().getString() + " with " + indexesToPartiallyRebuild);
+ LOGGER.warn("Found unexpected mix of signs with " +
+ "INDEX_DISABLE_TIMESTAMP for " +
+ dataPTable.getName().getString() + " with " +
+ indexesToPartiallyRebuild);
}
signOfDisableTimeStamp = Long.signum(disabledTimeStampVal);
disabledTimeStampVal = Math.abs(disabledTimeStampVal);
@@ -492,8 +503,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
long scanBeginTime = Math.max(0, earliestDisableTimestamp - backwardOverlapDurationMs);
long scanEndTime = Math.min(latestUpperBoundTimestamp,
getTimestampForBatch(scanBeginTime,batchExecutedPerTableMap.get(dataPTable.getName())));
- LOGGER.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
- + " from timestamp=" + scanBeginTime + " until " + scanEndTime);
+ LOGGER.info("Starting to build " + dataPTable + " indexes "
+ + indexesToPartiallyRebuild + " from timestamp=" +
+ scanBeginTime + " until " + scanEndTime);
TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false);
// TODO Need to set high timeout
@@ -512,18 +524,21 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, attribValue);
ScanUtil.setClientVersion(dataTableScan, MetaDataProtocol.PHOENIX_VERSION);
- LOGGER.info("Starting to partially build indexes:" + indexesToPartiallyRebuild
- + " on data table:" + dataPTable.getName() + " with the earliest disable timestamp:"
+ LOGGER.info("Starting to partially build indexes:" +
+ indexesToPartiallyRebuild + " on data table:" +
+ dataPTable.getName() + " with the earliest disable timestamp:"
+ earliestDisableTimestamp + " till "
- + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime));
+ + (scanEndTime == HConstants.LATEST_TIMESTAMP ?
+ "LATEST_TIMESTAMP" : scanEndTime));
MutationState mutationState = plan.execute();
long rowCount = mutationState.getUpdateCount();
decrementIndexesPendingDisableCount(conn, dataPTable, indexesToPartiallyRebuild);
if (scanEndTime == latestUpperBoundTimestamp) {
- LOGGER.info("Rebuild completed for all inactive/disabled indexes in data table:"
- + dataPTable.getName());
+ LOGGER.info("Rebuild completed for all inactive/disabled indexes" +
+ " in data table:" + dataPTable.getName());
}
- LOGGER.info(" no. of datatable rows read in rebuilding process is " + rowCount);
+ LOGGER.info(" no. of datatable rows read in rebuilding process is "
+ + rowCount);
for (PTable indexPTable : indexesToPartiallyRebuild) {
String indexTableFullName = SchemaUtil.getTableName(
indexPTable.getSchemaName().getString(),
@@ -533,7 +548,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
IndexUtil.updateIndexState(conn, indexTableFullName, PIndexState.ACTIVE, 0L,
latestUpperBoundTimestamp);
batchExecutedPerTableMap.remove(dataPTable.getName());
- LOGGER.info("Making Index:" + indexPTable.getTableName() + " active after rebuilding");
+ LOGGER.info("Making Index:" + indexPTable.getTableName() +
+ " active after rebuilding");
} else {
// Increment timestamp so that client sees updated disable timestamp
IndexUtil.updateIndexState(conn, indexTableFullName, indexPTable.getIndexState(),
@@ -544,15 +560,18 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
}
batchExecutedPerTableMap.put(dataPTable.getName(), ++noOfBatches);
LOGGER.info(
- "During Round-robin build: Successfully updated index disabled timestamp for "
+ "During Round-robin build: Successfully updated " +
+ "index disabled timestamp for "
+ indexTableFullName + " to " + scanEndTime);
}
} catch (SQLException e) {
- LOGGER.error("Unable to rebuild " + dataPTable + " index " + indexTableFullName, e);
+ LOGGER.error("Unable to rebuild " + dataPTable + " index " +
+ indexTableFullName, e);
}
}
} catch (Exception e) {
- LOGGER.error("Unable to rebuild " + dataPTable + " indexes " + indexesToPartiallyRebuild, e);
+ LOGGER.error("Unable to rebuild " + dataPTable + " indexes " +
+ indexesToPartiallyRebuild, e);
}
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
index c80070c..f1198ee 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -120,7 +120,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
if (!this.accessCheckEnabled) {
- LOGGER.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+ LOGGER.warn(
+ "PhoenixAccessController has been loaded with authorization checks disabled.");
}
if (env instanceof PhoenixMetaDataControllerEnvironment) {
this.env = (PhoenixMetaDataControllerEnvironment)env;
@@ -595,7 +596,8 @@ public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
}
}
} else if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("No permissions found for table=" + table + " or namespace=" + table.getNamespaceAsString());
+ LOGGER.debug("No permissions found for table=" +
+ table + " or namespace=" + table.getNamespaceAsString());
}
return false;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 5595dda..b9a0f11 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -206,7 +206,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
private int scansReferenceCount = 0;
@GuardedBy("lock")
private boolean isRegionClosingOrSplitting = false;
- private static final Logger logger = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
private KeyValueBuilder kvBuilder;
private Configuration upsertSelectConfig;
private Configuration compactionConfig;
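
For reference, the declaration idiom this addendum converges on, shown as a minimal self-contained sketch (superclass and other members omitted for brevity):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class UngroupedAggregateRegionObserver {
        // One private static final logger per class, named in constant case
        // per the Java convention for static final fields.
        private static final Logger LOGGER =
                LoggerFactory.getLogger(UngroupedAggregateRegionObserver.class);
    }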
@@ -279,7 +279,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
}
// TODO: should we use the one that is all or none?
- logger.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
+ LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString());
region.batchMutate(mutations.toArray(mutationArray), HConstants.NO_NONCE, HConstants.NO_NONCE);
}
@@ -305,7 +305,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
return;
}
- logger.debug("Committing batch of " + mutations.size() + " mutations for " + table);
+ LOGGER.debug("Committing batch of " + mutations.size() + " mutations for " + table);
try {
table.batch(mutations);
} catch (InterruptedException e) {
@@ -433,7 +433,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
byte[] descRowKeyTableBytes = scan.getAttribute(UPGRADE_DESC_ROW_KEY);
boolean isDescRowKeyOrderUpgrade = descRowKeyTableBytes != null;
if (isDescRowKeyOrderUpgrade) {
- logger.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
+ LOGGER.debug("Upgrading row key for " + region.getRegionInfo().getTable().getNameAsString());
projectedTable = deserializeTable(descRowKeyTableBytes);
try {
writeToTable = PTableImpl.builderWithColumns(projectedTable,
@@ -552,8 +552,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
rowAggregators = aggregators.getAggregators();
Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Starting ungrouped coprocessor scan " + scan + " "+region.getRegionInfo(), ScanUtil.getCustomAnnotations(scan)));
}
boolean useIndexProto = true;
byte[] indexMaintainersPtr = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
@@ -832,7 +832,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
synchronized (lock) {
scansReferenceCount--;
if (scansReferenceCount < 0) {
- logger.warn(
+ LOGGER.warn(
"Scan reference count went below zero. Something isn't correct. Resetting it back to zero");
scansReferenceCount = 0;
}
@@ -844,7 +844,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
try {
targetHTable.close();
} catch (IOException e) {
- logger.error("Closing table: " + targetHTable + " failed: ", e);
+ LOGGER.error("Closing table: " + targetHTable + " failed: ", e);
}
}
} finally {
@@ -855,8 +855,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
}
}
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan, ScanUtil.getCustomAnnotations(scan)));
}
final boolean hadAny = hasAny;
@@ -1037,8 +1037,8 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
} catch (Exception e) {
// If we can't reach the stats table, don't interrupt the normal
// compaction operation, just log a warning.
- if (logger.isWarnEnabled()) {
- logger.warn("Unable to collect stats for " + table, e);
+ if (LOGGER.isWarnEnabled()) {
+ LOGGER.warn("Unable to collect stats for " + table, e);
}
}
return internalScanner;
@@ -1134,7 +1134,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
}
} catch (IOException e) {
- logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
+ LOGGER.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
throw e;
} finally {
region.closeRegionOperation();
@@ -1194,7 +1194,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
} else {
rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT;
- logger.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
+ LOGGER.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
+ region.getRegionInfo().getRegionNameAsString());
}
byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
@@ -1291,18 +1291,18 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
return compactionRunning ? COMPACTION_UPDATE_STATS_ROW_COUNT : rowCount;
} catch (IOException e) {
- logger.error("IOException in update stats: " + Throwables.getStackTraceAsString(e));
+ LOGGER.error("IOException in update stats: " + Throwables.getStackTraceAsString(e));
throw e;
} finally {
try {
if (noErrors && !compactionRunning) {
statsCollector.updateStatistics(region, scan);
- logger.info("UPDATE STATISTICS finished successfully for scanner: "
+ LOGGER.info("UPDATE STATISTICS finished successfully for scanner: "
+ innerScanner + ". Number of rows scanned: " + rowCount
+ ". Time: " + (System.currentTimeMillis() - startTime));
}
if (compactionRunning) {
- logger.info("UPDATE STATISTICS stopped in between because major compaction was running for region "
+ LOGGER.info("UPDATE STATISTICS stopped in between because major compaction was running for region "
+ region.getRegionInfo().getRegionNameAsString());
}
} finally {
@@ -1450,7 +1450,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
// FIXME need to handle views and indexes on views as well
for (PTable index : indexes) {
if (index.getIndexDisableTimestamp() != 0) {
- logger.info(
+ LOGGER.info(
"Modifying major compaction scanner to retain deleted cells for a table with disabled index: "
+ fullTableName);
Scan scan = new Scan();
@@ -1470,10 +1470,10 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
}
} catch (Exception e) {
if (e instanceof TableNotFoundException) {
- logger.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
+ LOGGER.debug("Ignoring HBase table that is not a Phoenix table: " + fullTableName);
// non-Phoenix HBase tables won't be found, do nothing
} else {
- logger.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
+ LOGGER.error("Unable to modify compaction scanner to retain deleted cells for a table with disabled Index; "
+ fullTableName,
e);
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
index e8d5aed..79d90f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/DropChildViewsTask.java
@@ -61,7 +61,8 @@ public class DropChildViewsTask extends BaseTask {
return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
} else if (System.currentTimeMillis() < timeMaxInterval + timestamp.getTime()) {
// skip this task as it has not been expired and its parent table has not been dropped yet
- LOGGER.info("Skipping a child view drop task. The parent table has not been dropped yet : " +
+ LOGGER.info("Skipping a child view drop task. " +
+ "The parent table has not been dropped yet : " +
taskRecord.getSchemaName() + "." + taskRecord.getTableName() +
" with tenant id " + (tenantId == null ? " IS NULL" : tenantId) +
" and timestamp " + timestamp.toString());
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
index c5aaa99..0069592 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/tasks/IndexRebuildTask.java
@@ -166,7 +166,8 @@ public class IndexRebuildTask extends BaseTask {
if (job != null && job.isComplete()) {
if (job.isSuccessful()) {
- LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful " + taskRecord.getTableName());
+ LOGGER.warn("IndexRebuildTask checkCurrentResult job is successful "
+ + taskRecord.getTableName());
return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.SUCCESS, "");
} else {
return new TaskRegionObserver.TaskResult(TaskRegionObserver.TaskResultCode.FAIL,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
index fb722d3..5d4d1d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/AggregatePlan.java
@@ -90,7 +90,7 @@ public class AggregatePlan extends BaseQueryPlan {
private final Expression having;
private List<KeyRange> splits;
private List<List<Scan>> scans;
- private static final Logger logger = LoggerFactory.getLogger(AggregatePlan.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(AggregatePlan.class);
private boolean isSerial;
private OrderBy actualOutputOrderBy;
@@ -112,7 +112,7 @@ public class AggregatePlan extends BaseQueryPlan {
boolean hasSerialHint = statement.getHint().hasHint(HintNode.Hint.SERIAL);
boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table.getTable(), orderBy, context);
if (hasSerialHint && !canBeExecutedSerially) {
- logger.warn("This query cannot be executed serially. Ignoring the hint");
+ LOGGER.warn("This query cannot be executed serially. Ignoring the hint");
}
this.isSerial = hasSerialHint && canBeExecutedSerially;
this.actualOutputOrderBy = convertActualOutputOrderBy(orderBy, groupBy, context);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index d33dd2f..caa60a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -358,12 +358,14 @@ public abstract class BaseQueryPlan implements QueryPlan {
}
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Scan ready for iteration: " + scan, connection));
}
ResultIterator iterator = newIterator(scanGrouper, scan, caches);
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Iterator ready: " + iterator, connection));
}
// wrap the iterator so we start/end tracing as we expect
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
index 2117d22..e8f761a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/HashJoinPlan.java
@@ -553,7 +553,8 @@ public class HashJoinPlan extends DelegateQueryPlan {
} else {
cacheId = Bytes.toBytes(RANDOM.nextLong());
}
- LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) + " for " + queryString);
+ LOGGER.debug("Using cache ID " + Hex.encodeHexString(cacheId) +
+ " for " + queryString);
if (cache == null) {
LOGGER.debug("Making RPC to add cache " + Hex.encodeHexString(cacheId));
cache = parent.hashClient.addHashCache(ranges, cacheId, iterator,
@@ -566,7 +567,8 @@ public class HashJoinPlan extends DelegateQueryPlan {
- parent.firstJobEndTime.get()) > parent.maxServerCacheTimeToLive) {
LOGGER.warn(addCustomAnnotations(
"Hash plan [" + index
- + "] execution seems too slow. Earlier hash cache(s) might have expired on servers.",
+ + "] execution seems too slow. Earlier" +
+ " hash cache(s) might have expired on servers.",
parent.delegate.getContext().getConnection()));
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
index 6b3f1d4..9ad6172 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/MutationState.java
@@ -123,7 +123,7 @@ import com.google.common.collect.Sets;
* Tracks the uncommitted state
*/
public class MutationState implements SQLCloseable {
- private static final Logger logger = LoggerFactory.getLogger(MutationState.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(MutationState.class);
private static final int[] EMPTY_STATEMENT_INDEX_ARRAY = new int[0];
private static final int MAX_COMMIT_RETRIES = 3;
@@ -967,7 +967,7 @@ public class MutationState implements SQLCloseable {
sendMutations(verifiedOrDeletedIndexMutations.entrySet().iterator(), span, indexMetaDataPtr);
} catch (SQLException ex) {
// TODO: add a metric here
- logger.warn(
+ LOGGER.warn(
"Ignoring exception that happened during setting index verified value to verified=TRUE "
+ verifiedOrDeletedIndexMutations.toString(),
ex);
@@ -1096,8 +1096,8 @@ public class MutationState implements SQLCloseable {
itrListMutation.remove();
batchCount++;
- if (logger.isDebugEnabled())
- logger.debug("Sent batch of " + mutationBatch.size() + " for "
+ if (LOGGER.isDebugEnabled())
+ LOGGER.debug("Sent batch of " + mutationBatch.size() + " for "
+ Bytes.toString(htableName));
}
child.stop();
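
The guarded debug above is only strictly necessary when computing a log argument is itself costly; with {} placeholders SLF4J already defers message formatting. A sketch under that assumption (someRowKey is a hypothetical variable, not from the source):

    // Cheap arguments: {} already defers formatting, so the guard adds little.
    LOGGER.debug("Sent batch of {} for {}", mutationBatch.size(), Bytes.toString(htableName));

    // Expensive arguments: keep the guard so the computation is skipped too.
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Sent batch for row {}", Bytes.toStringBinary(someRowKey));
    }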
@@ -1130,7 +1130,7 @@ public class MutationState implements SQLCloseable {
// If it fails again, we don't retry.
String msg = "Swallowing exception and retrying after clearing meta cache on connection. "
+ inferredE;
- logger.warn(LogUtil.addCustomAnnotations(msg, connection));
+ LOGGER.warn(LogUtil.addCustomAnnotations(msg, connection));
connection.getQueryServices().clearTableRegionCache(htableName);
// add a new child span as this one failed
@@ -1368,8 +1368,8 @@ public class MutationState implements SQLCloseable {
finishSuccessful = true;
}
} catch (SQLException e) {
- if (logger.isInfoEnabled())
- logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer()
+ if (LOGGER.isInfoEnabled())
+ LOGGER.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer()
+ " with retry count of " + retryCount);
retryCommit = (e.getErrorCode() == SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION
.getErrorCode() && retryCount < MAX_COMMIT_RETRIES);
@@ -1383,9 +1383,9 @@ public class MutationState implements SQLCloseable {
if (!finishSuccessful) {
try {
phoenixTransactionContext.abort();
- if (logger.isInfoEnabled()) logger.info("Abort successful");
+ if (LOGGER.isInfoEnabled()) LOGGER.info("Abort successful");
} catch (SQLException e) {
- if (logger.isInfoEnabled()) logger.info("Abort failed with " + e);
+ if (LOGGER.isInfoEnabled()) LOGGER.info("Abort failed with " + e);
if (sqlE == null) {
sqlE = e;
} else {
@@ -1439,7 +1439,7 @@ public class MutationState implements SQLCloseable {
* @throws SQLException
*/
private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) throws SQLException {
- if (logger.isInfoEnabled()) logger.info("Checking for index updates as of " + getInitialWritePointer());
+ if (LOGGER.isInfoEnabled()) LOGGER.info("Checking for index updates as of " + getInitialWritePointer());
MetaDataClient client = new MetaDataClient(connection);
PMetaData cache = connection.getMetaDataCache();
boolean addedAnyIndexes = false;
@@ -1466,13 +1466,13 @@ public class MutationState implements SQLCloseable {
// that an index was dropped and recreated with the same name but different
// indexed/covered columns.
addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes()));
- if (logger.isInfoEnabled())
- logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to "
+ if (LOGGER.isInfoEnabled())
+ LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to "
+ updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes());
}
}
- if (logger.isInfoEnabled())
- logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer()
+ if (LOGGER.isInfoEnabled())
+ LOGGER.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer()
+ " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables"));
// If all tables are immutable, we know the conflict we got was due to our DDL/DML fence.
// If any indexes were added, then the conflict might be due to DDL/DML fence.
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
index 38d47c9..d2019fc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/ScanPlan.java
@@ -88,7 +88,7 @@ import org.slf4j.LoggerFactory;
* @since 0.1
*/
public class ScanPlan extends BaseQueryPlan {
- private static final Logger logger = LoggerFactory.getLogger(ScanPlan.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ScanPlan.class);
private List<KeyRange> splits;
private List<List<Scan>> scans;
private boolean allowPageFilter;
@@ -139,7 +139,7 @@ public class ScanPlan extends BaseQueryPlan {
boolean canBeExecutedSerially = ScanUtil.canQueryBeExecutedSerially(table, orderBy, context);
if (!canBeExecutedSerially) {
if (hasSerialHint) {
- logger.warn("This query cannot be executed serially. Ignoring the hint");
+ LOGGER.warn("This query cannot be executed serially. Ignoring the hint");
}
return false;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
index 532012f..fa72c83 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/LikeExpression.java
@@ -51,7 +51,7 @@ import com.google.common.collect.Lists;
* @since 0.1
*/
public abstract class LikeExpression extends BaseCompoundExpression {
- private static final Logger logger = LoggerFactory.getLogger(LikeExpression.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(LikeExpression.class);
private static final String ZERO_OR_MORE = "\\E.*\\Q";
private static final String ANY_ONE = "\\E.\\Q";
@@ -267,15 +267,15 @@ public abstract class LikeExpression extends BaseCompoundExpression {
AbstractBasePattern pattern = this.pattern;
if (pattern == null) { // TODO: don't allow? this is going to be slooowwww
if (!getPatternExpression().evaluate(tuple, ptr)) {
- if (logger.isTraceEnabled()) {
- logger.trace("LIKE is FALSE: pattern is null");
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("LIKE is FALSE: pattern is null");
}
return false;
}
String value = (String) PVarchar.INSTANCE.toObject(ptr, getPatternExpression().getSortOrder());
pattern = compilePattern(value);
- if (logger.isTraceEnabled()) {
- logger.trace("LIKE pattern is expression: " + pattern.pattern());
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("LIKE pattern is expression: " + pattern.pattern());
}
}
@@ -283,21 +283,21 @@ public abstract class LikeExpression extends BaseCompoundExpression {
SortOrder strSortOrder = strExpression.getSortOrder();
PVarchar strDataType = PVarchar.INSTANCE;
if (!strExpression.evaluate(tuple, ptr)) {
- if (logger.isTraceEnabled()) {
- logger.trace("LIKE is FALSE: child expression is null");
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("LIKE is FALSE: child expression is null");
}
return false;
}
String value = null;
- if (logger.isTraceEnabled()) {
+ if (LOGGER.isTraceEnabled()) {
value = (String) strDataType.toObject(ptr, strSortOrder);
}
strDataType.coerceBytes(ptr, strDataType, strSortOrder, SortOrder.ASC);
pattern.matches(ptr);
- if (logger.isTraceEnabled()) {
+ if (LOGGER.isTraceEnabled()) {
boolean matched = ((Boolean) PBoolean.INSTANCE.toObject(ptr)).booleanValue();
- logger.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
+ LOGGER.trace("LIKE(value='" + value + "'pattern='" + pattern.pattern() + "' is " + matched);
}
return true;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
index b41c6c6..f647c45 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/FirstLastValueServerAggregator.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
*/
public class FirstLastValueServerAggregator extends BaseAggregator {
- private static final Logger logger = LoggerFactory.getLogger(FirstLastValueServerAggregator.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(FirstLastValueServerAggregator.class);
protected List<Expression> children;
protected BinaryComparator topOrder = new BinaryComparator(ByteUtil.EMPTY_BYTE_ARRAY);
protected byte[] topValue;
@@ -88,7 +88,7 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
try {
addFlag = true;
} catch (Exception e) {
- logger.error(e.getMessage());
+ LOGGER.error(e.getMessage());
}
} else {
if (isAscending) {
@@ -180,7 +180,7 @@ public class FirstLastValueServerAggregator extends BaseAggregator {
try {
ptr.set(payload.getPayload());
} catch (IOException ex) {
- logger.error(ex.getMessage());
+ LOGGER.error(ex.getMessage());
return false;
}
return true;
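
The two error calls converted above log only the exception's getMessage(), which drops the stack trace and prints "null" for message-less exceptions. A sketch of the usual alternative; the message text here is illustrative, not from the source:

    // Sketch only: fixed message plus the exception itself, so the stack
    // trace is preserved in the log.
    LOGGER.error("Unable to process FirstLastValue payload", ex);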
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
index 983968b..e057173 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/aggregator/SizeTrackingServerAggregators.java
@@ -25,7 +25,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SizeTrackingServerAggregators extends ServerAggregators {
- private static final Logger logger = LoggerFactory.getLogger(SizeTrackingServerAggregators.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SizeTrackingServerAggregators.class);
private final MemoryChunk chunk;
private final int sizeIncrease;
@@ -50,7 +50,7 @@ public class SizeTrackingServerAggregators extends ServerAggregators {
expressions[i].reset();
}
while(dsize > chunk.getSize()) {
- logger.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize());
+ LOGGER.info("Request: {}, resizing {} by 1024*1024", dsize, chunk.getSize());
chunk.resize(chunk.getSize() + sizeIncrease);
}
memoryUsed = dsize;
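
This class already uses parameterized messages, though the text hardcodes 1024*1024 while the loop grows the chunk by sizeIncrease, which may or may not equal that value. A sketch that reports the actual increment instead:

    // Sketch only: log the real resize increment rather than a fixed value.
    LOGGER.info("Request: {}, resizing {} by {}", dsize, chunk.getSize(), sizeIncrease);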
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
index 6644a7e..7dfb1d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/CollationKeyFunction.java
@@ -125,7 +125,8 @@ public class CollationKeyFunction extends ScalarFunction {
byte[] collationKeyByteArray = collator.getCollationKey(inputString).toByteArray();
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("CollationKey bytes: " + VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
+ LOGGER.trace("CollationKey bytes: " +
+ VarBinaryFormatter.INSTANCE.format(collationKeyByteArray));
}
ptr.set(collationKeyByteArray);
@@ -167,7 +168,8 @@ public class CollationKeyFunction extends ScalarFunction {
}
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace(String.format("Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
+ LOGGER.trace(String.format(
+ "Collator: [strength: %d, decomposition: %d], Special-Upper-Case: %s",
collator.getStrength(), collator.getDecomposition(),
BooleanUtils.isTrue(useSpecialUpperCaseCollator)));
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
index 2eb69bd..df81957 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/RowKeyComparisonFilter.java
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
*
*/
public class RowKeyComparisonFilter extends BooleanExpressionFilter {
- private static final Logger logger = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(RowKeyComparisonFilter.class);
private boolean evaluate = true;
private boolean keepRow = false;
@@ -70,8 +70,8 @@ public class RowKeyComparisonFilter extends BooleanExpressionFilter {
if (evaluate) {
inputTuple.setKey(v.getRowArray(), v.getRowOffset(), v.getRowLength());
this.keepRow = Boolean.TRUE.equals(evaluate(inputTuple));
- if (logger.isTraceEnabled()) {
- logger.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("RowKeyComparisonFilter: " + (this.keepRow ? "KEEP" : "FILTER")
+ " row " + inputTuple);
}
evaluate = false;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 1c036ac..66c4594 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -325,7 +325,8 @@ public class Indexer extends BaseRegionObserver {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
if (duration >= slowIndexPrepareThreshold) {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock", duration, slowPreIncrementThreshold));
+ LOGGER.debug(getCallTooSlowMessage("preIncrementAfterRowLock",
+ duration, slowPreIncrementThreshold));
}
metricSource.incrementSlowDuplicateKeyCheckCalls();
}
@@ -350,7 +351,8 @@ public class Indexer extends BaseRegionObserver {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
if (duration >= slowIndexPrepareThreshold) {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(getCallTooSlowMessage("preBatchMutate", duration, slowIndexPrepareThreshold));
+ LOGGER.debug(getCallTooSlowMessage("preBatchMutate",
+ duration, slowIndexPrepareThreshold));
}
metricSource.incrementNumSlowIndexPrepareCalls();
}
@@ -504,7 +506,8 @@ public class Indexer extends BaseRegionObserver {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
if (duration >= slowIndexPrepareThreshold) {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(getCallTooSlowMessage("indexPrepare", duration, slowIndexPrepareThreshold));
+ LOGGER.debug(getCallTooSlowMessage(
+ "indexPrepare", duration, slowIndexPrepareThreshold));
}
metricSource.incrementNumSlowIndexPrepareCalls();
}
@@ -576,7 +579,8 @@ public class Indexer extends BaseRegionObserver {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
if (duration >= slowIndexWriteThreshold) {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably", duration, slowIndexWriteThreshold));
+ LOGGER.debug(getCallTooSlowMessage("postBatchMutateIndispensably",
+ duration, slowIndexWriteThreshold));
}
metricSource.incrementNumSlowIndexWriteCalls();
}
@@ -616,7 +620,8 @@ public class Indexer extends BaseRegionObserver {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
if (duration >= slowIndexWriteThreshold) {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(getCallTooSlowMessage("indexWrite", duration, slowIndexWriteThreshold));
+ LOGGER.debug(getCallTooSlowMessage("indexWrite",
+ duration, slowIndexWriteThreshold));
}
metricSource.incrementNumSlowIndexWriteCalls();
}
@@ -723,7 +728,8 @@ public class Indexer extends BaseRegionObserver {
long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
if (duration >= slowPreWALRestoreThreshold) {
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug(getCallTooSlowMessage("preWALRestore", duration, slowPreWALRestoreThreshold));
+ LOGGER.debug(getCallTooSlowMessage("preWALRestore",
+ duration, slowPreWALRestoreThreshold));
}
metricSource.incrementNumSlowPreWALRestoreCalls();
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index 389af36..84ccdb1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -194,7 +194,8 @@ public class IndexManagementUtil {
LOGGER.info("Rethrowing " + e);
throw e1;
} catch (Throwable e1) {
- LOGGER.info("Rethrowing " + e1 + " as a " + IndexBuildingFailureException.class.getSimpleName());
+ LOGGER.info("Rethrowing " + e1 + " as a " +
+ IndexBuildingFailureException.class.getSimpleName());
throw new IndexBuildingFailureException("Failed to build index for unexpected reason!", e1);
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
index 24569a5..e999d5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/ParallelWriterIndexCommitter.java
@@ -42,7 +42,6 @@ public class ParallelWriterIndexCommitter extends AbstractParallelWriterIndexCom
@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite, final boolean allowLocalUpdates, final int clientVersion) throws SingleIndexWriteFailureException {
-
super.write(toWrite, allowLocalUpdates, clientVersion);
// actually submit the tasks to the pool and wait for them to finish/fail
try {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
index db7e6a0..363e780 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/RecoveryIndexWriter.java
@@ -101,7 +101,8 @@ public class RecoveryIndexWriter extends IndexWriter {
ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
HTableInterfaceReference table = tables.get(ptr);
if (nonExistingTablesList.contains(table)) {
- LOGGER.debug("Edits found for non existing table: " + table.getTableName() + " so skipping it!!");
+ LOGGER.debug("Edits found for non existing table: " +
+ table.getTableName() + " so skipping it!!");
continue;
}
if (table == null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
index 934e116..88de4d9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/TrackingParallelWriterIndexCommitter.java
@@ -68,7 +68,8 @@ import com.google.common.collect.Multimap;
* client.
*/
public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
- private static final Logger LOGGER = LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(TrackingParallelWriterIndexCommitter.class);
public static final String NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY = "index.writer.threads.max";
private static final int DEFAULT_CONCURRENT_INDEX_WRITER_THREADS = 10;
@@ -171,14 +172,15 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
} catch (IOException ignord) {
// when it's failed we fall back to the standard & slow way
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("indexRegion.batchMutate failed and fall back to HTable.batch(). Got error="
- + ignord);
+ LOGGER.trace("indexRegion.batchMutate failed and fall " +
+ "back to HTable.batch(). Got error=" + ignord);
}
}
}
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("Writing index update:" + mutations + " to table: " + tableReference);
+ LOGGER.trace("Writing index update:" + mutations + " to table: "
+ + tableReference);
}
// if the client can retry index writes, then we don't need to retry here
HTableFactory factory;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index 3d77c47..eb264b5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -282,25 +282,29 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
systemTable, newState);
if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
- LOGGER.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+ LOGGER.info("Index " + indexTableName +
+ " has been dropped. Ignore uncommitted mutations");
continue;
}
if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
if (leaveIndexActive) {
- LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+ LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP "
+ + " failed with code = "
+ result.getMutationCode());
// If we're not disabling the index, then we don't want to throw as throwing
// will lead to the RS being shutdown.
if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
"Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
} else {
- LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = "
- + result.getMutationCode() + ". Will use default failure policy instead.");
+ LOGGER.warn("Attempt to disable index " + indexTableName +
+ " failed with code = " + result.getMutationCode() +
+ ". Will use default failure policy instead.");
throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
}
}
- LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName
- + " due to an exception while writing updates. indexState=" + newState,
+ LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " +
+ indexTableName + " due to an exception while" +
+ " writing updates. indexState=" + newState,
cause);
} catch (Throwable t) {
if (t instanceof Exception) {
@@ -351,7 +355,8 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
mutation.getRow().length - offset));
String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
if (indexTableName == null) {
- LOGGER.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
+ LOGGER.error("Unable to find local index on " + ref.getTableName() +
+ " with viewID of " + Bytes.toStringBinary(viewId));
} else {
indexTableNames.add(indexTableName);
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 27aaed3..8eeca26 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -143,7 +143,7 @@ import com.google.common.collect.Lists;
* @since 0.1
*/
public abstract class BaseResultIterators extends ExplainTable implements ResultIterators {
- public static final Logger logger = LoggerFactory.getLogger(BaseResultIterators.class);
+ public static final Logger LOGGER = LoggerFactory.getLogger(BaseResultIterators.class);
private static final int ESTIMATED_GUIDEPOSTS_PER_REGION = 20;
private static final int MIN_SEEK_TO_COLUMN_VERSION = VersionUtil.encodeVersion("0", "98", "12");
private final List<List<Scan>> scans;
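
Unlike the other classes touched by this addendum, BaseResultIterators keeps its logger public, and the rename preserves that. SLF4J loggers are conventionally private; a one-line sketch, assuming no external code actually reads the field:

    private static final Logger LOGGER = LoggerFactory.getLogger(BaseResultIterators.class);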
@@ -1226,8 +1226,8 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
*/
@Override
public List<PeekingResultIterator> getIterators() throws SQLException {
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Getting iterators for " + this,
ScanUtil.getCustomAnnotations(scan)));
}
boolean isReverse = ScanUtil.isReversed(scan);
@@ -1313,7 +1313,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
Scan oldScan = scanPair.getFirst();
byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
if(e2 instanceof HashJoinCacheNotFoundException){
- logger.debug(
+ LOGGER.debug(
"Retrying when Hash Join cache is not found on the server ,by sending the cache again");
if(retryCount<=0){
throw e2;
@@ -1454,7 +1454,7 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
Thread.currentThread().interrupt();
throw new RuntimeException(e);
} catch (ExecutionException e) {
- logger.info("Failed to execute task during cancel", e);
+ LOGGER.info("Failed to execute task during cancel", e);
continue;
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
index 1aab2d5..2fb7b72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ChunkedResultIterator.java
@@ -54,7 +54,7 @@ import com.google.common.base.Preconditions;
*/
@Deprecated
public class ChunkedResultIterator implements PeekingResultIterator {
- private static final Logger logger = LoggerFactory.getLogger(ChunkedResultIterator.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ChunkedResultIterator.class);
private final ParallelIteratorFactory delegateIteratorFactory;
private ImmutableBytesWritable lastKey = new ImmutableBytesWritable();
@@ -89,7 +89,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
@Override
public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException {
- if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("ChunkedResultIteratorFactory.newIterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
return new ChunkedResultIterator(delegateFactory, mutationState, context, tableRef, scan,
mutationState.getConnection().getQueryServices().getProps().getLong(
QueryServices.SCAN_RESULT_CHUNK_SIZE,
@@ -110,7 +110,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
// Instantiate single chunk iterator and the delegate iterator in constructor
// to get parallel scans kicked off in separate threads. If we delay this,
// we'll get serialized behavior (see PHOENIX-
- if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get first chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
ResultIterator singleChunkResultIterator = new SingleChunkResultIterator(scanner, chunkSize);
String tableName = tableRef.getTable().getPhysicalName().getString();
resultIterator = delegateIteratorFactory.newIterator(context, singleChunkResultIterator, scan, tableName, plan);
@@ -149,7 +149,7 @@ public class ChunkedResultIterator implements PeekingResultIterator {
} else {
scan.setStartRow(ByteUtil.copyKeyBytesIfNecessary(lastKey));
}
- if (logger.isDebugEnabled()) logger.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) LOGGER.debug(LogUtil.addCustomAnnotations("Get next chunked result iterator over " + tableRef.getTable().getPhysicalName().getString() + " with " + scan, ScanUtil.getCustomAnnotations(scan)));
String tableName = tableRef.getTable().getPhysicalName().getString();
ReadMetricQueue readMetrics = context.getReadMetricsQueue();
ScanMetricsHolder scanMetricsHolder = ScanMetricsHolder.getInstance(readMetrics, tableName, scan,
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 262ae44..3d5c96b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -52,7 +52,7 @@ import com.google.common.collect.Lists;
* @since 0.1
*/
public class ParallelIterators extends BaseResultIterators {
- private static final Logger logger = LoggerFactory.getLogger(ParallelIterators.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ParallelIterators.class);
private static final String NAME = "PARALLEL";
private final ParallelIteratorFactory iteratorFactory;
private final boolean initFirstScanOnly;
@@ -122,8 +122,8 @@ public class ParallelIterators extends BaseResultIterators {
@Override
public PeekingResultIterator call() throws Exception {
long startTime = System.currentTimeMillis();
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Id: " + scanId + ", Time: " + (System.currentTimeMillis() - startTime) + "ms, Scan: " + scan, ScanUtil.getCustomAnnotations(scan)));
}
PeekingResultIterator iterator = iteratorFactory.newIterator(context, tableResultItr, scan, physicalTableName, ParallelIterators.this.plan);
if (initFirstScanOnly) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
index 5624f5f..bc77c98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/RoundRobinResultIterator.java
@@ -49,7 +49,7 @@ import com.google.common.base.Throwables;
*/
public class RoundRobinResultIterator implements ResultIterator {
- private static final Logger logger = LoggerFactory.getLogger(RoundRobinResultIterator.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(RoundRobinResultIterator.class);
private final int threshold;
@@ -223,8 +223,8 @@ public class RoundRobinResultIterator implements ResultIterator {
final ConnectionQueryServices services = context.getConnection().getQueryServices();
ExecutorService executor = services.getExecutor();
numParallelFetches++;
- if (logger.isDebugEnabled()) {
- logger.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Performing parallel fetch for " + openIterators.size() + " iterators. ");
}
for (final RoundRobinIterator itr : openIterators) {
Future<Tuple> future = executor.submit(new Callable<Tuple>() {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
index 4bf41dd..69bf8c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/TableResultIterator.java
@@ -80,7 +80,7 @@ public class TableResultIterator implements ResultIterator {
private final long renewLeaseThreshold;
private final QueryPlan plan;
private final ParallelScanGrouper scanGrouper;
- private static final Logger logger = LoggerFactory.getLogger(TableResultIterator.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(TableResultIterator.class);
private Tuple lastTuple = null;
private ImmutableBytesWritable ptr = new ImmutableBytesWritable();
@GuardedBy("renewLeaseLock")
@@ -189,7 +189,7 @@ public class TableResultIterator implements ResultIterator {
}
}
plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getName().getName());
- logger.debug(
+ LOGGER.debug(
"Retrying when Hash Join cache is not found on the server ,by sending the cache again");
if (retry <= 0) {
throw e1;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 67ac9c9..8ac5375 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -61,7 +61,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* @since 0.1
*/
public final class PhoenixDriver extends PhoenixEmbeddedDriver {
- private static final Logger logger = LoggerFactory.getLogger(PhoenixDriver.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixDriver.class);
public static final PhoenixDriver INSTANCE;
private static volatile String driverShutdownMsg;
static {
@@ -100,11 +100,11 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
// policy). We don't care about any exceptions, we're going down anyways.
future.get(millisBeforeShutdown, TimeUnit.MILLISECONDS);
} catch (ExecutionException e) {
- logger.warn("Failed to close instance", e);
+ LOGGER.warn("Failed to close instance", e);
} catch (InterruptedException e) {
- logger.warn("Interrupted waiting to close instance", e);
+ LOGGER.warn("Interrupted waiting to close instance", e);
} catch (TimeoutException e) {
- logger.warn("Timed out waiting to close instance", e);
+ LOGGER.warn("Timed out waiting to close instance", e);
} finally {
// We're going down, but try to clean up.
svc.shutdownNow();
@@ -116,7 +116,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
// Don't want to register it if we're already in the process of going down.
DriverManager.registerDriver(INSTANCE);
} catch (IllegalStateException e) {
- logger.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
+ LOGGER.warn("Failed to register PhoenixDriver shutdown hook as the JVM is already shutting down");
// Close the instance now because we don't have the shutdown hook
closeInstance(INSTANCE);
@@ -132,7 +132,7 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
try {
instance.close();
} catch (SQLException e) {
- logger.warn("Unable to close PhoenixDriver on shutdown", e);
+ LOGGER.warn("Unable to close PhoenixDriver on shutdown", e);
} finally {
driverShutdownMsg = "Phoenix driver closed because server is shutting down";
}
@@ -156,14 +156,14 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
@Override
public void onRemoval(RemovalNotification<ConnectionInfo, ConnectionQueryServices> notification) {
String connInfoIdentifier = notification.getKey().toString();
- logger.debug("Expiring " + connInfoIdentifier + " because of "
+ LOGGER.debug("Expiring " + connInfoIdentifier + " because of "
+ notification.getCause().name());
try {
notification.getValue().close();
}
catch (SQLException se) {
- logger.error("Error while closing expired cache connection " + connInfoIdentifier, se);
+ LOGGER.error("Error while closing expired cache connection " + connInfoIdentifier, se);
}
}
};
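
The PhoenixDriver hunk above logs cache evictions from a removal listener before closing the evicted ConnectionQueryServices. A minimal sketch of that eviction-logging pattern, assuming Guava's cache API (the String key and Closeable value types here are illustrative):

    import java.io.Closeable;
    import java.io.IOException;

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.RemovalListener;
    import com.google.common.cache.RemovalNotification;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExpiringServices {
        private static final Logger LOGGER = LoggerFactory.getLogger(ExpiringServices.class);

        // Log why an entry was evicted, then release its resources.
        private static final RemovalListener<String, Closeable> LISTENER =
                new RemovalListener<String, Closeable>() {
                    @Override
                    public void onRemoval(RemovalNotification<String, Closeable> notification) {
                        LOGGER.debug("Expiring {} because of {}",
                                notification.getKey(), notification.getCause().name());
                        try {
                            notification.getValue().close();
                        } catch (IOException e) {
                            LOGGER.error("Error while closing expired entry " + notification.getKey(), e);
                        }
                    }
                };

        static Cache<String, Closeable> newCache() {
            return CacheBuilder.newBuilder().removalListener(LISTENER).build();
        }
    }
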
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 7d54a2d..2b6a6b0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -65,7 +65,6 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
/**
* The protocol for Phoenix Network Client
*/
-
private final static String DNC_JDBC_PROTOCOL_SUFFIX = "//";
private final static String DRIVER_NAME = "PhoenixEmbeddedDriver";
private static final String TERMINATOR = "" + PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
@@ -193,7 +192,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
* @since 0.1.1
*/
public static class ConnectionInfo {
- private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(ConnectionInfo.class);
+ private static final org.slf4j.Logger LOGGER =
+ LoggerFactory.getLogger(ConnectionInfo.class);
private static final Object KERBEROS_LOGIN_LOCK = new Object();
private static final char WINDOWS_SEPARATOR_CHAR = '\\';
private static final String REALM_EQUIVALENCY_WARNING_MSG = "Provided principal does not contain a realm and the default realm cannot be determined. Ignoring realm equivalency check.";
@@ -374,7 +374,9 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
currentUser = UserGroupInformation.getCurrentUser();
if (!currentUser.hasKerberosCredentials() || !isSameName(currentUser.getUserName(), principal)) {
final Configuration config = getConfiguration(props, info, principal, keytab);
- LOGGER.info("Trying to connect to a secure cluster as {} with keytab {}", config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
+ LOGGER.info("Trying to connect to a secure cluster as {} " +
+ "with keytab {}",
+ config.get(QueryServices.HBASE_CLIENT_PRINCIPAL),
config.get(QueryServices.HBASE_CLIENT_KEYTAB));
UserGroupInformation.setConfiguration(config);
User.login(config, QueryServices.HBASE_CLIENT_KEYTAB, QueryServices.HBASE_CLIENT_PRINCIPAL, null);
@@ -629,7 +631,8 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
throw getMalFormedUrlException(url);
}
String znodeParent = config.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
- LOGGER.debug("Getting default jdbc connection url " + quorum + ":" + port + ":" + znodeParent);
+ LOGGER.debug("Getting default jdbc connection url "
+ + quorum + ":" + port + ":" + znodeParent);
return new ConnectionInfo(quorum, port, znodeParent);
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
index 7359fc2..2352b8f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixStatement.java
@@ -214,7 +214,7 @@ import com.google.common.math.IntMath;
*/
public class PhoenixStatement implements Statement, SQLCloseable {
- private static final Logger logger = LoggerFactory.getLogger(PhoenixStatement.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatement.class);
public enum Operation {
QUERY("queried", false),
@@ -311,9 +311,10 @@ public class PhoenixStatement implements Statement, SQLCloseable {
// this will create its own trace internally, so we don't wrap this
// whole thing in tracing
ResultIterator resultIterator = plan.iterator();
- if (logger.isDebugEnabled()) {
+ if (LOGGER.isDebugEnabled()) {
String explainPlan = QueryUtil.getExplainPlan(resultIterator);
- logger.debug(LogUtil.addCustomAnnotations("Explain plan: " + explainPlan, connection));
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Explain plan: " + explainPlan, connection));
}
StatementContext context = plan.getContext();
context.setQueryLogger(queryLogger);
@@ -338,8 +339,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
//Force update cache and retry if meta not found error occurs
catch (MetaDataEntityNotFoundException e) {
if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
- if(logger.isDebugEnabled())
- logger.debug("Reloading table "+ e.getTableName()+" data from server");
+ if(LOGGER.isDebugEnabled())
+ LOGGER.debug("Reloading table "
+ + e.getTableName()+" data from server");
if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
e.getSchemaName(), e.getTableName(), true).wasUpdated()){
//TODO we can log retry count and error for debugging in LOG table
@@ -424,8 +426,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
//Force update cache and retry if meta not found error occurs
catch (MetaDataEntityNotFoundException e) {
if(doRetryOnMetaNotFoundError && e.getTableName()!=null){
- if(logger.isDebugEnabled())
- logger.debug("Reloading table "+ e.getTableName()+" data from server");
+ if(LOGGER.isDebugEnabled())
+ LOGGER.debug("Reloading table "+ e.getTableName()
+ +" data from server");
if(new MetaDataClient(connection).updateCache(connection.getTenantId(),
e.getSchemaName(), e.getTableName(), true).wasUpdated()){
return executeMutation(stmt, false);
@@ -1763,8 +1766,8 @@ public class PhoenixStatement implements Statement, SQLCloseable {
}
public MutationPlan compileMutation(String sql) throws SQLException {
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Execute update: " + sql, connection));
}
CompilableStatement stmt = parseStatement(sql);
return compileMutation(stmt, sql);
@@ -1796,8 +1799,9 @@ public class PhoenixStatement implements Statement, SQLCloseable {
@Override
public ResultSet executeQuery(String sql) throws SQLException {
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Execute query: " + sql, connection));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations(
+ "Execute query: " + sql, connection));
}
CompilableStatement stmt = parseStatement(sql);
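
Both catch blocks above follow the same shape: on MetaDataEntityNotFoundException the client logs, refreshes its cached table metadata via MetaDataClient.updateCache, and re-executes once. A condensed generic sketch of that shape (the interface and method names here are illustrative, not Phoenix's API):

    import java.sql.SQLException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RetryOnStaleMetadata {
        private static final Logger LOGGER = LoggerFactory.getLogger(RetryOnStaleMetadata.class);

        interface Action<T> { T run() throws SQLException; }
        interface CacheRefresher { boolean refresh(String tableName) throws SQLException; }

        // Run the action; if it fails because cached metadata may be stale,
        // refresh the cache and retry exactly once.
        static <T> T execute(Action<T> action, CacheRefresher refresher, String tableName)
                throws SQLException {
            try {
                return action.run();
            } catch (SQLException e) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Reloading table " + tableName + " data from server");
                }
                if (refresher.refresh(tableName)) {
                    return action.run(); // a second failure propagates to the caller
                }
                throw e;
            }
        }
    }
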
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
index 7433f6a..27d4ba4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLogger.java
@@ -41,7 +41,7 @@ public class QueryLogger {
private LogLevel logLevel;
private Builder<QueryLogInfo, Object> queryLogBuilder = ImmutableMap.builder();
private boolean isSynced;
- private static final Logger LOGGER = LoggerFactory.getLogger(QueryLoggerDisruptor.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(QueryLogger.class);
private QueryLogger(PhoenixConnection connection) {
this.queryId = UUID.randomUUID().toString();
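
The QueryLogger hunk is the one real bug fix in this addendum: the logger had been created against QueryLoggerDisruptor.class, presumably a copy-paste slip, so QueryLogger's output was attributed to and filtered under the wrong logger category. A minimal sketch of the convention the patch standardizes on (class names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class QueryAuditor {
        // Always pass the enclosing class, so log output and per-logger level
        // configuration line up with the class that actually emits the message.
        private static final Logger LOGGER = LoggerFactory.getLogger(QueryAuditor.class);

        // A copy-pasted declaration like the following would route messages to
        // another class's logger; that is the mismatch the diff above corrects:
        // private static final Logger LOGGER = LoggerFactory.getLogger(SomeOtherClass.class);
    }
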
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
index c4f227a..1d3ebc9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/log/QueryLoggerDisruptor.java
@@ -76,8 +76,9 @@ public class QueryLoggerDisruptor implements Closeable{
final QueryLogDetailsEventHandler[] handlers = { new QueryLogDetailsEventHandler(configuration) };
disruptor.handleEventsWith(handlers);
- LOGGER.info("Starting QueryLoggerDisruptor for with ringbufferSize=" + disruptor.getRingBuffer().getBufferSize()
- + ", waitStrategy=" + waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
+ LOGGER.info("Starting QueryLoggerDisruptor for with ringbufferSize=" +
+ disruptor.getRingBuffer().getBufferSize() + ", waitStrategy=" +
+ waitStrategy.getClass().getSimpleName() + ", " + "exceptionHandler="
+ errorHandler + "...");
disruptor.start();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index cc23c43..d476e4d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -77,7 +77,8 @@ import com.google.common.collect.Lists;
public abstract class FormatToBytesWritableMapper<RECORD> extends Mapper<LongWritable, Text, TableRowkeyPair,
ImmutableBytesWritable> {
- protected static final Logger LOGGER = LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
+ protected static final Logger LOGGER =
+ LoggerFactory.getLogger(FormatToBytesWritableMapper.class);
protected static final String COUNTER_GROUP_NAME = "Phoenix MapReduce Import";
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index a8de1d1..1d68025 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -692,7 +692,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Ce
}
}
- LOGGER.info("Configuring " + tablesStartKeys.size() + " reduce partitions to match current region count");
+ LOGGER.info("Configuring " + tablesStartKeys.size() +
+ " reduce partitions to match current region count");
job.setNumReduceTasks(tablesStartKeys.size());
configurePartitioner(job, tablesStartKeys);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
index 7334afc..3e999c1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/OrphanViewTool.java
@@ -884,7 +884,8 @@ public class OrphanViewTool extends Configured implements Tool {
}
return 0;
} catch (Exception ex) {
- LOGGER.error("Orphan View Tool : An exception occurred " + ExceptionUtils.getMessage(ex) + " at:\n" +
+ LOGGER.error("Orphan View Tool : An exception occurred "
+ + ExceptionUtils.getMessage(ex) + " at:\n" +
ExceptionUtils.getStackTrace(ex));
return -1;
} finally {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
index fffb165..6726042 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixRecordReader.java
@@ -113,7 +113,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
final PhoenixInputSplit pSplit = (PhoenixInputSplit)split;
final List<Scan> scans = pSplit.getScans();
try {
- LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: " + pSplit.getKeyRange());
+ LOGGER.info("Generating iterators for " + scans.size() + " scans in keyrange: "
+ + pSplit.getKeyRange());
List<PeekingResultIterator> iterators = Lists.newArrayListWithExpectedSize(scans.size());
StatementContext ctx = queryPlan.getContext();
ReadMetricQueue readMetrics = ctx.getReadMetricsQueue();
@@ -161,7 +162,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
this.resultSet = new PhoenixResultSet(this.resultIterator, queryPlan.getProjector().cloneIfNecessary(), queryPlan.getContext());
} catch (SQLException e) {
- LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",e.getMessage()));
+ LOGGER.error(String.format(" Error [%s] initializing PhoenixRecordReader. ",
+ e.getMessage()));
Throwables.propagate(e);
}
}
@@ -182,7 +184,8 @@ public class PhoenixRecordReader<T extends DBWritable> extends RecordReader<Null
value.readFields(resultSet);
return true;
} catch (SQLException e) {
- LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",e.getMessage()));
+ LOGGER.error(String.format(" Error [%s] occurred while iterating over the resultset. ",
+ e.getMessage()));
throw new RuntimeException(e);
}
}
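
The error paths above interpolate only e.getMessage() into the log line; the stack trace survives solely because the exception is then propagated. Where an exception is swallowed instead, passing the throwable as slf4j's final argument preserves the trace. A minimal sketch contrasting the two forms (hypothetical class, not part of this patch):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ReaderExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(ReaderExample.class);

        void readNext() {
            try {
                // ... iterate over results ...
            } catch (RuntimeException e) {
                // Message-only logging drops the stack trace:
                LOGGER.error(String.format("Error [%s] while iterating", e.getMessage()));
                // Passing the throwable as the last argument keeps it:
                LOGGER.error("Error while iterating over the resultset", e);
                throw e;
            }
        }
    }
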
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
index 76d5a83..2beb10c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixServerBuildIndexInputFormat.java
@@ -50,7 +50,8 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.getInde
public class PhoenixServerBuildIndexInputFormat<T extends DBWritable> extends PhoenixInputFormat {
QueryPlan queryPlan = null;
- private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(PhoenixServerBuildIndexInputFormat.class);
/**
* instantiated by framework
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
index 798183c..04de360 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/RegexToKeyValueMapper.java
@@ -110,7 +110,9 @@ public class RegexToKeyValueMapper extends FormatToBytesWritableMapper<Map<?, ?>
Map<String, Object> data = new HashMap<>();
Matcher m = inputPattern.matcher(input);
if (m.groupCount() != columnInfoList.size()) {
- LOGGER.debug(String.format("based on the regex and input, input fileds %s size doesn't match the table columns %s size", m.groupCount(), columnInfoList.size()));
+ LOGGER.debug(String.format("based on the regex and input, input fileds %s size " +
+ "doesn't match the table columns %s size", m.groupCount(),
+ columnInfoList.size()));
return data;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
index 2cd841f..b479f9e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/IndexTool.java
@@ -654,7 +654,9 @@ public class IndexTool extends Configured implements Tool {
int autosplitNumRegions = nOpt == null ? DEFAULT_AUTOSPLIT_NUM_REGIONS : Integer.parseInt(nOpt);
String rateOpt = cmdLine.getOptionValue(SPLIT_INDEX_OPTION.getOpt());
double samplingRate = rateOpt == null ? DEFAULT_SPLIT_SAMPLING_RATE : Double.parseDouble(rateOpt);
- LOGGER.info(String.format("Will split index %s , autosplit=%s , autoSplitNumRegions=%s , samplingRate=%s", indexTable, autosplit, autosplitNumRegions, samplingRate));
+ LOGGER.info(String.format("Will split index %s , autosplit=%s ," +
+ " autoSplitNumRegions=%s , samplingRate=%s", indexTable,
+ autosplit, autosplitNumRegions, samplingRate));
splitIndexTable(connection.unwrap(PhoenixConnection.class), autosplit, autosplitNumRegions, samplingRate, configuration);
}
@@ -676,7 +678,8 @@ public class IndexTool extends Configured implements Tool {
job.submit();
return 0;
}
- LOGGER.info("Running Index Build in Foreground. Waits for the build to complete. This may take a long time!.");
+ LOGGER.info("Running Index Build in Foreground. Waits for the build to complete." +
+ " This may take a long time!.");
boolean result = job.waitForCompletion(true);
if (result) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
index 0813620..5424407 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexImportDirectReducer.java
@@ -49,7 +49,8 @@ import static org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil.MAPREDU
public class PhoenixIndexImportDirectReducer extends
Reducer<ImmutableBytesWritable, IntWritable, NullWritable, NullWritable> {
- private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(PhoenixIndexImportDirectReducer.class);
@Override
protected void cleanup(Context context) throws IOException, InterruptedException{
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index b168032..6148b6c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -59,7 +59,8 @@ import com.google.common.collect.Lists;
*/
public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {
- private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(PhoenixIndexPartialBuildMapper.class);
private PhoenixConnection connection;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
index 0544d02..35173bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixServerBuildIndexMapper.java
@@ -43,8 +43,6 @@ import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.query.QueryServicesOptions;
import org.apache.phoenix.util.ColumnInfo;
import org.apache.phoenix.util.PhoenixRuntime;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
/**
* Mapper that does not do much as regions servers actually build the index from the data table regions directly
@@ -52,8 +50,6 @@ import org.slf4j.LoggerFactory;
public class PhoenixServerBuildIndexMapper extends
Mapper<NullWritable, PhoenixIndexDBWritable, ImmutableBytesWritable, IntWritable> {
- private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixServerBuildIndexMapper.class);
-
@Override
protected void setup(final Context context) throws IOException, InterruptedException {
super.setup(context);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
index 954ee23..c0fdcbb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/automation/PhoenixMRJobSubmitter.java
@@ -237,7 +237,8 @@ public class PhoenixMRJobSubmitter {
if (!ZKBasedMasterElectionUtil.acquireLock(zookeeperWatcher, PHOENIX_LOCKS_PARENT,
AUTO_INDEX_BUILD_LOCK_NAME)) {
- LOGGER.info("Some other node is already running Automated Index Build. Skipping execution!");
+ LOGGER.info("Some other node is already running Automated Index Build." +
+ " Skipping execution!");
return -1;
}
// 1) Query Phoenix SYSTEM.CATALOG table to get a list of all candidate indexes to be built
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
index 2709297..39ce183 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/util/PhoenixConfigurationUtil.java
@@ -402,9 +402,9 @@ public final class PhoenixConfigurationUtil {
final Configuration configuration) {
List<String> selectColumnList = PhoenixConfigurationUtil.getSelectColumnNames(configuration);
if(!selectColumnList.isEmpty()) {
- LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, selectColumnList=%s "
- ,!selectColumnList.isEmpty(), selectColumnList.size(), Joiner.on(",").join(selectColumnList)
- ));
+ LOGGER.info(String.format("UseSelectColumns=%s, selectColumnList.size()=%s, " +
+ "selectColumnList=%s ",!selectColumnList.isEmpty(),
+ selectColumnList.size(), Joiner.on(",").join(selectColumnList)));
}
return selectColumnList;
}
@@ -720,7 +720,7 @@ public final class PhoenixConfigurationUtil {
if (tenantId != null) {
tenantId = null;
} else {
- BaseResultIterators.logger.warn(
+ BaseResultIterators.LOGGER.warn(
"Unable to find parent table \"" + parentTableName + "\" of table \""
+ table.getName().getString() + "\" to determine USE_STATS_FOR_PARALLELIZATION",
e);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
index f557089..48fb374 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/memory/GlobalMemoryManager.java
@@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory;
* @since 0.1
*/
public class GlobalMemoryManager implements MemoryManager {
- private static final Logger logger = LoggerFactory.getLogger(GlobalMemoryManager.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMemoryManager.class);
private final Object sync = new Object();
private final long maxMemoryBytes;
@@ -151,7 +151,7 @@ public class GlobalMemoryManager implements MemoryManager {
protected void finalize() throws Throwable {
try {
if (size > 0) {
- logger.warn("Orphaned chunk of " + size + " bytes found during finalize");
+ LOGGER.warn("Orphaned chunk of " + size + " bytes found during finalize");
//logger.warn("Orphaned chunk of " + size + " bytes found during finalize allocated here:\n" + stack);
}
freeMemory();
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
index 3b9ec99..27aa9b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/monitoring/GlobalMetricRegistriesAdapter.java
@@ -48,7 +48,8 @@ import org.slf4j.LoggerFactory;
*/
public class GlobalMetricRegistriesAdapter {
- private static final Logger LOGGER = LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(GlobalMetricRegistriesAdapter.class);
private static GlobalMetricRegistriesAdapter INSTANCE = new GlobalMetricRegistriesAdapter();
private GlobalMetricRegistriesAdapter() {
@@ -74,7 +75,8 @@ public class GlobalMetricRegistriesAdapter {
* Class to convert HBase Metric Objects to Hadoop Metrics2 Metric Objects
*/
private static class HBaseMetrics2HadoopMetricsAdapter implements MetricsSource {
- private static final Logger LOGGER = LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class);
private final MetricRegistry registry;
private final String metricTag;
@@ -85,7 +87,8 @@ public class GlobalMetricRegistriesAdapter {
private void registerToDefaultMetricsSystem() {
MetricRegistryInfo info = registry.getMetricRegistryInfo();
- LOGGER.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
+ LOGGER.info("Registering " + info.getMetricsJmxContext() +
+ " " + info.getMetricsDescription() + " into DefaultMetricsSystem");
DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), info.getMetricsDescription(), this);
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index e9fd074..c624b65 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -283,7 +283,8 @@ import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
- private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
private static final int INITIAL_CHILD_SERVICES_CAPACITY = 100;
private static final int DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS = 1000;
private static final int TTL_FOR_MUTEX = 15 * 60; // 15min
@@ -434,7 +435,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
this.queryDisruptor = new QueryLoggerDisruptor(this.config);
} catch (SQLException e) {
- logger.warn("Unable to initiate qeuery logging service !!");
+ LOGGER.warn("Unable to initiate qeuery logging service !!");
e.printStackTrace();
}
}
@@ -445,7 +446,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
GLOBAL_HCONNECTIONS_COUNTER.increment();
- logger.info("HConnection established. Stacktrace for informational purposes: " + connection + " " + LogUtil.getCallerStackTrace());
+ LOGGER.info("HConnection established. Stacktrace for informational purposes: "
+ + connection + " " + LogUtil.getCallerStackTrace());
} catch (IOException e) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
.setRootCause(e).build().buildException();
@@ -680,7 +682,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
mutator.mutate(metaData);
break;
} else if (table.getSequenceNumber() >= tableSeqNum) {
- logger.warn("Attempt to cache older version of " + tableName + ": current= " + table.getSequenceNumber() + ", new=" + tableSeqNum);
+ LOGGER.warn("Attempt to cache older version of " + tableName +
+ ": current= " + table.getSequenceNumber() +
+ ", new=" + tableSeqNum);
break;
}
} catch (TableNotFoundException e) {
@@ -689,7 +693,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// We waited long enough - just remove the table from the cache
// and the next time it's used it'll be pulled over from the server.
if (waitTime <= 0) {
- logger.warn("Unable to update meta data repo within " + (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) + " seconds for " + tableName);
+ LOGGER.warn("Unable to update meta data repo within " +
+ (DEFAULT_OUT_OF_ORDER_MUTATIONS_WAIT_TIME_MS/1000) +
+ " seconds for " + tableName);
// There will never be a parentTableName here, as that would only
// be non null for an index an we never add/remove columns from an index.
metaData.removeTable(tenantId, tableName, null, HConstants.LATEST_TIMESTAMP);
@@ -1105,8 +1111,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
+ watch.elapsedMillis() + " ms "
+ (numTries > 1 ? ("after trying " + numTries + (numTries > 1 ? "times." : "time.")) : ""));
} else {
- if (logger.isDebugEnabled()) {
- logger.debug("Operation "
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Operation "
+ op.getOperationName()
+ " completed within "
+ watch.elapsedMillis()
@@ -1166,7 +1172,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try (HBaseAdmin admin = getAdmin()) {
final String quorum = ZKConfig.getZKQuorumServersString(config);
final String znode = this.getProps().get(HConstants.ZOOKEEPER_ZNODE_PARENT);
- logger.debug("Found quorum: " + quorum + ":" + znode);
+ LOGGER.debug("Found quorum: " + quorum + ":" + znode);
if (isMetaTable) {
if(SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, this.getProps())) {
@@ -1457,7 +1463,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
ht.close();
} catch (IOException e) {
- logger.warn("Could not close HTable", e);
+ LOGGER.warn("Could not close HTable", e);
}
}
}
@@ -2755,9 +2761,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
stmt.executeUpdate();
metaConnection.commit();
} catch (NewerTableAlreadyExistsException e) {
- logger.warn("Table already modified at this timestamp, so assuming column already nullable: " + columnName);
+ LOGGER.warn("Table already modified at this timestamp," +
+ " so assuming column already nullable: " + columnName);
} catch (SQLException e) {
- logger.warn("Add column failed due to:" + e);
+ LOGGER.warn("Add column failed due to:" + e);
sqlE = e;
} finally {
try {
@@ -2787,9 +2794,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
metaConnection.createStatement().executeUpdate("ALTER TABLE " + tableName + " ADD " + (addIfNotExists ? " IF NOT EXISTS " : "") + columns );
} catch (NewerTableAlreadyExistsException e) {
- logger.warn("Table already modified at this timestamp, so assuming add of these columns already done: " + columns);
+ LOGGER.warn("Table already modified at this timestamp," +
+ " so assuming add of these columns already done: " + columns);
} catch (SQLException e) {
- logger.warn("Add column failed due to:" + e);
+ LOGGER.warn("Add column failed due to:" + e);
sqlE = e;
} finally {
try {
@@ -2907,7 +2915,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
boolean success = false;
try {
GLOBAL_QUERY_SERVICES_COUNTER.increment();
- logger.info("An instance of ConnectionQueryServices was created.");
+ LOGGER.info("An instance of ConnectionQueryServices was created.");
openConnection();
hConnectionEstablished = true;
boolean isDoNotUpgradePropSet = UpgradeUtil.isNoUpgradeSet(props);
@@ -2938,7 +2946,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (inspectIfAnyExceptionInChain(e, Collections
.<Class<? extends Exception>> singletonList(AccessDeniedException.class))) {
// Pass
- logger.warn("Could not check for Phoenix SYSTEM tables, assuming they exist and are properly configured");
+ LOGGER.warn("Could not check for Phoenix SYSTEM tables," +
+ " assuming they exist and are properly configured");
checkClientServerCompatibility(SchemaUtil.getPhysicalName(SYSTEM_CATALOG_NAME_BYTES, getProps()).getName());
success = true;
} else if (inspectIfAnyExceptionInChain(e,
@@ -2973,7 +2982,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
upgradeSystemTables(url, props);
} else {
// We expect the user to manually run the "EXECUTE UPGRADE" command first.
- logger.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
+ LOGGER.error("Upgrade is required. Must run 'EXECUTE UPGRADE' "
+ "before any other command");
}
}
@@ -3027,7 +3036,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
if(admin.tableExists(PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME) || admin.tableExists(TableName.valueOf(
PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) {
- logger.debug("System mutex table already appears to exist, not creating it");
+ LOGGER.debug("System mutex table already appears to exist, not creating it");
return;
}
final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
@@ -3045,7 +3054,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// Ignore TableExistsException as another client might beat us during upgrade.
// Ignore AccessDeniedException, as it may be possible underpriviliged user trying to use the connection
// which doesn't required upgrade.
- logger.debug("Ignoring exception while creating mutex table during connection initialization: "
+ LOGGER.debug("Ignoring exception while creating mutex table" +
+ " during connection initialization: "
+ Throwables.getStackTraceAsString(e));
} else {
throw e;
@@ -3213,7 +3223,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
* column BASE_COLUMN_COUNT is already part of the meta-data schema as the
* signal that the server side upgrade has finished or is in progress.
*/
- logger.debug("No need to run 4.5 upgrade");
+ LOGGER.debug("No need to run 4.5 upgrade");
}
Properties p = PropertiesUtil.deepCopy(metaConnection.getClientInfo());
p.remove(PhoenixRuntime.CURRENT_SCN_ATTRIB);
@@ -3225,18 +3235,21 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
List<String> tablesNeedingUpgrade = UpgradeUtil
.getPhysicalTablesWithDescRowKey(conn);
if (!tablesNeedingUpgrade.isEmpty()) {
- logger.warn("The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
+ LOGGER.warn("The following tables require upgrade due to a bug " +
+ "causing the row key to be incorrect for descending columns " +
+ "and ascending BINARY columns (PHOENIX-2067 and PHOENIX-2120):\n"
+ Joiner.on(' ').join(tablesNeedingUpgrade)
+ "\nTo upgrade issue the \"bin/psql.py -u\" command.");
}
List<String> unsupportedTables = UpgradeUtil
.getPhysicalTablesWithDescVarbinaryRowKey(conn);
if (!unsupportedTables.isEmpty()) {
- logger.warn("The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
+ LOGGER.warn("The following tables use an unsupported " +
+ "VARBINARY DESC construct and need to be changed:\n"
+ Joiner.on(' ').join(unsupportedTables));
}
} catch (Exception ex) {
- logger.error(
+ LOGGER.error(
"Unable to determine tables requiring upgrade due to PHOENIX-2067",
ex);
} finally {
@@ -3442,7 +3455,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (SchemaUtil.isNamespaceMappingEnabled(PTableType.SYSTEM, ConnectionQueryServicesImpl.this.getProps())) {
// Try acquiring a lock in SYSMUTEX table before migrating the tables since it involves disabling the table.
if (acquiredMutexLock = acquireUpgradeMutex(MetaDataProtocol.MIN_SYSTEM_TABLE_MIGRATION_TIMESTAMP)) {
- logger.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace "
+ LOGGER.debug("Acquired lock in SYSMUTEX table for migrating SYSTEM tables to SYSTEM namespace "
+ "and/or upgrading " + sysCatalogTableName);
}
// We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
@@ -3450,7 +3463,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// If SYSTEM tables exist, they are migrated to HBase SYSTEM namespace
// If they don't exist or they're already migrated, this method will return immediately
ensureSystemTablesMigratedToSystemNamespace();
- logger.debug("Migrated SYSTEM tables to SYSTEM namespace");
+ LOGGER.debug("Migrated SYSTEM tables to SYSTEM namespace");
metaConnection = upgradeSystemCatalogIfRequired(metaConnection,
currentServerSideTableTimeStamp);
}
@@ -3462,11 +3475,11 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// it means some old client is either migrating SYSTEM tables or trying to upgrade the schema of
// SYSCAT table and hence it should not be interrupted
if (acquiredMutexLock = acquireUpgradeMutex(currentServerSideTableTimeStamp)) {
- logger.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName);
+ LOGGER.debug("Acquired lock in SYSMUTEX table for upgrading " + sysCatalogTableName);
snapshotName = getSysCatalogSnapshotName(currentServerSideTableTimeStamp);
createSnapshot(snapshotName, sysCatalogTableName);
snapshotCreated = true;
- logger.debug("Created snapshot for SYSCAT");
+ LOGGER.debug("Created snapshot for SYSCAT");
}
// We will not reach here if we fail to acquire the lock, since it throws UpgradeInProgressException
}
@@ -3531,7 +3544,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
releaseUpgradeMutex();
} catch (IOException e) {
- logger.warn("Release of upgrade mutex failed ", e);
+ LOGGER.warn("Release of upgrade mutex failed ", e);
}
}
}
@@ -3816,7 +3829,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
admin = getAdmin();
admin.snapshot(snapshotName, tableName);
- logger.info("Successfully created snapshot " + snapshotName + " for "
+ LOGGER.info("Successfully created snapshot " + snapshotName + " for "
+ tableName);
} catch (Exception e) {
sqlE = new SQLException(e);
@@ -3848,14 +3861,14 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
SQLException sqlE = null;
HBaseAdmin admin = null;
try {
- logger.warn("Starting restore of " + tableName + " using snapshot "
+ LOGGER.warn("Starting restore of " + tableName + " using snapshot "
+ snapshotName + " because upgrade failed");
admin = getAdmin();
admin.disableTable(tableName);
tableDisabled = true;
admin.restoreSnapshot(snapshotName);
snapshotRestored = true;
- logger.warn("Successfully restored " + tableName + " using snapshot "
+ LOGGER.warn("Successfully restored " + tableName + " using snapshot "
+ snapshotName);
} catch (Exception e) {
sqlE = new SQLException(e);
@@ -3864,10 +3877,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try {
admin.enableTable(tableName);
if (snapshotRestored) {
- logger.warn("Successfully restored and enabled " + tableName + " using snapshot "
+ LOGGER.warn("Successfully restored and enabled " + tableName + " using snapshot "
+ snapshotName);
} else {
- logger.warn("Successfully enabled " + tableName + " after restoring using snapshot "
+ LOGGER.warn("Successfully enabled " + tableName + " after restoring using snapshot "
+ snapshotName + " failed. ");
}
} catch (Exception e1) {
@@ -3877,7 +3890,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
} else {
sqlE.setNextException(enableTableEx);
}
- logger.error("Failure in enabling "
+ LOGGER.error("Failure in enabling "
+ tableName
+ (snapshotRestored ? " after successfully restoring using snapshot"
+ snapshotName
@@ -3915,7 +3928,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
if (tableNames.size() == 0) { return; }
// Try to move any remaining tables matching "SYSTEM\..*" into "SYSTEM:"
if (tableNames.size() > 8) {
- logger.warn("Expected 8 system tables but found " + tableNames.size() + ":" + tableNames);
+ LOGGER.warn("Expected 8 system tables but found " + tableNames.size() + ":" + tableNames);
}
byte[] mappedSystemTable = SchemaUtil
@@ -3923,7 +3936,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
metatable = getTable(mappedSystemTable);
if (tableNames.contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME)) {
if (!admin.tableExists(mappedSystemTable)) {
- logger.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
+ LOGGER.info("Migrating SYSTEM.CATALOG table to SYSTEM namespace.");
// Actual migration of SYSCAT table
UpgradeUtil.mapTableToNamespace(admin, metatable,
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME, this.getProps(), null, PTableType.SYSTEM,
@@ -3936,7 +3949,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_HBASE_TABLE_NAME);
}
for (TableName table : tableNames) {
- logger.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
+ LOGGER.info(String.format("Migrating %s table to SYSTEM namespace.", table.getNameAsString()));
UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), this.getProps(), null, PTableType.SYSTEM,
null);
ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null,
@@ -4004,10 +4017,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
+ tableName + " columnName : " + columnName + " familyName : "
+ familyName;
if (!checkAndPut) {
- logger.error(processName + " failed to acquire mutex for "+ msg);
+ LOGGER.error(processName + " failed to acquire mutex for "+ msg);
}
else {
- logger.debug(processName + " acquired mutex for "+ msg);
+ LOGGER.debug(processName + " acquired mutex for "+ msg);
}
return checkAndPut;
}
@@ -4045,7 +4058,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
" tenantId : " + tenantId + " schemaName : " + schemaName + " tableName : "
+ tableName + " columnName : " + columnName + " familyName : "
+ familyName;
- logger.debug(processName + " released mutex for "+ msg);
+ LOGGER.debug(processName + " released mutex for "+ msg);
}
} catch (IOException e) {
throw ServerUtil.parseServerException(e);
@@ -4100,7 +4113,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
" B.COLUMN_FAMILY IS NOT NULL AND\n" +
" A.IMMUTABLE_ROWS = TRUE");
} catch (SQLException e) {
- logger.warn("exception during upgrading stats table:" + e);
+ LOGGER.warn("exception during upgrading stats table:" + e);
sqlE = e;
} finally {
try {
@@ -4141,7 +4154,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
"UPSERT INTO SYSTEM.CATALOG(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, DISABLE_WAL)\n" +
"VALUES (NULL, '" + QueryConstants.SYSTEM_SCHEMA_NAME + "','" + PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE + "', NULL, NULL, FALSE)");
} catch (SQLException e) {
- logger.warn("exception during upgrading stats table:" + e);
+ LOGGER.warn("exception during upgrading stats table:" + e);
sqlE = e;
} finally {
try {
@@ -4176,7 +4189,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
+ "' AND " + PhoenixDatabaseMetaData.TABLE_SCHEM + "='"
+ PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME + "'");
} catch (SQLException e) {
- logger.warn("exception during upgrading stats table:" + e);
+ LOGGER.warn("exception during upgrading stats table:" + e);
sqlE = e;
} finally {
try {
@@ -5000,7 +5013,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
// add it back at the tail
scannerQueue.offer(new WeakReference<TableResultIterator>(
scanningItr));
- logger.info("Lease renewed for scanner: " + scanningItr);
+ LOGGER.info("Lease renewed for scanner: " + scanningItr);
break;
// Scanner not initialized probably because next() hasn't been called on it yet. Enqueue it back to attempt lease renewal later.
case UNINITIALIZED:
@@ -5022,7 +5035,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
numScanners--;
}
if (renewed > 0) {
- logger.info("Renewed leases for " + renewed + " scanner/s in "
+ LOGGER.info("Renewed leases for " + renewed + " scanner/s in "
+ (System.currentTimeMillis() - start) + " ms ");
}
connectionsQueue.offer(connRef);
@@ -5030,7 +5043,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
numConnections--;
}
} catch (InternalRenewLeaseTaskException e) {
- logger.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
+ LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
// clear up the queue since the task is about to be unscheduled.
connectionsQueue.clear();
// throw an exception since we want the task execution to be suppressed because we just encountered an
@@ -5038,13 +5051,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
throw new RuntimeException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore the interrupt status
- logger.error("Thread interrupted when renewing lease.", e);
+ LOGGER.error("Thread interrupted when renewing lease.", e);
} catch (Exception e) {
- logger.error("Exception thrown when renewing lease ", e);
+ LOGGER.error("Exception thrown when renewing lease ", e);
// don't drain the queue and swallow the exception in this case since we don't want the task
// execution to be suppressed because renewing lease of a scanner failed.
} catch (Throwable e) {
- logger.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
+ LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
connectionsQueue.clear(); // clear up the queue since the task is about to be unscheduled.
throw new RuntimeException(e);
}
@@ -5167,7 +5180,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
try (HBaseAdmin admin = getAdmin()) {
final String quorum = ZKConfig.getZKQuorumServersString(config);
final String znode = this.props.get(HConstants.ZOOKEEPER_ZNODE_PARENT);
- logger.debug("Found quorum: " + quorum + ":" + znode);
+ LOGGER.debug("Found quorum: " + quorum + ":" + znode);
boolean nameSpaceExists = true;
try {
admin.getNamespaceDescriptor(schemaName);
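
The lease-renewal task near the end of the ConnectionQueryServicesImpl hunks distinguishes three failure tiers: an interrupt restores the thread's interrupt status, an ordinary Exception is logged and swallowed so the scheduled task stays alive, and anything more severe drains the queue and rethrows so the executor unschedules the task. A condensed sketch of that shape (the queue element type and task body are illustrative):

    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RenewalTask implements Runnable {
        private static final Logger LOGGER = LoggerFactory.getLogger(RenewalTask.class);
        private final Queue<Object> work = new ConcurrentLinkedQueue<>();

        @Override
        public void run() {
            try {
                renewLeases();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt status
                LOGGER.error("Thread interrupted when renewing lease.", e);
            } catch (Exception e) {
                // Swallow so one failed renewal does not unschedule the task.
                LOGGER.error("Exception thrown when renewing lease ", e);
            } catch (Throwable e) {
                // Unrecoverable: drop pending work and let the executor suppress the task.
                LOGGER.error("Exception thrown when renewing lease. Draining the queue of scanners ", e);
                work.clear();
                throw new RuntimeException(e);
            }
        }

        private void renewLeases() throws InterruptedException {
            // ... iterate cached connections and renew scanner leases ...
        }
    }
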
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java b/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java
index 4f94334..2bae7d4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/PhoenixStatsCacheLoader.java
@@ -35,7 +35,7 @@ import java.util.concurrent.Executors;
* {@link CacheLoader} asynchronous implementation for the Phoenix Table Stats cache.
*/
public class PhoenixStatsCacheLoader extends CacheLoader<GuidePostsKey, GuidePostsInfo> {
- private static final Logger logger = LoggerFactory.getLogger(PhoenixStatsCacheLoader.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixStatsCacheLoader.class);
final private PhoenixStatsLoader statsLoader;
private static volatile ExecutorService executor;
@@ -75,7 +75,7 @@ public class PhoenixStatsCacheLoader extends CacheLoader<GuidePostsKey, GuidePos
try {
return statsLoader.loadStats(key, prevGuidepostInfo);
} catch (Exception e) {
- logger.warn("Unable to load stats from table: " + key.toString(), e);
+ LOGGER.warn("Unable to load stats from table: " + key.toString(), e);
return prevGuidepostInfo;
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index a3c5224..b796f51 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -260,7 +260,7 @@ import com.google.common.collect.Sets;
import com.google.common.primitives.Ints;
public class MetaDataClient {
- private static final Logger logger = LoggerFactory.getLogger(MetaDataClient.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataClient.class);
private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
private static final String SET_ASYNC_CREATED_DATE =
@@ -1729,7 +1729,7 @@ public class MetaDataClient {
return new MutationState(0, 0, connection);
}
- if (logger.isInfoEnabled()) logger.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
+ if (LOGGER.isInfoEnabled()) LOGGER.info("Created index " + table.getName().getString() + " at " + table.getTimeStamp());
boolean asyncIndexBuildEnabled = connection.getQueryServices().getProps().getBoolean(
QueryServices.INDEX_ASYNC_BUILD_ENABLED,
QueryServicesOptions.DEFAULT_INDEX_ASYNC_BUILD_ENABLED);
@@ -3334,8 +3334,8 @@ public class MetaDataClient {
break;
case CONCURRENT_TABLE_MUTATION:
addTableToCache(result);
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("CONCURRENT_TABLE_MUTATION for table " + SchemaUtil.getTableName(schemaName, tableName), connection));
}
throw new ConcurrentTableMutationException(schemaName, tableName);
case NEWER_TABLE_FOUND:
@@ -3586,8 +3586,8 @@ public class MetaDataClient {
int nNewColumns = numCols;
List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((1 + nNewColumns) * (nIndexes + 1));
List<Mutation> columnMetaData = Lists.newArrayListWithExpectedSize(nNewColumns * (nIndexes + 1));
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Resolved table to " + table.getName().getString() + " with seqNum " + table.getSequenceNumber() + " at timestamp " + table.getTimeStamp() + " with " + table.getColumns().size() + " columns: " + table.getColumns(), connection));
}
int position = table.getColumns().size();
@@ -3926,8 +3926,8 @@ public class MetaDataClient {
if (retried) {
throw e;
}
- if (logger.isDebugEnabled()) {
- logger.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug(LogUtil.addCustomAnnotations("Caught ConcurrentTableMutationException for table " + SchemaUtil.getTableName(schemaName, tableName) + ". Will try again...", connection));
}
retried = true;
}
@@ -4588,7 +4588,7 @@ public class MetaDataClient {
*/
public MutationState changePermissions(ChangePermsStatement changePermsStatement) throws SQLException {
- logger.info(changePermsStatement.toString());
+ LOGGER.info(changePermsStatement.toString());
try(HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
ClusterConnection clusterConnection = (ClusterConnection) admin.getConnection();
@@ -4659,7 +4659,7 @@ public class MetaDataClient {
inconsistentTables.add(indexTable);
continue;
}
- logger.info("Updating permissions for Index Table: " +
+ LOGGER.info("Updating permissions for Index Table: " +
indexTable.getName() + " Base Table: " + inputTable.getName());
tableName = SchemaUtil.getPhysicalTableName(indexTable.getPhysicalName().getBytes(), indexTable.isNamespaceMapped());
changePermsOnTable(clusterConnection, changePermsStatement, tableName);
@@ -4667,7 +4667,7 @@ public class MetaDataClient {
if(schemaInconsistency) {
for(PTable table : inconsistentTables) {
- logger.error("Fail to propagate permissions to Index Table: " + table.getName());
+ LOGGER.error("Fail to propagate permissions to Index Table: " + table.getName());
}
throw new TablesNotInSyncException(inputTable.getTableName().getString(),
inconsistentTables.get(0).getTableName().getString(), "Namespace properties");
@@ -4678,13 +4678,13 @@ public class MetaDataClient {
tableName = org.apache.hadoop.hbase.TableName.valueOf(viewIndexTableBytes);
boolean viewIndexTableExists = admin.tableExists(tableName);
if(viewIndexTableExists) {
- logger.info("Updating permissions for View Index Table: " +
+ LOGGER.info("Updating permissions for View Index Table: " +
Bytes.toString(viewIndexTableBytes) + " Base Table: " + inputTable.getName());
changePermsOnTable(clusterConnection, changePermsStatement, tableName);
} else {
if(inputTable.isMultiTenant()) {
- logger.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
- logger.error("Fail to propagate permissions to view Index Table: " + tableName.getNameAsString());
+ LOGGER.error("View Index Table not found for MultiTenant Table: " + inputTable.getName());
+ LOGGER.error("Fail to propagate permissions to view Index Table: " + tableName.getNameAsString());
throw new TablesNotInSyncException(inputTable.getTableName().getString(),
Bytes.toString(viewIndexTableBytes), " View Index table should exist for MultiTenant tables");
}
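MetaDataClient guards its debug statements with isDebugEnabled() because the messages are assembled with string concatenation (plus LogUtil.addCustomAnnotations), work that would otherwise run even when DEBUG is off. With parameterized logging the guard is usually unnecessary for cheap arguments; it still pays off when computing an argument is itself expensive. A sketch of both cases (helper name hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(GuardedLoggingExample.class);

        void logResolution(String tableName) {
            // cheap argument: {} already defers formatting, no guard needed
            LOGGER.debug("Resolved table {}", tableName);

            // expensive argument: guard so describeColumns() runs only at DEBUG
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Resolved table {} with columns {}",
                        tableName, describeColumns(tableName));
            }
        }

        private String describeColumns(String tableName) {
            return "expensive metadata walk for " + tableName;
        }
    }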
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 5d5ab6c..8d40af3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -66,7 +66,8 @@ import com.google.common.collect.Maps;
*/
public class DefaultStatisticsCollector implements StatisticsCollector {
- private static final Logger LOGGER = LoggerFactory.getLogger(DefaultStatisticsCollector.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(DefaultStatisticsCollector.class);
final Map<ImmutableBytesPtr, Pair<Long, GuidePostsInfoBuilder>> guidePostsInfoWriterMap = Maps.newHashMap();
private final Table htable;
@@ -156,7 +157,8 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES),
region.getTableDesc());
- LOGGER.info("Guide post depth determined from global configuration: " + guidePostDepth);
+ LOGGER.info("Guide post depth determined from global configuration: " +
+ guidePostDepth);
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 1e4df2c..8e7f3b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -150,24 +150,28 @@ public class StatisticsScanner implements InternalScanner {
ArrayList<Mutation> mutations = new ArrayList<Mutation>();
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Deleting the stats for the region " + regionInfo.getRegionNameAsString()
+ LOGGER.debug("Deleting the stats for the region "
+ + regionInfo.getRegionNameAsString()
+ " as part of major compaction");
}
getStatisticsWriter().deleteStatsForRegion(region, tracker, family, mutations);
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Adding new stats for the region " + regionInfo.getRegionNameAsString()
+ LOGGER.debug("Adding new stats for the region " +
+ regionInfo.getRegionNameAsString()
+ " as part of major compaction");
}
getStatisticsWriter().addStats(tracker, family,
mutations, tracker.getGuidePostDepth());
if (LOGGER.isDebugEnabled()) {
- LOGGER.debug("Committing new stats for the region " + regionInfo.getRegionNameAsString()
+ LOGGER.debug("Committing new stats for the region " +
+ regionInfo.getRegionNameAsString()
+ " as part of major compaction");
}
getStatisticsWriter().commitStats(mutations, tracker);
} catch (IOException e) {
if (getRegionServerServices().isStopping() || getRegionServerServices().isStopped()) {
- LOGGER.debug("Ignoring error updating statistics because region is closing/closed");
+ LOGGER.debug(
+ "Ignoring error updating statistics because region is closing/closed");
} else {
LOGGER.error("Failed to update statistics table!", e);
toThrow = e;
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 88cc642..5d8d844 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -222,8 +222,8 @@ public class TraceReader {
}
}
if (cols.size() < count) {
- LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count + ", but only got " + cols.size()
- + " tags from request " + request));
+ LOGGER.error(addCustomAnnotations("Missing tags! Expected " + count +
+ ", but only got " + cols.size() + " tags from request " + request));
}
return cols;
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
index 1147e07..2aea629 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/OmidTransactionContext.java
@@ -46,7 +46,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
public class OmidTransactionContext implements PhoenixTransactionContext {
- private static final Logger logger = LoggerFactory.getLogger(OmidTransactionContext.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(OmidTransactionContext.class);
private HBaseTransactionManager tm;
private HBaseTransaction tx;
@@ -166,8 +166,8 @@ public class OmidTransactionContext implements PhoenixTransactionContext {
try {
tx = (HBaseTransaction) tm.fence(dataTable.getName().getBytes());
- if (logger.isInfoEnabled()) {
- logger.info("Added write fence at ~"
+ if (LOGGER.isInfoEnabled()) {
+ LOGGER.info("Added write fence at ~"
+ tx.getReadTimestamp());
}
} catch (TransactionException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
index 18a05ce..f63c492 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/transaction/TephraTransactionContext.java
@@ -64,7 +64,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.Lists;
public class TephraTransactionContext implements PhoenixTransactionContext {
- private static final Logger logger = LoggerFactory.getLogger(TephraTransactionContext.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(TephraTransactionContext.class);
private static final TransactionCodec CODEC = new TransactionCodec();
private final List<TransactionAware> txAwares;
@@ -208,8 +208,8 @@ public class TephraTransactionContext implements PhoenixTransactionContext {
txServiceClient);
fenceWait.await(10000, TimeUnit.MILLISECONDS);
- if (logger.isInfoEnabled()) {
- logger.info("Added write fence at ~"
+ if (LOGGER.isInfoEnabled()) {
+ LOGGER.info("Added write fence at ~"
+ getCurrentTransaction().getReadPointer());
}
} catch (InterruptedException e) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
index d042fac..9cadab9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/EquiDepthStreamHistogram.java
@@ -176,7 +176,8 @@ public class EquiDepthStreamHistogram {
smallerBar.incrementCount(countToDistribute);
}
if (LOGGER.isTraceEnabled()) {
- LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s", origBar, newLeft, newRight));
+ LOGGER.trace(String.format("Split orig=%s , newLeft=%s , newRight=%s",
+ origBar, newLeft, newRight));
}
bars.remove(origBar);
bars.add(newLeft);
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index d67ee39..3e41997 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -91,7 +91,7 @@ import com.google.protobuf.ServiceException;
public class MetaDataUtil {
- private static final Logger logger = LoggerFactory.getLogger(MetaDataUtil.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(MetaDataUtil.class);
public static final String VIEW_INDEX_TABLE_PREFIX = "_IDX_";
public static final String LOCAL_INDEX_TABLE_PREFIX = "_LOCAL_IDX_";
@@ -782,15 +782,15 @@ public class MetaDataUtil {
admin.getRegionInfo(null, request);
} catch (ServiceException e) {
IOException ie = ProtobufUtil.getRemoteException(e);
- logger.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
+ LOGGER.debug("Region " + loc.getRegionInfo().getEncodedName() + " isn't online due to:" + ie);
return false;
} catch (RemoteException e) {
- logger.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
+ LOGGER.debug("Cannot get region " + loc.getRegionInfo().getEncodedName() + " info due to error:" + e);
return false;
}
}
} catch (IOException ex) {
- logger.warn("tableRegionsOnline failed due to:" + ex);
+ LOGGER.warn("tableRegionsOnline failed due to:" + ex);
return false;
} finally {
if (hcon != null) {
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
index d6950a2..fe5d045 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ReadOnlyProps.java
@@ -43,7 +43,7 @@ import com.google.common.collect.Maps;
*
*/
public class ReadOnlyProps implements Iterable<Entry<String, String>> {
- private static final Logger logger = LoggerFactory.getLogger(ReadOnlyProps.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ReadOnlyProps.class);
public static final ReadOnlyProps EMPTY_PROPS = new ReadOnlyProps();
@Nonnull
private final Map<String, String> props;
@@ -314,7 +314,7 @@ public class ReadOnlyProps implements Iterable<Entry<String, String>> {
String value = entry.getValue().toString();
String oldValue = props.get(key);
if (!Objects.equal(oldValue, value)) {
- if (logger.isDebugEnabled()) logger.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
+ if (LOGGER.isDebugEnabled()) LOGGER.debug("Creating new ReadOnlyProps due to " + key + " with " + oldValue + "!=" + value);
return new ReadOnlyProps(this, overrides);
}
}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index 339fa7a..b79bfe6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -133,7 +133,7 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
public class UpgradeUtil {
- private static final Logger logger = LoggerFactory.getLogger(UpgradeUtil.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(UpgradeUtil.class);
private static final byte[] SEQ_PREFIX_BYTES = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_"));
public static final byte[] UPGRADE_TO_4_7_COLUMN_NAME = Bytes.toBytes("UPGRADE_TO_4_7");
/**
@@ -257,17 +257,17 @@ public class UpgradeUtil {
}
}
if (sizeBytes >= batchSizeBytes) {
- logger.info("Committing bactch of temp rows");
+ LOGGER.info("Committing bactch of temp rows");
target.batch(mutations);
mutations.clear();
sizeBytes = 0;
}
}
if (!mutations.isEmpty()) {
- logger.info("Committing last bactch of temp rows");
+ LOGGER.info("Committing last bactch of temp rows");
target.batch(mutations);
}
- logger.info("Successfully completed copy");
+ LOGGER.info("Successfully completed copy");
} catch (SQLException e) {
throw e;
} catch (Exception e) {
@@ -279,12 +279,12 @@ public class UpgradeUtil {
try {
if (source != null) source.close();
} catch (IOException e) {
- logger.warn("Exception during close of source table",e);
+ LOGGER.warn("Exception during close of source table",e);
} finally {
try {
if (target != null) target.close();
} catch (IOException e) {
- logger.warn("Exception during close of target table",e);
+ LOGGER.warn("Exception during close of target table",e);
}
}
}
@@ -299,7 +299,7 @@ public class UpgradeUtil {
if (nSaltBuckets <= 0) {
return;
}
- logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
+ LOGGER.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
HTableDescriptor desc = admin.getTableDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
createSequenceSnapshot(admin, conn);
snapshotCreated = true;
@@ -309,7 +309,7 @@ public class UpgradeUtil {
admin.createTable(desc, splitPoints);
restoreSequenceSnapshot(admin, conn);
success = true;
- logger.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
+ LOGGER.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
} catch (IOException e) {
throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e);
} finally {
@@ -318,14 +318,14 @@ public class UpgradeUtil {
try {
deleteSequenceSnapshot(admin);
} catch (SQLException e) {
- logger.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
+ LOGGER.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
}
}
} finally {
try {
admin.close();
} catch (IOException e) {
- logger.warn("Exception while closing admin during pre-split", e);
+ LOGGER.warn("Exception while closing admin during pre-split", e);
}
}
}
@@ -447,8 +447,8 @@ public class UpgradeUtil {
createIndex.append(")");
}
createIndex.append(" ASYNC");
- logger.info("Index creation query is : " + createIndex.toString());
- logger.info("Dropping the index " + indexTableName
+ LOGGER.info("Index creation query is : " + createIndex.toString());
+ LOGGER.info("Dropping the index " + indexTableName
+ " to clean up the index details from SYSTEM.CATALOG.");
PhoenixConnection localConnection = null;
if (tenantId != null) {
@@ -459,9 +459,9 @@ public class UpgradeUtil {
(localConnection == null ? globalConnection : localConnection).createStatement().execute(
"DROP INDEX IF EXISTS " + indexTableName + " ON "
+ SchemaUtil.getTableName(schemaName, dataTableName));
- logger.info("Recreating the index " + indexTableName);
+ LOGGER.info("Recreating the index " + indexTableName);
(localConnection == null ? globalConnection : localConnection).createStatement().execute(createIndex.toString());
- logger.info("Created the index " + indexTableName);
+ LOGGER.info("Created the index " + indexTableName);
} finally {
props.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
if (localConnection != null) {
@@ -650,12 +650,12 @@ public class UpgradeUtil {
}
@SuppressWarnings("deprecation")
public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, PTable oldTable) throws SQLException {
- logger.info("Upgrading SYSTEM.SEQUENCE table");
+ LOGGER.info("Upgrading SYSTEM.SEQUENCE table");
byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
Table sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
try {
- logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
+ LOGGER.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
KeyValue saltKV = KeyValueUtil.newKeyValue(seqTableKey,
PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
@@ -691,7 +691,7 @@ public class UpgradeUtil {
return true;
}
}
- logger.info("SYSTEM.SEQUENCE table has already been upgraded");
+ LOGGER.info("SYSTEM.SEQUENCE table has already been upgraded");
return false;
}
@@ -709,7 +709,7 @@ public class UpgradeUtil {
Table seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
try {
boolean committed = false;
- logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
+ LOGGER.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
ResultScanner scanner = seqTable.getScanner(scan);
try {
Result result;
@@ -744,7 +744,7 @@ public class UpgradeUtil {
}
}
if (sizeBytes >= batchSizeBytes) {
- logger.info("Committing bactch of SYSTEM.SEQUENCE rows");
+ LOGGER.info("Committing bactch of SYSTEM.SEQUENCE rows");
seqTable.batch(mutations);
mutations.clear();
sizeBytes = 0;
@@ -753,11 +753,11 @@ public class UpgradeUtil {
}
}
if (!mutations.isEmpty()) {
- logger.info("Committing last bactch of SYSTEM.SEQUENCE rows");
+ LOGGER.info("Committing last bactch of SYSTEM.SEQUENCE rows");
seqTable.batch(mutations);
}
preSplitSequenceTable(conn, nSaltBuckets);
- logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
+ LOGGER.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
success = true;
return true;
} catch (InterruptedException e) {
@@ -780,10 +780,10 @@ public class UpgradeUtil {
sysTable.put(unsaltPut);
success = true;
} finally {
- if (!success) logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+ if (!success) LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
}
} else { // We're screwed b/c we've already committed some salted sequences...
- logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
+ LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
}
}
}
@@ -794,7 +794,7 @@ public class UpgradeUtil {
try {
seqTable.close();
} catch (IOException e) {
- logger.warn("Exception during close",e);
+ LOGGER.warn("Exception during close",e);
}
}
}
@@ -805,7 +805,7 @@ public class UpgradeUtil {
try {
sysTable.close();
} catch (IOException e) {
- logger.warn("Exception during close",e);
+ LOGGER.warn("Exception during close",e);
}
}
@@ -857,7 +857,7 @@ public class UpgradeUtil {
try {
// Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
- logger.info("Upgrading metadata to support adding columns to tables with views");
+ LOGGER.info("Upgrading metadata to support adding columns to tables with views");
String getBaseTableAndViews = "SELECT "
+ COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, "
+ TENANT_ID + ", "
@@ -1093,7 +1093,7 @@ public class UpgradeUtil {
try {
// Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
- logger.info("Upgrading metadata to add parent to child links for views");
+ LOGGER.info("Upgrading metadata to add parent to child links for views");
metaConnection.commit();
// physical table
// |
@@ -1166,7 +1166,7 @@ public class UpgradeUtil {
try {
// Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
- logger.info("Upgrading metadata to add parent to child links for views");
+ LOGGER.info("Upgrading metadata to add parent to child links for views");
metaConnection.commit();
String createChildLink = "UPSERT INTO SYSTEM.CHILD_LINK(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE) " +
"SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, LINK_TYPE " +
@@ -1189,7 +1189,7 @@ public class UpgradeUtil {
// Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
try (PhoenixConnection queryConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
PhoenixConnection upsertConn = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP)) {
- logger.info("Upgrading metadata to add parent links for indexes on views");
+ LOGGER.info("Upgrading metadata to add parent links for indexes on views");
String indexQuery = "SELECT TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY FROM SYSTEM.CATALOG WHERE LINK_TYPE = "
+ LinkType.INDEX_TABLE.getSerializedValue();
String createViewIndexLink = "UPSERT INTO SYSTEM.CATALOG (TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_FAMILY, LINK_TYPE) VALUES (?,?,?,?,?) ";
@@ -1368,7 +1368,7 @@ public class UpgradeUtil {
view = PhoenixRuntime.getTable(newConn, viewName);
} catch (TableNotFoundException e) {
// Ignore
- logger.warn("Error getting PTable for view: " + viewName);
+ LOGGER.warn("Error getting PTable for view: " + viewName);
continue;
}
syncUpdateCacheFreqForIndexesOfTable(view, stmt);
@@ -1406,7 +1406,7 @@ public class UpgradeUtil {
table = PhoenixRuntime.getTable(conn, null, fullTableName);
} catch (TableNotFoundException e) {
// Ignore tables not mapped to a Phoenix Table
- logger.warn("Error getting PTable for HBase table: " + fullTableName);
+ LOGGER.warn("Error getting PTable for HBase table: " + fullTableName);
continue;
}
if (table.getType() == PTableType.INDEX) {
@@ -1719,7 +1719,7 @@ public class UpgradeUtil {
if (isTable && !bypassUpgrade) {
String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
System.out.println(msg);
- logger.info(msg);
+ LOGGER.info(msg);
admin.disableTable(physicalName);
admin.snapshot(snapshotName, physicalName);
admin.enableTable(physicalName);
@@ -1734,7 +1734,7 @@ public class UpgradeUtil {
}
String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
System.out.println(msg);
- logger.info(msg);
+ LOGGER.info(msg);
ResultSet rs;
if (!bypassUpgrade) {
rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
@@ -1788,9 +1788,9 @@ public class UpgradeUtil {
success = true;
msg = "Completed upgrade of " + escapedTableName + tenantInfo;
System.out.println(msg);
- logger.info(msg);
+ LOGGER.info(msg);
} catch (Exception e) {
- logger.error("Exception during upgrade of " + physicalName + ":", e);
+ LOGGER.error("Exception during upgrade of " + physicalName + ":", e);
} finally {
boolean restored = false;
try {
@@ -1800,25 +1800,25 @@ public class UpgradeUtil {
admin.enableTable(physicalName);
String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
System.out.println(msg);
- logger.info(msg);
+ LOGGER.info(msg);
}
restored = true;
} catch (Exception e) {
- logger.warn("Unable to restoring snapshot " + snapshotName + " after failed upgrade", e);
+ LOGGER.warn("Unable to restoring snapshot " + snapshotName + " after failed upgrade", e);
} finally {
try {
if (restoreSnapshot && restored) {
admin.deleteSnapshot(snapshotName);
}
} catch (Exception e) {
- logger.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
+ LOGGER.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
} finally {
try {
if (admin != null) {
admin.close();
}
} catch (IOException e) {
- logger.warn("Unable to close admin after upgrade:", e);
+ LOGGER.warn("Unable to close admin after upgrade:", e);
}
}
}
@@ -2030,7 +2030,7 @@ public class UpgradeUtil {
}
if (ts != null) {
// Update flag to represent table is mapped to namespace
- logger.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
+ LOGGER.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
phoenixTableName));
Put put = new Put(tableKey, ts);
put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES,
@@ -2047,19 +2047,19 @@ public class UpgradeUtil {
boolean destTableExists=admin.tableExists(destTableName);
if (!destTableExists) {
String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
- logger.info("Disabling table " + srcTableName + " ..");
+ LOGGER.info("Disabling table " + srcTableName + " ..");
admin.disableTable(srcTableName);
- logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
+ LOGGER.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
admin.snapshot(snapshotName, srcTableName);
- logger.info(
+ LOGGER.info(
String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(destTableName));
- logger.info(String.format("deleting old table %s..", srcTableName));
+ LOGGER.info(String.format("deleting old table %s..", srcTableName));
admin.deleteTable(srcTableName);
- logger.info(String.format("deleting snapshot %s..", snapshotName));
+ LOGGER.info(String.format("deleting snapshot %s..", snapshotName));
admin.deleteSnapshot(snapshotName);
} else {
- logger.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
+ LOGGER.info(String.format("Destination Table %s already exists. No migration needed.", destTableName));
}
}
}
@@ -2101,15 +2101,15 @@ public class UpgradeUtil {
if (table.isNamespaceMapped()) { throw new IllegalArgumentException("Table is already upgraded"); }
if (!schemaName.equals("")) {
- logger.info(String.format("Creating schema %s..", schemaName));
+ LOGGER.info(String.format("Creating schema %s..", schemaName));
conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
}
String oldPhysicalName = table.getPhysicalName().getString();
String newPhysicalTablename = SchemaUtil.normalizeIdentifier(
SchemaUtil.getPhysicalTableName(oldPhysicalName, readOnlyProps).getNameAsString());
- logger.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
- logger.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
- logger.info(String.format("teanantId %s..", conn.getTenantId()));
+ LOGGER.info(String.format("Upgrading %s %s..", table.getType(), fullTableName));
+ LOGGER.info(String.format("oldPhysicalName %s newPhysicalTablename %s..", oldPhysicalName, newPhysicalTablename));
+ LOGGER.info(String.format("teanantId %s..", conn.getTenantId()));
TableViewFinderResult childViewsResult = new TableViewFinderResult();
try (Table childLinkTable =
@@ -2150,12 +2150,14 @@ public class UpgradeUtil {
boolean updateLink = true;
if (srcTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)) {
// Skip already migrated
- logger.info(String.format("skipping as it seems index '%s' is already upgraded..",
+ LOGGER.info(String.format(
+ "skipping as it seems index '%s' is already upgraded..",
index.getName()));
continue;
}
if (MetaDataUtil.isLocalIndex(srcTableName)) {
- logger.info(String.format("local index '%s' found with physical hbase table name ''..",
+ LOGGER.info(String.format(
+ "local index '%s' found with physical hbase table name ''..",
index.getName(), srcTableName));
destTableName = Bytes
.toString(MetaDataUtil.getLocalIndexPhysicalName(newPhysicalTablename.getBytes()));
@@ -2164,18 +2166,20 @@ public class UpgradeUtil {
.execute(String.format("ALTER TABLE %s set " + MetaDataUtil.PARENT_TABLE_KEY + "='%s'",
phoenixTableName, table.getPhysicalName()));
} else if (MetaDataUtil.isViewIndex(srcTableName)) {
- logger.info(String.format("View index '%s' found with physical hbase table name ''..",
+ LOGGER.info(String.format(
+ "View index '%s' found with physical hbase table name ''..",
index.getName(), srcTableName));
destTableName = Bytes
.toString(MetaDataUtil.getViewIndexPhysicalName(newPhysicalTablename.getBytes()));
} else {
- logger.info(String.format("Global index '%s' found with physical hbase table name ''..",
+ LOGGER.info(String.format(
+ "Global index '%s' found with physical hbase table name ''..",
index.getName(), srcTableName));
destTableName = SchemaUtil
.getPhysicalTableName(index.getPhysicalName().getString(), readOnlyProps)
.getNameAsString();
}
- logger.info(String.format("Upgrading index %s..", index.getName()));
+ LOGGER.info(String.format("Upgrading index %s..", index.getName()));
if (!(table.getType() == PTableType.VIEW && !MetaDataUtil.isViewIndex(srcTableName)
&& IndexType.LOCAL != index.getIndexType())) {
mapTableToNamespace(admin, metatable, srcTableName, destTableName, readOnlyProps,
@@ -2183,7 +2187,7 @@ public class UpgradeUtil {
conn.getTenantId());
}
if (updateLink) {
- logger.info(String.format("Updating link information for index '%s' ..", index.getName()));
+ LOGGER.info(String.format("Updating link information for index '%s' ..", index.getName()));
updateLink(conn, srcTableName, destTableName,index.getSchemaName(),index.getTableName());
conn.commit();
}
@@ -2198,14 +2202,14 @@ public class UpgradeUtil {
throw new RuntimeException("Error: problem occured during upgrade. Table is not upgraded successfully");
}
if (table.getType() == PTableType.VIEW) {
- logger.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
+ LOGGER.info(String.format("Updating link information for view '%s' ..", table.getTableName()));
updateLink(conn, oldPhysicalName, newPhysicalTablename,table.getSchemaName(),table.getTableName());
conn.commit();
// if the view is a first level child, then we need to create the PARENT_TABLE link
// that was overwritten by the PHYSICAL_TABLE link
if (table.getParentName().equals(table.getPhysicalName())) {
- logger.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
+ LOGGER.info(String.format("Creating PARENT link for view '%s' ..", table.getTableName()));
// Add row linking view to its parent
PreparedStatement linkStatement = conn.prepareStatement(MetaDataClient.CREATE_VIEW_LINK);
linkStatement.setString(1, Bytes.toStringBinary(tenantIdBytes));
@@ -2299,7 +2303,7 @@ public class UpgradeUtil {
conn.close();
conn = DriverManager.getConnection(connUrl, props).unwrap(PhoenixConnection.class);
}
- logger.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
+ LOGGER.info(String.format("Upgrading view %s for tenantId %s..", viewName,tenantId));
UpgradeUtil.upgradeTable(conn, viewName);
prevTenantId = tenantId;
}
@@ -2362,7 +2366,7 @@ public class UpgradeUtil {
currentValueArray, new SQLException[1]);
if (sqlExceptions[0] != null) {
- logger.error("Unable to convert view index sequence because of error. " +
+ LOGGER.error("Unable to convert view index sequence because of error. " +
"It will need to be converted manually, " +
" or there's a risk that two view indexes of the same base table " +
"will have colliding view index ids.", sqlExceptions[0]);
@@ -2398,7 +2402,7 @@ public class UpgradeUtil {
false, EnvironmentEdgeManager.currentTimeMillis());
}
} catch(SequenceAlreadyExistsException sae) {
- logger.info("Tried to create view index sequence "
+ LOGGER.info("Tried to create view index sequence "
+ SchemaUtil.getTableName(sae.getSchemaName(), sae.getSequenceName()) +
" during upgrade but it already existed. This is probably fine.");
}
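UpgradeUtil builds most of its messages with String.format, which makes placeholder/argument agreement worth checking: too few arguments raise a MissingFormatArgumentException, but surplus arguments are silently ignored, so a pattern missing a '%s' still runs and simply drops a value from the output. A quick illustration (values hypothetical):

    public class FormatMismatchExample {
        public static void main(String[] args) {
            String index = "IDX_A";   // hypothetical index name
            String src = "MY_TABLE";  // hypothetical physical table name

            // matching placeholders: both values appear in the message
            System.out.println(String.format(
                    "local index '%s' found with physical hbase table name '%s'..", index, src));

            // surplus argument: src is silently dropped, leaving empty quotes
            System.out.println(String.format(
                    "local index '%s' found with physical hbase table name ''..", index, src));
        }
    }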
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
index 4929993..e730813 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/json/JsonUpsertExecutor.java
@@ -109,7 +109,8 @@ public class JsonUpsertExecutor extends UpsertExecutor<Map<?, ?>, Object> {
if (LOGGER.isDebugEnabled()) {
// Even though this is an error we only log it with debug logging because we're notifying the
// listener, and it can do its own logging if needed
- LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex + ", colName " + colName, e);
+ LOGGER.debug("Error on record " + record + ", fieldIndex " + fieldIndex +
+ ", colName " + colName, e);
}
upsertListener.errorOnRecord(record, new Exception("fieldIndex: " + fieldIndex + ", colName " + colName, e));
}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index e852c16..879f09e 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -184,7 +184,7 @@ public abstract class BaseTest {
public static final String DRIVER_CLASS_NAME_ATTRIB = "phoenix.driver.class.name";
private static final double ZERO = 1e-9;
private static final Map<String,String> tableDDLMap;
- private static final Logger logger = LoggerFactory.getLogger(BaseTest.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(BaseTest.class);
@ClassRule
public static TemporaryFolder tmpFolder = new TemporaryFolder();
private static final int dropTableTimeout = 300; // 5 mins should be long enough.
@@ -457,7 +457,7 @@ public abstract class BaseTest {
try {
assertTrue(destroyDriver(driver));
} catch (Throwable t) {
- logger.error("Exception caught when destroying phoenix test driver", t);
+ LOGGER.error("Exception caught when destroying phoenix test driver", t);
} finally {
driver = null;
}
@@ -487,18 +487,18 @@ public abstract class BaseTest {
try {
u.shutdownMiniMapReduceCluster();
} catch (Throwable t) {
- logger.error(
+ LOGGER.error(
"Exception caught when shutting down mini map reduce cluster", t);
} finally {
try {
u.shutdownMiniCluster();
} catch (Throwable t) {
- logger.error("Exception caught when shutting down mini cluster", t);
+ LOGGER.error("Exception caught when shutting down mini cluster", t);
} finally {
try {
ConnectionFactory.shutdown();
} finally {
- logger.info(
+ LOGGER.info(
"Time in seconds spent in shutting down mini cluster with "
+ numTables + " tables: "
+ (System.currentTimeMillis() - startTime) / 1000);
@@ -677,7 +677,7 @@ public abstract class BaseTest {
DriverManager.deregisterDriver(driver);
}
} catch (Exception e) {
- logger.warn("Unable to close registered driver: " + driver, e);
+ LOGGER.warn("Unable to close registered driver: " + driver, e);
}
}
return false;
@@ -781,12 +781,12 @@ public abstract class BaseTest {
int numTables = TABLE_COUNTER.get();
TABLE_COUNTER.set(0);
if(isDistributedClusterModeEnabled(config)) {
- logger.info(
+ LOGGER.info(
"Deleting old tables on distributed cluster because number of tables is likely greater than "
+ TEARDOWN_THRESHOLD);
deletePriorMetaData(HConstants.LATEST_TIMESTAMP, url);
} else {
- logger.info(
+ LOGGER.info(
"Shutting down mini cluster because number of tables on this mini cluster is likely greater than "
+ TEARDOWN_THRESHOLD);
tearDownMiniClusterAsync(numTables);
@@ -965,9 +965,9 @@ public abstract class BaseTest {
try {
conn.createStatement().executeUpdate(ddl);
} catch (NewerTableAlreadyExistsException ex) {
- logger.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion");
+ LOGGER.info("Newer table " + fullTableName + " or its delete marker exists. Ignore current deletion");
} catch (TableNotFoundException ex) {
- logger.info("Table " + fullTableName + " is already deleted.");
+ LOGGER.info("Table " + fullTableName + " is already deleted.");
}
}
rs.close();
@@ -1017,7 +1017,7 @@ public abstract class BaseTest {
lastTenantId = tenantId;
}
- logger.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
+ LOGGER.info("DROP SEQUENCE STATEMENT: DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
conn.createStatement().execute("DROP SEQUENCE " + SchemaUtil.getEscapedTableName(rs.getString(2), rs.getString(3)));
}
rs.close();
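The BaseTest teardown hunks above log failures from each nested shutdown step rather than letting one failure mask the next, so later cleanup still runs. A compact sketch of that log-and-continue close pattern (the helper is hypothetical, not a Phoenix utility):

    import java.io.Closeable;
    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class QuietCloseExample {
        private static final Logger LOGGER = LoggerFactory.getLogger(QuietCloseExample.class);

        static void closeQuietly(Closeable resource, String name) {
            if (resource == null) {
                return;
            }
            try {
                resource.close();
            } catch (IOException e) {
                // log and continue: a cleanup failure should not abort remaining teardown
                LOGGER.warn("Exception during close of {}", name, e);
            }
        }
    }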
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
index 91f6c72..ead0d83 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/tool/ParameterizedPhoenixCanaryToolIT.java
@@ -19,7 +19,6 @@ package org.apache.phoenix.tool;
import com.google.common.collect.Maps;
import com.google.gson.Gson;
-
import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
import org.apache.phoenix.query.BaseTest;
import org.apache.phoenix.query.QueryServices;
@@ -58,7 +57,8 @@ import static org.junit.Assert.assertTrue;
@Category(NeedsOwnMiniClusterTest.class)
public class ParameterizedPhoenixCanaryToolIT extends BaseTest {
- private static final Logger LOGGER = LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
+ private static final Logger LOGGER =
+ LoggerFactory.getLogger(ParameterizedPhoenixCanaryToolIT.class);
private static final String stdOutSink
= "org.apache.phoenix.tool.PhoenixCanaryTool$StdOutSink";
private static final String fileOutSink
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
index 51d6743..2b55e29 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/Pherf.java
@@ -49,7 +49,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Pherf {
- private static final Logger logger = LoggerFactory.getLogger(Pherf.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(Pherf.class);
private static final Options options = new Options();
private final PhoenixUtil phoenixUtil = PhoenixUtil.create();
@@ -142,8 +142,8 @@ public class Pherf {
properties.getProperty("pherf.default.monitorFrequency");
properties.setProperty("pherf.default.monitorFrequency", monitorFrequency);
- logger.debug("Using Monitor: " + monitor);
- logger.debug("Monitor Frequency Ms:" + monitorFrequency);
+ LOGGER.debug("Using Monitor: " + monitor);
+ LOGGER.debug("Monitor Frequency Ms:" + monitorFrequency);
preLoadData = command.hasOption("l");
executeQuerySets = command.hasOption("q");
zookeeper = command.getOptionValue("z", "localhost");
@@ -184,10 +184,10 @@ public class Pherf {
}
PhoenixUtil.setRowCountOverride(rowCountOverride);
if (!thinDriver) {
- logger.info("Using thick driver with ZooKeepers '{}'", zookeeper);
+ LOGGER.info("Using thick driver with ZooKeepers '{}'", zookeeper);
PhoenixUtil.setZookeeper(zookeeper);
} else {
- logger.info("Using thin driver with PQS '{}'", queryServerUrl);
+ LOGGER.info("Using thin driver with PQS '{}'", queryServerUrl);
// Enables the thin-driver and sets the PQS URL
PhoenixUtil.useThinDriver(queryServerUrl);
}
@@ -230,7 +230,7 @@ public class Pherf {
// Compare results and exit
if (null != compareResults) {
- logger.info("\nStarting to compare results and exiting for " + compareResults);
+ LOGGER.info("\nStarting to compare results and exiting for " + compareResults);
new GoogleChartGenerator(compareResults, compareType).readAndRender();
return;
}
@@ -239,7 +239,7 @@ public class Pherf {
// Drop tables with PHERF schema and regex comparison
if (null != dropPherfTablesRegEx) {
- logger.info(
+ LOGGER.info(
"\nDropping existing table with PHERF namename and " + dropPherfTablesRegEx
+ " regex expression.");
phoenixUtil.deleteTables(dropPherfTablesRegEx);
@@ -253,7 +253,7 @@ public class Pherf {
}
if (applySchema) {
- logger.info("\nStarting to apply schema...");
+ LOGGER.info("\nStarting to apply schema...");
SchemaReader
reader =
(schemaFile == null) ?
@@ -264,7 +264,7 @@ public class Pherf {
// Schema and Data Load
if (preLoadData) {
- logger.info("\nStarting Data Load...");
+ LOGGER.info("\nStarting Data Load...");
Workload workload = new WriteWorkload(parser, generateStatistics);
try {
workloadExecutor.add(workload);
@@ -277,26 +277,26 @@ public class Pherf {
}
}
} else {
- logger.info(
+ LOGGER.info(
"\nSKIPPED: Data Load and schema creation as -l argument not specified");
}
// Execute multi-threaded query sets
if (executeQuerySets) {
- logger.info("\nStarting to apply Execute Queries...");
+ LOGGER.info("\nStarting to apply Execute Queries...");
workloadExecutor
.add(new QueryExecutor(parser, phoenixUtil, workloadExecutor, parser.getDataModels(), queryHint,
isFunctional, writeRuntimeResults));
} else {
- logger.info(
+ LOGGER.info(
"\nSKIPPED: Multithreaded query set execution as -q argument not specified");
}
// Clean up the monitor explicitly
if (monitorManager != null) {
- logger.info("Run completed. Shutting down Monitor.");
+ LOGGER.info("Run completed. Shutting down Monitor.");
monitorManager.complete();
}
@@ -305,7 +305,7 @@ public class Pherf {
} finally {
if (workloadExecutor != null) {
- logger.info("Run completed. Shutting down thread pool.");
+ LOGGER.info("Run completed. Shutting down thread pool.");
workloadExecutor.shutdown();
}
}
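Pherf already uses SLF4J's parameterized form in places (the ZooKeeper and PQS messages above pass '{}' with an argument); the remaining concatenated messages could migrate the same way, since the parameterized form defers message assembly until the level check passes. A brief sketch (values hypothetical):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PherfStyleLogging {
        private static final Logger LOGGER = LoggerFactory.getLogger(PherfStyleLogging.class);

        public static void main(String[] args) {
            String zookeeper = "localhost";
            String queryServerUrl = "http://localhost:8765";

            // concatenation builds the message even when INFO is disabled
            LOGGER.info("Using thick driver with ZooKeepers '" + zookeeper + "'");

            // parameterized form defers formatting until the level check passes
            LOGGER.info("Using thin driver with PQS '{}'", queryServerUrl);
        }
    }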
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
index 8f2a1d8..87b4403 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/configuration/XMLConfigParser.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
public class XMLConfigParser {
- private static final Logger logger = LoggerFactory.getLogger(XMLConfigParser.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(XMLConfigParser.class);
private String filePattern;
private List<DataModel> dataModels;
private List<Scenario> scenarios = null;
@@ -96,7 +96,7 @@ public class XMLConfigParser {
scenarios.add(scenario);
}
} catch (JAXBException e) {
- logger.error("Unable to parse scenario file "+path, e);
+ LOGGER.error("Unable to parse scenario file "+path, e);
throw e;
}
}
@@ -122,7 +122,7 @@ public class XMLConfigParser {
JAXBContext jaxbContext = JAXBContext.newInstance(DataModel.class);
Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller();
String fName = PherfConstants.RESOURCE_SCENARIO + "/" + file.getFileName().toString();
- logger.info("Open config file: " + fName);
+ LOGGER.info("Open config file: " + fName);
XMLStreamReader xmlReader = xif.createXMLStreamReader(
new StreamSource(XMLConfigParser.class.getResourceAsStream(fName)));
return (DataModel) jaxbUnmarshaller.unmarshal(xmlReader);
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
index 929f96a..1cf740e 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/result/ResultManager.java
@@ -31,7 +31,7 @@ import java.util.ArrayList;
import java.util.List;
public class ResultManager {
- private static final Logger logger = LoggerFactory.getLogger(ResultManager.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ResultManager.class);
private final List<ResultHandler> resultHandlers;
private final ResultUtil util;
@@ -153,7 +153,7 @@ public class ResultManager {
handler.flush();
} catch (Exception e) {
e.printStackTrace();
- logger.warn("Could not flush handler: "
+ LOGGER.warn("Could not flush handler: "
+ handler.getResultFileName() + " : " + e.getMessage());
}
}
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
index 6d1e727..2597d0c 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/RulesApplier.java
@@ -39,7 +39,7 @@ import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
public class RulesApplier {
- private static final Logger logger = LoggerFactory.getLogger(RulesApplier.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(RulesApplier.class);
private static final AtomicLong COUNTER = new AtomicLong(0);
// Used to bail out of random distribution if it takes too long
@@ -116,7 +116,7 @@ public class RulesApplier {
List<Scenario> scenarios = parser.getScenarios();
DataValue value = null;
if (scenarios.contains(scenario)) {
- logger.debug("We found a correct Scenario");
+ LOGGER.debug("We found a correct Scenario");
Map<DataTypeMapping, List> overrideRuleMap = this.getCachedScenarioOverrides(scenario);
@@ -124,7 +124,7 @@ public class RulesApplier {
List<Column> overrideRuleList = this.getCachedScenarioOverrides(scenario).get(phxMetaColumn.getType());
if (overrideRuleList != null && overrideRuleList.contains(phxMetaColumn)) {
- logger.debug("We found a correct override column rule");
+ LOGGER.debug("We found a correct override column rule");
Column columnRule = getColumnForRuleOverride(overrideRuleList, phxMetaColumn);
if (columnRule != null) {
return getDataValue(columnRule);
@@ -139,12 +139,12 @@ public class RulesApplier {
// Make sure Column from Phoenix Metadata matches a rule column
if (ruleList.contains(phxMetaColumn)) {
// Generate some random data based on this rule
- logger.debug("We found a correct column rule");
+ LOGGER.debug("We found a correct column rule");
Column columnRule = getColumnForRule(ruleList, phxMetaColumn);
value = getDataValue(columnRule);
} else {
- logger.warn("Attempted to apply rule to data, but could not find a rule to match type:"
+ LOGGER.warn("Attempted to apply rule to data, but could not find a rule to match type:"
+ phxMetaColumn.getType()
);
}
@@ -177,7 +177,7 @@ public class RulesApplier {
}
if ((prefix.length() >= length) && (length > 0)) {
- logger.warn("You are attempting to generate data with a prefix (" + prefix + ") "
+ LOGGER.warn("You are attempting to generate data with a prefix (" + prefix + ") "
+ "That is longer than expected overall field length (" + length + "). "
+ "This will certainly lead to unexpected data values.");
}
@@ -352,7 +352,7 @@ public class RulesApplier {
// While it's possible to get here if you have a bunch of really small distributions,
// It's just really unlikely. This is just a safety just so we actually pick a value.
if(count++ == OH_SHIT_LIMIT){
- logger.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT);
+ LOGGER.info("We generated a value from hitting our OH_SHIT_LIMIT: " + OH_SHIT_LIMIT);
generatedDataValue = valueRule;
}
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
index 5ccdaaa..53c4408 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/schema/SchemaReader.java
@@ -33,7 +33,7 @@ import java.sql.Connection;
import java.util.Collection;
public class SchemaReader {
- private static final Logger logger = LoggerFactory.getLogger(SchemaReader.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(SchemaReader.class);
private final PhoenixUtil pUtil;
private Collection<Path> resourceList;
private final String searchPattern;
@@ -64,7 +64,7 @@ public class SchemaReader {
try {
connection = pUtil.getConnection(null);
for (Path file : resourceList) {
- logger.info("\nApplying schema to file: " + file);
+ LOGGER.info("\nApplying schema to file: " + file);
pUtil.executeStatement(resourceToString(file), connection);
}
} finally {
@@ -88,12 +88,12 @@ public class SchemaReader {
}
private void read() throws Exception {
- logger.debug("Trying to match resource pattern: " + searchPattern);
+ LOGGER.debug("Trying to match resource pattern: " + searchPattern);
System.out.println("Trying to match resource pattern: " + searchPattern);
resourceList = null;
resourceList = resourceUtil.getResourceList(searchPattern);
- logger.info("File resourceList Loaded: " + resourceList);
+ LOGGER.info("File resourceList Loaded: " + resourceList);
System.out.println("File resourceList Loaded: " + resourceList);
if (resourceList.isEmpty()) {
throw new FileLoaderException("Could not load Schema Files");
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
index 72ab3e0..43ba8ba 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/PhoenixUtil.java
@@ -44,7 +44,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
public class PhoenixUtil {
- private static final Logger logger = LoggerFactory.getLogger(PhoenixUtil.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(PhoenixUtil.class);
private static String zookeeper;
private static int rowCountOverride = 0;
private boolean testEnabled;
@@ -106,7 +106,7 @@ public class PhoenixUtil {
Properties props = new Properties();
if (null != tenantId) {
props.setProperty("TenantId", tenantId);
- logger.debug("\nSetting tenantId to " + tenantId);
+ LOGGER.debug("\nSetting tenantId to " + tenantId);
}
String url = "jdbc:phoenix:thin:url=" + queryServerUrl + ";serialization=PROTOBUF";
return DriverManager.getConnection(url, props);
@@ -118,7 +118,7 @@ public class PhoenixUtil {
Properties props = new Properties();
if (null != tenantId) {
props.setProperty("TenantId", tenantId);
- logger.debug("\nSetting tenantId to " + tenantId);
+ LOGGER.debug("\nSetting tenantId to " + tenantId);
}
if (phoenixProperty != null) {
@@ -223,12 +223,12 @@ public class PhoenixUtil {
+ "."
+ resultSet.getString(TABLE_NAME);
if (tableName.matches(regexMatch)) {
- logger.info("\nDropping " + tableName);
+ LOGGER.info("\nDropping " + tableName);
try {
executeStatementThrowException("DROP TABLE "
+ tableName + " CASCADE", conn);
} catch (org.apache.phoenix.schema.TableNotFoundException tnf) {
- logger.error("Table might be already be deleted via cascade. Schema: "
+ LOGGER.error("Table might be already be deleted via cascade. Schema: "
+ tnf.getSchemaName()
+ " Table: "
+ tnf.getTableName());
@@ -288,7 +288,7 @@ public class PhoenixUtil {
if (null != query.getDdl()) {
Connection conn = null;
try {
- logger.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
+ LOGGER.info("\nExecuting DDL:" + query.getDdl() + " on tenantId:" + query
.getTenantId());
executeStatement(query.getDdl(),
conn = getConnection(query.getTenantId()));
@@ -312,7 +312,7 @@ public class PhoenixUtil {
Connection conn = null;
try {
for (Ddl ddl : ddls) {
- logger.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId);
+ LOGGER.info("\nExecuting DDL:" + ddl + " on tenantId:" +tenantId);
long startTime = System.currentTimeMillis();
executeStatement(ddl.toString(), conn = getConnection(tenantId));
if (ddl.getStatement().toUpperCase().contains(ASYNC_KEYWORD)) {
@@ -362,10 +362,10 @@ public class PhoenixUtil {
*/
boolean isYarnJobInProgress(String tableName) {
try {
- logger.info("Fetching YARN apps...");
+ LOGGER.info("Fetching YARN apps...");
Set<String> response = new PhoenixMRJobSubmitter().getSubmittedYarnApps();
for (String str : response) {
- logger.info("Runnng YARN app: " + str);
+ LOGGER.info("Runnng YARN app: " + str);
if (str.toUpperCase().contains(tableName.toUpperCase())) {
return true;
}
@@ -382,7 +382,7 @@ public class PhoenixUtil {
}
public static void setZookeeper(String zookeeper) {
- logger.info("Setting zookeeper: " + zookeeper);
+ LOGGER.info("Setting zookeeper: " + zookeeper);
useThickDriver(zookeeper);
}
@@ -406,7 +406,7 @@ public class PhoenixUtil {
* @throws Exception
*/
public void updatePhoenixStats(String tableName, Scenario scenario) throws Exception {
- logger.info("Updating stats for " + tableName);
+ LOGGER.info("Updating stats for " + tableName);
executeStatement("UPDATE STATISTICS " + tableName, scenario);
}
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
index 0b54641..df5dbf7 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/util/ResourceList.java
@@ -40,7 +40,7 @@ import java.util.zip.ZipFile;
* list resources available from the classpath @ *
*/
public class ResourceList {
- private static final Logger logger = LoggerFactory.getLogger(ResourceList.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ResourceList.class);
private final String rootResourceDir;
public ResourceList(String rootResourceDir) {
@@ -84,10 +84,10 @@ public class ResourceList {
String rName = rootResourceDir + resource;
- logger.debug("Trying with the root append.");
+ LOGGER.debug("Trying with the root append.");
url = ResourceList.class.getResource(rName);
if (url == null) {
- logger.debug("Failed! Must be using a jar. Trying without the root append.");
+ LOGGER.debug("Failed! Must be using a jar. Trying without the root append.");
url = ResourceList.class.getResource(resource);
if (url == null) {
@@ -99,7 +99,7 @@ public class ResourceList {
} else {
path = Paths.get(url.toURI());
}
- logger.debug("Found the correct resource: " + path.toString());
+ LOGGER.debug("Found the correct resource: " + path.toString());
paths.add(path);
}
@@ -143,11 +143,11 @@ public class ResourceList {
final ZipEntry ze = (ZipEntry) e.nextElement();
final String fileName = ze.getName();
final boolean accept = pattern.matcher(fileName).matches();
- logger.trace("fileName:" + fileName);
- logger.trace("File:" + file.toString());
- logger.trace("Match:" + accept);
+ LOGGER.trace("fileName:" + fileName);
+ LOGGER.trace("File:" + file.toString());
+ LOGGER.trace("Match:" + accept);
if (accept) {
- logger.trace("Adding File from Jar: " + fileName);
+ LOGGER.trace("Adding File from Jar: " + fileName);
retVal.add("/" + fileName);
}
}
@@ -171,7 +171,7 @@ public class ResourceList {
final String fileName = file.getName();
final boolean accept = pattern.matcher(file.toString()).matches();
if (accept) {
- logger.debug("Adding File from directory: " + fileName);
+ LOGGER.debug("Adding File from directory: " + fileName);
retval.add("/" + fileName);
}
}
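The trace-level block above issues several calls per zip entry. A sketch, under the same slf4j-api assumption (class name hypothetical), of the isTraceEnabled() guard that collapses those calls to a single level check per entry:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TraceGuardSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(TraceGuardSketch.class);

        static void logZipEntry(String fileName, boolean accept) {
            // One enabled-check instead of several no-op trace() calls:
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("fileName: {}", fileName);
                LOGGER.trace("Match: {}", accept);
            }
        }
    }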
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
index 4423bbd..ecc432b 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultiThreadedRunner.java
@@ -40,7 +40,7 @@ import org.apache.phoenix.pherf.configuration.XMLConfigParser;
import org.apache.phoenix.pherf.util.PhoenixUtil;
class MultiThreadedRunner implements Callable<Void> {
- private static final Logger logger = LoggerFactory.getLogger(MultiThreadedRunner.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(MultiThreadedRunner.class);
private Query query;
private ThreadTime threadTime;
private PhoenixUtil pUtil = PhoenixUtil.create();
@@ -87,7 +87,7 @@ class MultiThreadedRunner implements Callable<Void> {
*/
@Override
public Void call() throws Exception {
- logger.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
+ LOGGER.info("\n\nThread Starting " + threadName + " ; " + query.getStatement() + " for "
+ numberOfExecutions + " times\n\n");
Long start = System.currentTimeMillis();
for (long i = numberOfExecutions; (i > 0 && ((System.currentTimeMillis() - start)
@@ -106,7 +106,7 @@ class MultiThreadedRunner implements Callable<Void> {
resultManager.flush();
}
- logger.info("\n\nThread exiting." + threadName + "\n\n");
+ LOGGER.info("\n\nThread exiting." + threadName + "\n\n");
return null;
}
@@ -137,7 +137,7 @@ class MultiThreadedRunner implements Callable<Void> {
conn.setAutoCommit(true);
final String statementString = query.getDynamicStatement(ruleApplier, scenario);
statement = conn.prepareStatement(statementString);
- logger.info("Executing: " + statementString);
+ LOGGER.info("Executing: " + statementString);
if (scenario.getWriteParams() != null) {
Workload writes = new WriteWorkload(PhoenixUtil.create(), parser, scenario, GeneratePhoenixStats.NO);
@@ -165,7 +165,7 @@ class MultiThreadedRunner implements Callable<Void> {
conn.commit();
}
} catch (Exception e) {
- logger.error("Exception while executing query", e);
+ LOGGER.error("Exception while executing query", e);
exception = e.getMessage();
throw e;
} finally {
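The error path above passes the exception as the final argument, the SLF4J idiom that preserves the stack trace. A minimal sketch (hypothetical class) contrasting it with embedding getMessage(), which drops the trace:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableLoggingSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(ThrowableLoggingSketch.class);

        static void run() {
            try {
                throw new IllegalStateException("query failed");
            } catch (Exception e) {
                // Throwable as the last argument: full stack trace is logged.
                LOGGER.error("Exception while executing query", e);
                // Message-only form: the stack trace is lost.
                LOGGER.error("Exception while executing query: " + e.getMessage());
            }
        }
    }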
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
index ef2e167..26429a5 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/MultithreadedDiffer.java
@@ -32,7 +32,6 @@ import org.slf4j.LoggerFactory;
class MultithreadedDiffer implements Callable<Void> {
private static final Logger LOGGER = LoggerFactory.getLogger(MultithreadedDiffer.class);
-
private Thread t;
private Query query;
private ThreadTime threadTime;
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
index c4a3517..d894a96 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryExecutor.java
@@ -36,7 +36,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
public class QueryExecutor implements Workload {
- private static final Logger logger = LoggerFactory.getLogger(QueryExecutor.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(QueryExecutor.class);
private List<DataModel> dataModels;
private String queryHint;
private final boolean exportCSV;
@@ -113,7 +113,7 @@ public class QueryExecutor implements Workload {
}
}
} catch (Exception e) {
- logger.error("Scenario throws exception", e);
+ LOGGER.error("Scenario throws exception", e);
throw e;
}
return null;
@@ -165,7 +165,7 @@ public class QueryExecutor implements Workload {
resultManager.write(dataModelResults, ruleApplier);
resultManager.flush();
} catch (Exception e) {
- logger.error("Scenario throws exception", e);
+ LOGGER.error("Scenario throws exception", e);
throw e;
}
return null;
@@ -255,7 +255,7 @@ public class QueryExecutor implements Workload {
queryResult.getThreadTimes().add(threadTime);
threadTime.setThreadName(name);
queryResult.setHint(this.queryHint);
- logger.info("\nExecuting query " + queryResult.getStatement());
+ LOGGER.info("\nExecuting query " + queryResult.getStatement());
Callable<Void> thread;
if (workloadExecutor.isPerformance()) {
thread =
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
index 7b2bb12..786f778 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/QueryVerifier.java
@@ -44,7 +44,7 @@ import difflib.Patch;
public class QueryVerifier {
private PhoenixUtil pUtil = PhoenixUtil.create();
- private static final Logger logger = LoggerFactory.getLogger(QueryVerifier.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(QueryVerifier.class);
private boolean useTemporaryOutput;
private String directoryLocation;
@@ -110,10 +110,10 @@ public class QueryVerifier {
Patch patch = DiffUtils.diff(original, newLines);
if (patch.getDeltas().isEmpty()) {
- logger.info("Match: " + query.getId() + " with " + newCSV);
+ LOGGER.info("Match: " + query.getId() + " with " + newCSV);
return true;
} else {
- logger.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
+ LOGGER.error("DIFF FAILED: " + query.getId() + " with " + newCSV);
return false;
}
}
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
index 4abb574..ff599b8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WorkloadExecutor.java
@@ -31,7 +31,7 @@ import java.util.Properties;
import java.util.concurrent.*;
public class WorkloadExecutor {
- private static final Logger logger = LoggerFactory.getLogger(WorkloadExecutor.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadExecutor.class);
private final int poolSize;
private final boolean isPerformance;
@@ -87,7 +87,7 @@ public class WorkloadExecutor {
future.get();
jobs.remove(workload);
} catch (InterruptedException | ExecutionException e) {
- logger.error("", e);
+ LOGGER.error("", e);
}
}
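The LOGGER.error("", e) call above logs a bare stack trace with no searchable message. A sketch (hypothetical class and message text) of a more descriptive variant, plus the usual re-interrupt on InterruptedException:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class FutureAwaitSketch {
        private static final Logger LOGGER =
                LoggerFactory.getLogger(FutureAwaitSketch.class);

        static void await(Future<?> future) {
            try {
                future.get();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status
                LOGGER.error("Interrupted while awaiting workload completion", e);
            } catch (ExecutionException e) {
                LOGGER.error("Workload execution failed", e);
            }
        }
    }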
diff --git a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
index cae223c..3df5fe8 100644
--- a/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
+++ b/phoenix-pherf/src/main/java/org/apache/phoenix/pherf/workload/WriteWorkload.java
@@ -52,7 +52,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class WriteWorkload implements Workload {
- private static final Logger logger = LoggerFactory.getLogger(WriteWorkload.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(WriteWorkload.class);
public static final String USE_BATCH_API_PROPERTY = "pherf.default.dataloader.batchApi";
@@ -169,7 +169,7 @@ public class WriteWorkload implements Workload {
resultUtil.write(dataLoadThreadTime);
} catch (Exception e) {
- logger.error("WriteWorkLoad failed", e);
+ LOGGER.error("WriteWorkLoad failed", e);
throw e;
}
return null;
@@ -179,7 +179,7 @@ public class WriteWorkload implements Workload {
private synchronized void exec(DataLoadTimeSummary dataLoadTimeSummary,
DataLoadThreadTime dataLoadThreadTime, Scenario scenario) throws Exception {
- logger.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
+ LOGGER.info("\nLoading " + scenario.getRowCount() + " rows for " + scenario.getTableName());
// Execute any pre dataload scenario DDLs
pUtil.executeScenarioDdl(scenario.getPreScenarioDdls(), scenario.getTenantId(), dataLoadTimeSummary);
@@ -190,11 +190,11 @@ public class WriteWorkload implements Workload {
// Update Phoenix Statistics
if (this.generateStatistics == GeneratePhoenixStats.YES) {
- logger.info("Updating Phoenix table statistics...");
+ LOGGER.info("Updating Phoenix table statistics...");
pUtil.updatePhoenixStats(scenario.getTableName(), scenario);
- logger.info("Stats update done!");
+ LOGGER.info("Stats update done!");
} else {
- logger.info("Phoenix table stats update not requested.");
+ LOGGER.info("Phoenix table stats update not requested.");
}
// Execute any post data load scenario DDLs before starting query workload
@@ -214,7 +214,7 @@ public class WriteWorkload implements Workload {
pUtil.getColumnsFromPhoenix(scenario.getSchemaName(),
scenario.getTableNameWithoutSchemaName(), pUtil.getConnection(scenario.getTenantId()));
int threadRowCount = rowCalculator.getNext();
- logger.info(
+ LOGGER.info(
"Kick off thread (#" + i + ")for upsert with (" + threadRowCount + ") rows.");
Future<Info>
write =
@@ -239,11 +239,11 @@ public class WriteWorkload implements Workload {
Info writeInfo = write.get();
sumRows += writeInfo.getRowCount();
sumDuration += writeInfo.getDuration();
- logger.info("Executor (" + this.hashCode() + ") writes complete with row count ("
+ LOGGER.info("Executor (" + this.hashCode() + ") writes complete with row count ("
+ writeInfo.getRowCount() + ") in ms (" + writeInfo.getDuration() + ")");
}
long testDuration = System.currentTimeMillis() - start;
- logger.info("Writes completed with total row count (" + sumRows
+ LOGGER.info("Writes completed with total row count (" + sumRows
+ ") with total elapsed time of (" + testDuration
+ ") ms and total CPU execution time of (" + sumDuration + ") ms");
dataLoadTimeSummary
@@ -296,7 +296,7 @@ public class WriteWorkload implements Workload {
}
connection.commit();
duration = System.currentTimeMillis() - last;
- logger.info("Writer (" + Thread.currentThread().getName()
+ LOGGER.info("Writer (" + Thread.currentThread().getName()
+ ") committed Batch. Total " + getBatchSize()
+ " rows for this thread (" + this.hashCode() + ") in ("
+ duration + ") ms");
@@ -315,7 +315,7 @@ public class WriteWorkload implements Workload {
}
}
} catch (SQLException e) {
- logger.error("Scenario " + scenario.getName() + " failed with exception ", e);
+ LOGGER.error("Scenario " + scenario.getName() + " failed with exception ", e);
throw e;
} finally {
// Need to keep the statement open to send the remaining batch of updates
@@ -342,7 +342,7 @@ public class WriteWorkload implements Workload {
try {
connection.commit();
duration = System.currentTimeMillis() - start;
- logger.info("Writer ( " + Thread.currentThread().getName()
+ LOGGER.info("Writer ( " + Thread.currentThread().getName()
+ ") committed Final Batch. Duration (" + duration + ") Ms");
connection.close();
} catch (SQLException e) {
diff --git a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
index 0b6c9cc..343285f 100644
--- a/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
+++ b/phoenix-pherf/src/test/java/org/apache/phoenix/pherf/ConfigurationParserTest.java
@@ -38,7 +38,7 @@ import javax.xml.bind.Marshaller;
import static org.junit.Assert.*;
public class ConfigurationParserTest extends ResultBaseTest {
- private static final Logger logger = LoggerFactory.getLogger(ConfigurationParserTest.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigurationParserTest.class);
@Test
public void testReadWriteWorkloadReader() throws Exception {
@@ -65,7 +65,7 @@ public class ConfigurationParserTest extends ResultBaseTest {
public void testConfigReader() {
try {
- logger.debug("DataModel: " + writeXML());
+ LOGGER.debug("DataModel: " + writeXML());
List<Scenario> scenarioList = getScenarios();
List<Column> dataMappingColumns = getDataModel().getDataMappingColumns();
assertTrue("Could not load the data columns from xml.",