Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/10/29 00:58:46 UTC

[01/24] git commit: PHOENIX-1375 Remove references to incubation

Repository: phoenix
Updated Branches:
  refs/heads/4.2 59647fc34 -> 5614dbd5e


PHOENIX-1375 Remove references to incubation


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6a28b7d2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6a28b7d2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6a28b7d2

Branch: refs/heads/4.2
Commit: 6a28b7d249d6e2de515b5897992ce67649762858
Parents: 59647fc
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Thu Oct 23 21:52:46 2014 +0200
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Thu Oct 23 21:52:46 2014 +0200

----------------------------------------------------------------------
 NOTICE                   | 2 +-
 dev/release_files/NOTICE | 2 +-
 pom.xml                  | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a28b7d2/NOTICE
----------------------------------------------------------------------
diff --git a/NOTICE b/NOTICE
index ac3ec17..093ae02 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,4 +1,4 @@
-Apache Phoenix (Incubating)
+Apache Phoenix
 Copyright 2014 The Apache Software Foundation
 
 This product includes software developed at

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a28b7d2/dev/release_files/NOTICE
----------------------------------------------------------------------
diff --git a/dev/release_files/NOTICE b/dev/release_files/NOTICE
index 84869a6..a96a97c 100644
--- a/dev/release_files/NOTICE
+++ b/dev/release_files/NOTICE
@@ -1,4 +1,4 @@
-Apache Phoenix (Incubating)
+Apache Phoenix
 Copyright 2014 The Apache Software Foundation
 
 This product includes software developed at

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6a28b7d2/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ab8dc33..10ef778 100644
--- a/pom.xml
+++ b/pom.xml
@@ -57,9 +57,9 @@
   </parent>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-phoenix.git</connection>
-    <url>https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git</url>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-phoenix.git</developerConnection>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/phoenix.git</connection>
+    <url>https://git-wip-us.apache.org/repos/asf/phoenix.git</url>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/phoenix.git</developerConnection>
   </scm>
 
   <properties>


[15/24] PHOENIX-1286 Remove hadoop2 compat modules

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
new file mode 100644
index 0000000..6ae52d8
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.cloudera.htrace.Span;
+
+/**
+ * Utilities for tracing
+ */
+public class TracingUtils {
+
+    private static final Log LOG = LogFactory.getLog(TracingUtils.class);
+
+    public static final String METRIC_SOURCE_KEY = "phoenix.";
+
+    /** Set context to enable filtering */
+    public static final String METRICS_CONTEXT = "tracing";
+
+    /** Marker metric to ensure that we register the tracing mbeans */
+    public static final String METRICS_MARKER_CONTEXT = "marker";
+
+    public static void addAnnotation(Span span, String message, int value) {
+        span.addKVAnnotation(message.getBytes(), Bytes.toBytes(Integer.toString(value)));
+    }
+
+    public static Pair<String, String> readAnnotation(byte[] key, byte[] value) {
+        return new Pair<String, String>(new String(key), Bytes.toString(value));
+    }
+
+    /**
+     * @see #getTraceMetricName(String)
+     */
+    public static final String getTraceMetricName(long traceId) {
+        return getTraceMetricName(Long.toString(traceId));
+    }
+
+    /**
+     * @param traceId unique id of the trace
+     * @return the name of the metric record that should be generated for a given trace
+     */
+    public static final String getTraceMetricName(String traceId) {
+        return METRIC_SOURCE_KEY + traceId;
+    }
+}
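
For reference, a minimal sketch of how the TracingUtils helpers above fit together. The MilliSpan construction mirrors getSpan() in TraceMetricsSourceTest further down in this patch and is an assumption about the org.cloudera.htrace 2.x API; the class name and values are illustrative only:

package org.apache.phoenix.trace;

import org.apache.hadoop.hbase.util.Pair;
import org.cloudera.htrace.Span;
import org.cloudera.htrace.impl.MilliSpan;

public class TracingUtilsExample {
    public static void main(String[] args) {
        // build a span the same way getSpan() does in the test below
        Span span = new MilliSpan("example span", 0, 1, 2, "pid");
        // annotations are stored as byte[] key/value pairs on the span
        TracingUtils.addAnnotation(span, "batch.size", 42);
        // decode a key/value pair back into strings
        Pair<String, String> kv = TracingUtils.readAnnotation(
                "batch.size".getBytes(), "42".getBytes());
        System.out.println(kv.getFirst() + " = " + kv.getSecond()); // batch.size = 42
        // metric record names are the trace id prefixed with "phoenix."
        System.out.println(TracingUtils.getTraceMetricName(1L));    // phoenix.1
    }
}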

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
index d0677cf..b093b9c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/util/Tracing.java
@@ -36,8 +36,7 @@ import org.apache.phoenix.call.CallWrapper;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.trace.TracingCompat;
-import org.apache.phoenix.util.StringUtil;
+import org.apache.phoenix.trace.TraceMetricSource;
 import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
@@ -313,7 +312,7 @@ public class Tracing {
     public synchronized static void addTraceMetricsSource() {
         try {
             if (!initialized) {
-                Trace.addReceiver(TracingCompat.newTraceMetricSource());
+                Trace.addReceiver(new TraceMetricSource());
             }
         } catch (RuntimeException e) {
             LOG.warn("Tracing outputs will not be written to any metrics sink! No "
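
The hunk above replaces the reflective TracingCompat.newTraceMetricSource() lookup with direct construction. A hedged sketch of what the registration now amounts to (the wrapper class here is illustrative; only TraceMetricSource and Trace.addReceiver come from this patch):

package org.apache.phoenix.trace.util;

import org.apache.phoenix.trace.TraceMetricSource;
import org.cloudera.htrace.Trace;

public class ReceiverRegistrationExample {
    public static void main(String[] args) {
        // the hadoop2-specific receiver is instantiated directly instead of
        // being resolved through the removed compatibility factory
        Trace.addReceiver(new TraceMetricSource());
    }
}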

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java
new file mode 100644
index 0000000..f4dfd74
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.impl;
+
+import org.apache.hadoop.metrics2.MetricsInfo;
+
+/**
+ * Helper class to access the package-private {@link MetricCounterLong}
+ */
+public class ExposedMetricCounterLong extends MetricCounterLong {
+
+  /**
+   * @param info name and description metadata for the metric
+   * @param value initial value of the counter
+   */
+  public ExposedMetricCounterLong(MetricsInfo info, long value) {
+    super(info, value);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java
new file mode 100644
index 0000000..c5f54e6
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.impl;
+
+import java.util.List;
+
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsTag;
+
+/**
+ * Helper class to access the package-private {@link MetricsRecordImpl}
+ */
+@SuppressWarnings("javadoc")
+public class ExposedMetricsRecordImpl extends MetricsRecordImpl {
+
+  /**
+   * @param info name and description metadata for the record
+   * @param timestamp time at which the record was taken
+   * @param tags tags attached to the record
+   * @param metrics metrics making up the record
+   */
+  public ExposedMetricsRecordImpl(MetricsInfo info, long timestamp, List<MetricsTag> tags,
+      Iterable<AbstractMetric> metrics) {
+    super(info, timestamp, tags, metrics);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java
new file mode 100644
index 0000000..1ad1553
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.metrics2.lib;
+
+import org.apache.hadoop.metrics2.lib.MetricsInfoImpl;
+
+/**
+ * Helper class to expose access to the {@link org.apache.hadoop.metrics2.lib.MetricsInfoImpl}
+ */
+public class ExposedMetricsInfoImpl extends MetricsInfoImpl {
+
+    /**
+     * @param name name of the metric
+     * @param description human-readable description of the metric
+     */
+    public ExposedMetricsInfoImpl(String name, String description) {
+        super(name, description);
+    }
+}
\ No newline at end of file
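
The three Exposed* classes in this patch only widen package-private hadoop-metrics2 constructors so that tests outside those packages can build records. A sketch of how they compose, assuming (as in stock hadoop2) that MetricCounterLong extends AbstractMetric; the names and values are illustrative:

import java.util.Collections;

import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.ExposedMetricCounterLong;
import org.apache.hadoop.metrics2.impl.ExposedMetricsRecordImpl;
import org.apache.hadoop.metrics2.lib.ExposedMetricsInfoImpl;

public class ExposedHelpersExample {
    public static void main(String[] args) {
        // name/description metadata shared by the record and the metric
        ExposedMetricsInfoImpl info =
                new ExposedMetricsInfoImpl("phoenix.1", "trace record");
        // a counter metric with an initial value of 10
        AbstractMetric metric = new ExposedMetricCounterLong(info, 10L);
        // a record with no tags and the single metric above
        ExposedMetricsRecordImpl record = new ExposedMetricsRecordImpl(
                info, System.currentTimeMillis(),
                Collections.<MetricsTag>emptyList(),
                Collections.singletonList(metric));
        System.out.println(record.name()); // phoenix.1
    }
}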

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
new file mode 100644
index 0000000..2cea684
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.phoenix.trace.TracingUtils;
+
+/**
+ * Simple sink that just logs the output of all the metrics that start with
+ * {@link org.apache.phoenix.trace.TracingUtils#METRIC_SOURCE_KEY}
+ */
+public class LoggingSink implements MetricsSink {
+
+    private static final Log LOG = LogFactory.getLog(LoggingSink.class);
+
+    @Override
+    public void init(SubsetConfiguration config) {
+    }
+
+    @Override
+    public void putMetrics(MetricsRecord record) {
+        // we could wait until flush, but this is a really lightweight process,
+        // so we just write the metrics as soon as we get them
+        if (!LOG.isDebugEnabled()) {
+            return;
+        }
+        LOG.debug("Found record:" + record.name());
+        for (AbstractMetric metric : record.metrics()) {
+            // just print the metric we care about
+            if (metric.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) {
+                LOG.debug("\t metric:" + metric);
+            }
+        }
+    }
+
+    @Override
+    public void flush() {
+    }
+}
\ No newline at end of file
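
LoggingSink above is normally attached through a hadoop-metrics2 configuration file, but it can also be registered programmatically. A sketch using the same MetricsSystem initialize/register calls that appear in the (removed) MetricsManagerImpl below, with the "phoenix" prefix used elsewhere in this patch:

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.phoenix.metrics.LoggingSink;

public class LoggingSinkExample {
    public static void main(String[] args) {
        // start the metrics system under the "phoenix" prefix
        MetricsSystem system = DefaultMetricsSystem.initialize("phoenix");
        // attach the sink; metrics whose names start with "phoenix." are
        // logged at DEBUG level as records arrive
        system.register("logging", "logs phoenix.* tracing metrics",
                new LoggingSink());
    }
}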

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
new file mode 100644
index 0000000..5cb34b8
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.impl.MilliSpan;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test that the {@link TraceMetricSource} correctly handles different kinds of traces
+ */
+public class TraceMetricsSourceTest {
+
+  @BeforeClass
+  public static void setup() throws Exception{
+    DefaultMetricsSystem.setMiniClusterMode(true);
+  }
+
+  /**
+   * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers,
+   * but HBase writes some strings as well, so we need to be able to handle that too
+   */
+  @Test
+  public void testNonIntegerAnnotations(){
+    Span span = getSpan();
+    // make sure the value is shorter than a serialized integer
+    byte[] value = Bytes.toBytes("a");
+    byte[] someInt = Bytes.toBytes(1);
+    assertTrue(someInt.length > value.length);
+
+    // an annotation that is not an integer
+    span.addKVAnnotation(Bytes.toBytes("key"), value);
+
+    // Create the sink and write the span
+    TraceMetricSource source = new TraceMetricSource();
+    source.receiveSpan(span);
+  }
+
+  @Test
+  public void testIntegerAnnotations(){
+    Span span = getSpan();
+
+    // add annotation through the phoenix interfaces
+    TracingUtils.addAnnotation(span, "message", 10);
+
+    TraceMetricSource source = new TraceMetricSource();
+    source.receiveSpan(span);
+  }
+
+  /**
+   * If the source does not write any metrics when there are no spans, i.e. when initialized,
+   * then the metrics system will discard the source, so it needs to always emit some metrics.
+   */
+  @Test
+  public void testWritesInfoWhenNoSpans(){
+    TraceMetricSource source = new TraceMetricSource();
+    MetricsCollector collector = Mockito.mock(MetricsCollector.class);
+    MetricsRecordBuilder builder = Mockito.mock(MetricsRecordBuilder.class);
+    Mockito.when(collector.addRecord(Mockito.anyString())).thenReturn(builder);
+
+    source.getMetrics(collector, true);
+
+    // verify that we add a record and that the record has some info
+    Mockito.verify(collector).addRecord(Mockito.anyString());
+    Mockito.verify(builder).add(Mockito.any(MetricsTag.class));
+  }
+
+  private Span getSpan(){
+    return new MilliSpan("test span", 0, 1, 2, "pid");
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/pom.xml b/phoenix-hadoop-compat/pom.xml
deleted file mode 100644
index fef5ca4..0000000
--- a/phoenix-hadoop-compat/pom.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>4.2.0</version>
-  </parent>
-  <artifactId>phoenix-hadoop-compat</artifactId>
-  <name>Phoenix Hadoop Compatibility</name>
-  <description>Compatibility layer for Hadoop versions</description>
-  
-  <build>
-    <plugins>
-      <!-- Run with -Dmaven.test.skip.exec=true to build -tests.jar without running 
-        tests (this is needed for upstream projects whose tests need this jar simply for 
-        compilation) -->
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <version>2.4</version><!--$NO-MVN-MAN-VER$-->
-        <executions>
-          <execution>
-            <phase>prepare-package
-            </phase>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <!--Make it so assembly:single does nothing in here -->
-        <artifactId>maven-assembly-plugin</artifactId>
-        <configuration>
-          <skipAssembly>true</skipAssembly>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.cloudera.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-hadoop-compat</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-    </dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
deleted file mode 100644
index e6ad976..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-/**
- * Metrics and their conversion from the trace name to the name we store in the stats table
- */
-public enum MetricInfo {
-
-    TRACE("", "trace_id"),
-    SPAN("span_id", "span_id"),
-    PARENT("parent_id", "parent_id"),
-    START("start_time", "start_time"),
-    END("end_time", "end_time"),
-    TAG("phoenix.tag", "t"),
-    ANNOTATION("phoenix.annotation", "a"),
-    HOSTNAME("Hostname", "hostname"),
-    DESCRIPTION("", "description");
-
-    public final String traceName;
-    public final String columnName;
-
-    private MetricInfo(String traceName, String columnName) {
-        this.traceName = traceName;
-        this.columnName = columnName;
-    }
-
-    public static String getColumnName(String traceName) {
-        for (MetricInfo info : MetricInfo.values()) {
-            if (info.traceName.equals(traceName)) {
-                return info.columnName;
-            }
-        }
-        throw new IllegalArgumentException("Unknown tracename: " + traceName);
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java
deleted file mode 100644
index 5bc8545..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/Metrics.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-
-public class Metrics {
-
-    private static final Log LOG = LogFactory.getLog(Metrics.class);
-
-  private static volatile MetricsManager manager;
-
-    private static boolean initialized;
-
-    /** This must match the prefix that we are using in the hadoop-metrics2 config on the client */
-    public static final String METRICS_SYSTEM_NAME = "phoenix";
-    public static MetricsManager initialize() {
-        MetricsManager manager = Metrics.getManager();
-        // if the jars aren't on the classpath, then we don't start the metrics system
-        if (manager == null) {
-            LOG.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
-            return null;
-        }
-        // only initialize the metrics system once
-        synchronized (Metrics.class) {
-            if (!initialized) {
-                LOG.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
-                manager.initialize(Metrics.METRICS_SYSTEM_NAME);
-                initialized = true;
-            }
-        }
-        return manager;
-    }
-
-  /**
-   * @return get the first {@link MetricsManager} on the classpath. Always returns the same object
-   */
-  public static MetricsManager getManager(){
-    if(manager == null){
-      synchronized(Metrics.class){
-        if(manager == null){
-          manager = CompatibilitySingletonFactory.getInstance(MetricsManager.class);
-        }
-      }
-    }
-    return manager;
-  }
-
-    private static volatile boolean sinkInitialized = false;
-
-    /**
-     * Mark that the metrics/tracing sink has been initialized
-     */
-    public static void markSinkInitialized() {
-        sinkInitialized = true;
-    }
-
-    public static void ensureConfigured() {
-        if (!sinkInitialized) {
-            LOG.warn("Phoenix metrics2/tracing sink was not started. Should be it be?");
-        }
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java
deleted file mode 100644
index 13c9435..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsManager.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-/**
- * Metrics management system. Backed by the underlying hadoop metrics, depending on the project on
- * the classpath.
- * <p>
- * The underlying types passed to method must match the expected metrics type - this will vary for
- * the underlying metrics systems (hadoop1 vs hadoop2), but can't be specified at this layer because
- * we must be compatible with both systems.
- */
-public interface MetricsManager {
-
-    /**
-     * @param metricsSystemName the metrics prefix to initialize, if it hasn't already been
-     *            initialized. Not assumed to be thread-safe, unless otherwise noted in the
-     *            implementation.
-     */
-    public abstract void initialize(String metricsSystemName);
-
-    /**
-     * Register a metrics sink
-     * @param <T> the type of the sink
-     * @param sink to register
-     * @param name of the sink. Must be unique.
-     * @param desc the description of the sink
-     * @return the sink
-     */
-    public abstract <T> T register(String name, String desc, T sink);
-
-    /**
-     * Register a metrics source.
-     * @param name name of the source - must be unique
-     * @param description description of the source
-     * @param source to register.
-     * @param <T> the type of the source
-     * @return the source
-     */
-    public abstract <T> T registerSource(String name, String description, T source);
-
-    public void shutdown();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java
deleted file mode 100644
index 0e8b9fe..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/MetricsWriter.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-
-/**
- * Generic writer for a phoenix metric
- */
-public interface MetricsWriter {
-
-    public void initialize();
-
-    public void flush();
-
-    public void addMetrics(PhoenixMetricsRecord record);
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java
deleted file mode 100644
index 27ae6b8..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixAbstractMetric.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-
-public interface PhoenixAbstractMetric {
-
-    public String getName();
-
-    /**
-     * Get the value of the metric
-     * @return the value of the metric
-     */
-    public Number value();
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java
deleted file mode 100644
index 123cc1c..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricTag.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-public interface PhoenixMetricTag {
-
-    public String name();
-
-    public String description();
-
-    public String value();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java
deleted file mode 100644
index 68f7c46..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/metrics/PhoenixMetricsRecord.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-import java.util.Collection;
-
-
-/**
- *
- */
-public interface PhoenixMetricsRecord {
-
-    public String name();
-
-    public String description();
-
-    public Iterable<PhoenixAbstractMetric> metrics();
-
-    public Collection<PhoenixMetricTag> tags();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java
deleted file mode 100644
index 7e4e09c..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/PhoenixSpanReceiver.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import org.cloudera.htrace.SpanReceiver;
-
-/**
- * Marker interface for phoenix specific receivers.
- */
-public interface PhoenixSpanReceiver extends SpanReceiver {
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TestableMetricsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TestableMetricsWriter.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TestableMetricsWriter.java
deleted file mode 100644
index b6bc75d..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TestableMetricsWriter.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import org.apache.phoenix.metrics.MetricsWriter;
-
-/**
- * Marker interface for a MetricsWriter that can be registered to the current metrics system. The
- * writer should convert from the metrics information it receives from the metrics system to Phoenix
- * records that the MetricsWriter can read (and subsequently write).
- */
-public interface TestableMetricsWriter {
-
-    public void setWriterForTesting(MetricsWriter writer);
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TracingCompat.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TracingCompat.java b/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TracingCompat.java
deleted file mode 100644
index e0a3410..0000000
--- a/phoenix-hadoop-compat/src/main/java/org/apache/phoenix/trace/TracingCompat.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CompatibilityFactory;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.phoenix.metrics.MetricsWriter;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-
-/**
- * Utilities for tracing that are common among the compatibility and core classes.
- */
-public class TracingCompat {
-
-    private static final Log LOG = LogFactory.getLog(TracingCompat.class);
-
-    /**
-     * @return a new SpanReceiver that will write to the correct metrics system
-     */
-    public static SpanReceiver newTraceMetricSource() {
-        return CompatibilityFactory.getInstance(PhoenixSpanReceiver.class);
-    }
-
-    public static final String METRIC_SOURCE_KEY = "phoenix.";
-
-    /** Set context to enable filtering */
-    public static final String METRICS_CONTEXT = "tracing";
-
-    /** Marker metric to ensure that we register the tracing mbeans */
-    public static final String METRICS_MARKER_CONTEXT = "marker";
-
-    public static void addAnnotation(Span span, String message, int value) {
-        span.addKVAnnotation(message.getBytes(), Bytes.toBytes(Integer.toString(value)));
-    }
-
-    public static Pair<String, String> readAnnotation(byte[] key, byte[] value) {
-        return new Pair<String, String>(new String(key), Bytes.toString(value));
-    }
-
-    public static MetricsWriter initializeWriter(String clazz) {
-        try {
-            MetricsWriter writer =
-                    Class.forName(clazz).asSubclass(MetricsWriter.class).newInstance();
-            writer.initialize();
-            return writer;
-        } catch (InstantiationException e) {
-            LOG.error("Failed to create metrics writer: " + clazz, e);
-        } catch (IllegalAccessException e) {
-            LOG.error("Failed to create metrics writer: " + clazz, e);
-        } catch (ClassNotFoundException e) {
-            LOG.error("Failed to create metrics writer: " + clazz, e);
-        }
-        return null;
-    }
-
-    /**
-     * @see #getTraceMetricName(String)
-     */
-    public static final String getTraceMetricName(long traceId) {
-        return getTraceMetricName(Long.toString(traceId));
-    }
-
-    /**
-     * @param traceId unique id of the trace
-     * @return the name of the metric record that should be generated for a given trace
-     */
-    public static final String getTraceMetricName(String traceId) {
-        return METRIC_SOURCE_KEY + traceId;
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/LoggingSink.java b/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
deleted file mode 100644
index 97682b3..0000000
--- a/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/LoggingSink.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.phoenix.trace.TracingCompat;
-
-/**
- * Simple sink that just logs the output of all the metrics that start with
- * {@link TracingCompat#METRIC_SOURCE_KEY}
- */
-public class LoggingSink implements MetricsWriter {
-
-    private static final Log LOG = LogFactory.getLog(LoggingSink.class);
-
-    @Override
-    public void initialize() {
-    }
-
-    @Override
-    public void addMetrics(PhoenixMetricsRecord record) {
-        // we could wait until flush, but this is a really lightweight process, so we just write
-        // them
-        // as soon as we get them
-        if (!LOG.isDebugEnabled()) {
-            return;
-        }
-        LOG.debug("Found record:" + record.name());
-        for (PhoenixAbstractMetric metric : record.metrics()) {
-            // just print the metric we care about
-            if (metric.getName().startsWith(TracingCompat.METRIC_SOURCE_KEY)) {
-                LOG.debug("\t metric:" + metric);
-            }
-        }
-    }
-
-    @Override
-    public void flush() {
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/TracingTestCompat.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/TracingTestCompat.java b/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/TracingTestCompat.java
deleted file mode 100644
index 8dd8a41..0000000
--- a/phoenix-hadoop-compat/src/test/java/org/apache/phoenix/metrics/TracingTestCompat.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-import org.apache.hadoop.hbase.CompatibilityFactory;
-import org.apache.phoenix.trace.TestableMetricsWriter;
-
-/**
- * Utility class for testing tracing
- */
-public class TracingTestCompat {
-
-    private TracingTestCompat() {
-        assert false;
-    }
-
-    public static TestableMetricsWriter newTraceMetricSink() {
-        return CompatibilityFactory.getInstance(TestableMetricsWriter.class);
-    }
-
-    /**
-     * Register the sink with the metrics system, so we don't need to specify it in the conf
-     * @param sink
-     */
-    public static void registerSink(MetricsWriter sink) {
-        TestableMetricsWriter writer = newTraceMetricSink();
-        writer.setWriterForTesting(sink);
-        Metrics.getManager().register("phoenix", "test sink gets logged", writer);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/pom.xml b/phoenix-hadoop2-compat/pom.xml
deleted file mode 100644
index e762678..0000000
--- a/phoenix-hadoop2-compat/pom.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version='1.0'?>
-<!--
-
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing,
- software distributed under the License is distributed on an
- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- KIND, either express or implied.  See the License for the
- specific language governing permissions and limitations
- under the License.
-
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.phoenix</groupId>
-    <artifactId>phoenix</artifactId>
-    <version>4.2.0</version>
-  </parent>
-  <artifactId>phoenix-hadoop2-compat</artifactId>
-  <name>Phoenix Hadoop2 Compatibility</name>
-
-  <dependencies>
-    <!-- Intra-project dependencies -->
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop-compat</artifactId>
-    </dependency>
-    <!-- HBase -->
-    <dependency>
-    <groupId>org.apache.hbase</groupId>
-    <artifactId>hbase-common</artifactId>
-    <exclusions>
-      <exclusion>
-        <artifactId>hadoop-core</artifactId>
-        <groupId>org.apache.hadoop</groupId>
-      </exclusion>
-    </exclusions>
-    </dependency>
-    <!-- Hadoop -->
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-core</artifactId>
-      <version>${hadoop-two.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <version>${hadoop-two.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop-two.version}</version>
-    </dependency>
-    <!-- Other -->
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <!-- Test -->
-     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-all</artifactId>
-    </dependency>
-  </dependencies>
-</project>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/metrics/MetricsManagerImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/metrics/MetricsManagerImpl.java b/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/metrics/MetricsManagerImpl.java
deleted file mode 100644
index 03e06a5..0000000
--- a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/metrics/MetricsManagerImpl.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-package org.apache.phoenix.metrics;
-
-import java.util.Arrays;
-
-import org.apache.hadoop.metrics2.MetricsSink;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-
-import com.google.common.base.Preconditions;
-
-/**
- *
- */
-public class MetricsManagerImpl implements MetricsManager {
-
-  private MetricsSystem system;
-
-  @Override
-  /**
-   * Register a metrics sink
-   * @param <T>   the type of the sink.
-   * @param sink  to register
-   * @param name  of the sink. Must be unique.
-   * @param desc  the description of the sink
-   * @return the sink
-   * @throws IllegalArgumentException if sink is not a MetricsSink
-   */
-  public <T> T register(String name, String desc, T sink) {
-    isA(sink, MetricsSink.class);
-    return (T) system.register(name, desc, (MetricsSink) sink);
-  }
-
-  public <T> T registerSource(String name, String desc, T source) {
-    isA(source, MetricsSource.class);
-    return (T) system.register(name, desc, (MetricsSource) source);
-  }
-
-  @Override
-  public void initialize(String prefix) {
-    this.system = DefaultMetricsSystem.initialize(prefix);
-  }
-
-  private <T> void isA(T object, Class<?>... classes) {
-    boolean match = false;
-    for (Class<?> clazz : classes) {
-      if (clazz.isAssignableFrom(object.getClass())) {
-        match = true;
-        break;
-      }
-    }
-    Preconditions.checkArgument(match, object + " is not one of " + Arrays.toString(classes));
-  }
-
-  @Override
-  public void shutdown() {
-    if (this.system != null) {
-      this.system.shutdown();
-    }
-  }
-}
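
For context, the class above was a thin wrapper over the hadoop2 metrics2
bootstrap calls. A minimal sketch of those calls, with an illustrative no-op
sink (class and instance names below are not from this module):

import org.apache.commons.configuration.SubsetConfiguration;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsSink;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class MetricsBootstrapSketch {
  // A sink that drops everything, just to make the registration call concrete.
  static class NoOpSink implements MetricsSink {
    @Override public void init(SubsetConfiguration conf) { }
    @Override public void putMetrics(MetricsRecord record) { }
    @Override public void flush() { }
  }

  public static void main(String[] args) {
    // Initialize (or fetch) the process-wide metrics system under a prefix.
    MetricsSystem system = DefaultMetricsSystem.initialize("phoenix");
    // Sinks are registered under a unique name plus a description.
    system.register("noopSink", "Drops all metrics", new NoOpSink());
    system.shutdown();
  }
}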

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java b/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java
deleted file mode 100644
index 47c1dda..0000000
--- a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import com.google.common.base.Objects;
-import static com.google.common.base.Preconditions.*;
-import org.apache.hadoop.metrics2.MetricsInfo;
-
-/**
- * Making implementing metric info a little easier
- * <p>
- * Just a copy of the same from Hadoop, but exposed for usage.
- */
-public class MetricsInfoImpl implements MetricsInfo {
-  private final String name, description;
-
-  MetricsInfoImpl(String name, String description) {
-    this.name = checkNotNull(name, "name");
-    this.description = checkNotNull(description, "description");
-  }
-
-  @Override public String name() {
-    return name;
-  }
-
-  @Override public String description() {
-    return description;
-  }
-
-  @Override public boolean equals(Object obj) {
-    if (obj instanceof MetricsInfo) {
-      MetricsInfo other = (MetricsInfo) obj;
-      return Objects.equal(name, other.name()) &&
-             Objects.equal(description, other.description());
-    }
-    return false;
-  }
-
-  @Override public int hashCode() {
-    return Objects.hashCode(name, description);
-  }
-
-  @Override public String toString() {
-    return Objects.toStringHelper(this)
-        .add("name", name).add("description", description)
-        .toString();
-  }
-}
\ No newline at end of file
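
The copy above exists because Hadoop keeps its own MetricsInfoImpl
package-private. The public alternative inside hadoop2 itself is Interns.info,
which the TraceMetricSource below already uses; a tiny sketch (the
name/description values are illustrative):

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.lib.Interns;

public class InternedInfoSketch {
  public static void main(String[] args) {
    // Interns.info returns a cached MetricsInfo for a name/description pair.
    MetricsInfo info = Interns.info("spanCount", "number of spans seen");
    System.out.println(info.name() + ": " + info.description());
  }
}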

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
deleted file mode 100644
index 3de7da3..0000000
--- a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-
-import javax.annotation.Nullable;
-
-import org.apache.commons.configuration.Configuration;
-import org.apache.commons.configuration.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.metrics2.AbstractMetric;
-import org.apache.hadoop.metrics2.MetricsRecord;
-import org.apache.hadoop.metrics2.MetricsSink;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.phoenix.metrics.Metrics;
-import org.apache.phoenix.metrics.MetricsWriter;
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-import org.apache.phoenix.metrics.PhoenixMetricTag;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterators;
-
-/**
- * Translate metrics from a Hadoop2 metrics2 metric to a generic PhoenixMetric that a
- * {@link MetricsWriter} can then write out.
- * <p>
- * This class becomes unnecessary once we drop Hadoop1 support.
- */
-public class PhoenixMetricsSink implements MetricsSink, TestableMetricsWriter {
-
-  private static final Log LOG = LogFactory.getLog(PhoenixMetricsSink.class);
-  /**
-   * Metrics configuration key for the class that should be used for writing the output.
-   * <p>
-   * This would actually be set as: <code>
-   * phoenix.sink.&lt;some instance name&gt;.writer-class
-   * </code> Where <tt>some instance name</tt> is just any unique name, so properties can be
-   * differentiated
-   */
-  public static final String PHOENIX_METRICS_WRITER_CLASS = "writer-class";
-
-  public static void setWriterClass(MetricsWriter writer, Configuration conf) {
-    conf.setProperty(PHOENIX_METRICS_WRITER_CLASS, writer.getClass().getName());
-  }
-
-  private MetricsWriter writer;
-
-  public PhoenixMetricsSink() {
-    LOG.info("Writing tracing metrics to phoenix table");
-    Metrics.markSinkInitialized();
-  }
-
-  @Override
-  public void init(SubsetConfiguration config) {
-    // instantiate the configured writer class
-    String clazz = config.getString(PHOENIX_METRICS_WRITER_CLASS);
-    LOG.info("Instantiating writer class: " + clazz);
-    this.writer = TracingCompat.initializeWriter(clazz);
-    Preconditions.checkNotNull(writer, "Could not correctly initialize metrics writer!");
-  }
-
-  @Override
-  @VisibleForTesting
-  public void setWriterForTesting(MetricsWriter writer) {
-    this.writer = writer;
-  }
-
-  @Override
-  public void putMetrics(MetricsRecord record) {
-    writer.addMetrics(wrap(record));
-  }
-
-  @Override
-  public void flush() {
-    writer.flush();
-  }
-
-  /**
-   * Convert the passed record to a {@link PhoenixMetricsRecord}
-   * @param record to convert
-   * @return a generic {@link PhoenixMetricsRecord} that delegates to the record in all things
-   */
-  private PhoenixMetricsRecord wrap(final MetricsRecord record) {
-    return new PhoenixMetricsRecord() {
-
-      @Override
-      public String name() {
-        return record.name();
-      }
-
-      @Override
-      public String description() {
-        return record.description();
-      }
-
-      @Override
-      public Iterable<PhoenixAbstractMetric> metrics() {
-        final Iterable<AbstractMetric> iterable = record.metrics();
-        return new Iterable<PhoenixAbstractMetric>(){
-
-          @Override
-          public Iterator<PhoenixAbstractMetric> iterator() {
-            final Iterator<AbstractMetric> iter = iterable.iterator();
-            return Iterators.transform(iter, new Function<AbstractMetric, PhoenixAbstractMetric>() {
-
-              @Override
-              @Nullable
-              public PhoenixAbstractMetric apply(@Nullable final AbstractMetric input) {
-                if (input == null) {
-                  return null;
-                }
-                return new PhoenixAbstractMetric() {
-
-                  @Override
-                  public Number value() {
-                    return input.value();
-                  }
-
-                  @Override
-                  public String getName() {
-                    return input.name();
-                  }
-
-                  @Override
-                  public String toString() {
-                    return input.toString();
-                  }
-                };
-              }
-            });
-          }
-        };
-      }
-
-      @Override
-      public Collection<PhoenixMetricTag> tags() {
-        Collection<PhoenixMetricTag> tags = new ArrayList<PhoenixMetricTag>();
-        Collection<MetricsTag> origTags = record.tags();
-        for (final MetricsTag tag : origTags) {
-          tags.add(new PhoenixMetricTag() {
-
-            @Override
-            public String name() {
-              return tag.name();
-            }
-
-            @Override
-            public String description() {
-              return tag.description();
-            }
-
-            @Override
-            public String value() {
-              return tag.value();
-            }
-
-            @Override
-            public String toString() {
-              return tag.toString();
-            }
-
-          });
-        }
-        return tags;
-      }
-
-    };
-  }
-}
\ No newline at end of file
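
Since init(SubsetConfiguration) reads the writer-class key from the sink's own
configuration subset, the wiring lives in hadoop-metrics2.properties. A sketch,
assuming an instance named "tracing" and a hypothetical writer class:

# hadoop-metrics2.properties (illustrative; instance name and writer class
# are assumptions, not taken from this commit)
phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
phoenix.sink.tracing.writer-class=org.example.MyMetricsWriter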

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java b/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
deleted file mode 100644
index 1114a95..0000000
--- a/phoenix-hadoop2-compat/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION;
-import static org.apache.phoenix.metrics.MetricInfo.END;
-import static org.apache.phoenix.metrics.MetricInfo.PARENT;
-import static org.apache.phoenix.metrics.MetricInfo.SPAN;
-import static org.apache.phoenix.metrics.MetricInfo.START;
-import static org.apache.phoenix.metrics.MetricInfo.TAG;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsSource;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.hadoop.metrics2.lib.Interns;
-import org.apache.phoenix.metrics.MetricInfo;
-import org.apache.phoenix.metrics.Metrics;
-import org.apache.phoenix.metrics.MetricsManager;
-import org.cloudera.htrace.HTraceConfiguration;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-import org.cloudera.htrace.TimelineAnnotation;
-import org.cloudera.htrace.impl.MilliSpan;
-
-/**
- * Sink for request traces ({@link SpanReceiver}) that pushes writes to {@link MetricsSource} in a
- * format that we can more easily consume.
- * <p>
- * <p>
- * Rather than write directly to a phoenix table, we drop it into the metrics queue so we can more
- * cleanly handle it asynchronously. Currently, {@link MilliSpan} submits the span in a synchronized
- * block to all the receivers, which could have a lot of overhead if we are submitting to multiple
- * receivers.
- * <p>
- * The format of the generated metrics is this:
- * <ol>
- *   <li>All Metrics from the same span have the same name (allowing correlation in the sink)</li>
- *   <li>The description of the metric describes what it contains. For instance,
- *   <ul>
- *     <li>{@link MetricInfo#PARENT} is the id of the parent of this span. (Root span is
- *     {@link Span#ROOT_SPAN_ID}).</li>
- *     <li>{@value MetricInfo#START} is the start time of the span</li>
- *     <li>{@value MetricInfo#END} is the end time of the span</li>
- *   </ul></li>
- *   <li>Each span's messages are contained in a {@link MetricsTag} with the same name as above and a
- *   generic counter for the number of messages (to differentiate messages and provide timeline
- *   ordering).</li>
- * </ol>
- * <p>
- * <i>So why even submit to metrics2 framework if we only have a single source?</i>
- * <p>
- * This allows us to make the updates in batches. We might have spans that finish before other spans
- * (for instance in the same parent). By batching the updates we can lessen the overhead on the
- * client, which is also busy doing 'real' work. <br>
- * We could make our own queue and manage batching and filtering and dropping extra metrics, but
- * that starts to get complicated fast (it's not as easy as it sounds), so we use metrics2 to
- * abstract out that pipeline, which also gives us the flexibility to dump metrics to other sources.
- * <p>
- * This is a somewhat rough implementation - we do excessive locking for correctness,
- * rather than trying to make it fast, for the moment.
- */
-public class TraceMetricSource implements PhoenixSpanReceiver, MetricsSource {
-
-  private static final String EMPTY_STRING = "";
-
-  private static final String CONTEXT = "tracing";
-
-  private List<Metric> spans = new ArrayList<Metric>();
-
-  public TraceMetricSource() {
-    MetricsManager manager = Metrics.initialize();
-
-    // Register this instance.
-    // For right now, we ignore the MBean registration issues that show up in DEBUG logs. Basically,
-    // we need a Jmx MBean compliant name. We'll get to a better name when we want that later
-    manager.registerSource(CONTEXT, "Phoenix call tracing", this);
-  }
-
-  @Override
-  public void receiveSpan(Span span) {
-    Metric builder = new Metric(span);
-    // add all the metrics for the span
-    builder.addCounter(Interns.info(SPAN.traceName, EMPTY_STRING), span.getSpanId());
-    builder.addCounter(Interns.info(PARENT.traceName, EMPTY_STRING), span.getParentId());
-    builder.addCounter(Interns.info(START.traceName, EMPTY_STRING), span.getStartTimeMillis());
-    builder.addCounter(Interns.info(END.traceName, EMPTY_STRING), span.getStopTimeMillis());
-    // add the tags to the span. They were written in order received so we mark them as such
-    for (TimelineAnnotation ta : span.getTimelineAnnotations()) {
-      builder.add(new MetricsTag(Interns.info(TAG.traceName, Long.toString(ta.getTime())), ta
-          .getMessage()));
-    }
-
-    // add the annotations. We assume they are serialized as strings and integers, but that can
-    // change in the future
-    Map<byte[], byte[]> annotations = span.getKVAnnotations();
-    for (Entry<byte[], byte[]> annotation : annotations.entrySet()) {
-      Pair<String, String> val =
-          TracingCompat.readAnnotation(annotation.getKey(), annotation.getValue());
-      builder.add(new MetricsTag(Interns.info(ANNOTATION.traceName, val.getFirst()), val
-          .getSecond()));
-    }
-
-    // add the span to the list we care about
-    synchronized (this) {
-      spans.add(builder);
-    }
-  }
-
-  @Override
-  public void getMetrics(MetricsCollector collector, boolean all) {
-    // add a marker record so we know how many spans are used
-    // this is also necessary to ensure that we register the metrics source as an MBean (avoiding a
-    // runtime warning)
-    MetricsRecordBuilder marker = collector.addRecord(TracingCompat.METRICS_MARKER_CONTEXT);
-    marker.add(new MetricsTag(new MetricsInfoImpl("stat", "num spans"), Integer
-        .toString(spans.size())));
-
-    // actually convert the known spans into metric records as well
-    synchronized (this) {
-      for (Metric span : spans) {
-        MetricsRecordBuilder builder = collector.addRecord(new MetricsInfoImpl(TracingCompat
-            .getTraceMetricName(span.id), span.desc));
-        builder.setContext(TracingCompat.METRICS_CONTEXT);
-        for (Pair<MetricsInfo, Long> metric : span.counters) {
-          builder.addCounter(metric.getFirst(), metric.getSecond());
-        }
-        for (MetricsTag tag : span.tags) {
-          builder.add(tag);
-        }
-      }
-      // reset the spans so we don't keep a big chunk of memory around
-      spans = new ArrayList<Metric>();
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    // noop
-  }
-
-  @Override
-  public void configure(HTraceConfiguration conf) {
-    // noop
-  }
-
-  private static class Metric {
-
-    List<Pair<MetricsInfo, Long>> counters = new ArrayList<Pair<MetricsInfo, Long>>();
-    List<MetricsTag> tags = new ArrayList<MetricsTag>();
-    private String id;
-    private String desc;
-
-    public Metric(Span span) {
-      this.id = Long.toString(span.getTraceId());
-      this.desc = span.getDescription();
-    }
-
-    /**
-     * @param metricsInfoImpl
-     * @param startTimeMillis
-     */
-    public void addCounter(MetricsInfo metricsInfoImpl, long startTimeMillis) {
-      counters.add(new Pair<MetricsInfo, Long>(metricsInfoImpl, startTimeMillis));
-    }
-
-    /**
-     * @param metricsTag
-     */
-    public void add(MetricsTag metricsTag) {
-      tags.add(metricsTag);
-    }
-  }
-}
\ No newline at end of file
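
For comparison, a bare-bones receiver against the same htrace 2 API the class
above implements (receiveSpan/configure/close); the logging behavior here is
illustrative only:

import java.io.IOException;

import org.cloudera.htrace.HTraceConfiguration;
import org.cloudera.htrace.Span;
import org.cloudera.htrace.SpanReceiver;

public class LoggingSpanReceiver implements SpanReceiver {
  @Override
  public void configure(HTraceConfiguration conf) {
    // noop
  }

  @Override
  public void receiveSpan(Span span) {
    // MilliSpan invokes this synchronously for every receiver, so keep it cheap.
    System.out.println("span " + span.getSpanId() + " '" + span.getDescription()
        + "' took " + (span.getStopTimeMillis() - span.getStartTimeMillis()) + "ms");
  }

  @Override
  public void close() throws IOException {
    // noop
  }
}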

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.metrics.MetricsManager
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.metrics.MetricsManager b/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.metrics.MetricsManager
deleted file mode 100644
index 8430a48..0000000
--- a/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.metrics.MetricsManager
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.phoenix.metrics.MetricsManagerImpl
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.PhoenixSpanReceiver
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.PhoenixSpanReceiver b/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.PhoenixSpanReceiver
deleted file mode 100644
index 3694093..0000000
--- a/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.PhoenixSpanReceiver
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.phoenix.trace.TraceMetricSource
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.TestableMetricsWriter
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.TestableMetricsWriter b/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.TestableMetricsWriter
deleted file mode 100644
index 7c9e107..0000000
--- a/phoenix-hadoop2-compat/src/main/resources/META-INF/services/org.apache.phoenix.trace.TestableMetricsWriter
+++ /dev/null
@@ -1 +0,0 @@
-org.apache.phoenix.trace.PhoenixMetricsSink
\ No newline at end of file
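
These three files are standard java.util.ServiceLoader registrations: the file
name is the interface, the single line inside is the implementation. A sketch
of the consumer side of that mechanism (the printing is illustrative only):

import java.util.Iterator;
import java.util.ServiceLoader;

import org.apache.phoenix.metrics.MetricsManager;

public class ServiceLoaderSketch {
  public static void main(String[] args) {
    // Scans META-INF/services/org.apache.phoenix.metrics.MetricsManager on the
    // classpath and lazily instantiates each implementation listed there.
    Iterator<MetricsManager> impls =
        ServiceLoader.load(MetricsManager.class).iterator();
    while (impls.hasNext()) {
      System.out.println("found " + impls.next().getClass().getName());
    }
  }
}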

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java b/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java
deleted file mode 100644
index 33ca738..0000000
--- a/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricCounterLong.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.metrics2.impl;
-
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.impl.MetricCounterLong;
-
-/**
- *
- */
-public class ExposedMetricCounterLong extends MetricCounterLong {
-
-  /**
-   * @param info
-   * @param value
-   */
-  public ExposedMetricCounterLong(MetricsInfo info, long value) {
-    super(info, value);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java b/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java
deleted file mode 100644
index bcb8b43..0000000
--- a/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/impl/ExposedMetricsRecordImpl.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.metrics2.impl;
-
-import java.util.List;
-
-import org.apache.hadoop.metrics2.AbstractMetric;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.hadoop.metrics2.impl.MetricsRecordImpl;
-
-/**
- * Helper class to access the package-private {@link MetricsRecordImpl}
- */
-@SuppressWarnings("javadoc")
-public class ExposedMetricsRecordImpl extends MetricsRecordImpl {
-
-  /**
-   * @param info
-   * @param timestamp
-   * @param tags
-   * @param metrics
-   */
-  public ExposedMetricsRecordImpl(MetricsInfo info, long timestamp, List<MetricsTag> tags,
-      Iterable<AbstractMetric> metrics) {
-    super(info, timestamp, tags, metrics);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java b/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java
deleted file mode 100644
index 6daf604..0000000
--- a/phoenix-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/ExposedMetricsInfoImpl.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.metrics2.lib;
-
-/**
- * Helper class to expose access to the {@link MetricsInfoImpl}
- */
-public class ExposedMetricsInfoImpl extends MetricsInfoImpl {
-
-    /**
-     * @param name
-     * @param description
-     */
-    public ExposedMetricsInfoImpl(String name, String description) {
-        super(name, description);
-    }
-}
\ No newline at end of file
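
All three Exposed* helpers rely on the same trick: the Hadoop classes they wrap
have package-private constructors, so a subclass compiled into the identical
package from the test tree can widen them to public. A generic sketch, with a
hypothetical library class standing in for MetricsRecordImpl and friends:

// In the library (constructor is package-private):
package metrics.sketch;

class Hidden {
  private final String name;
  Hidden(String name) { this.name = name; }  // only visible within the package
  String name() { return name; }
}

// In the test tree, declared in the SAME package, so super(...) is legal:
package metrics.sketch;

public class ExposedHidden extends Hidden {
  public ExposedHidden(String name) { super(name); }
}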

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/PhoenixMetricsWriterTest.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/PhoenixMetricsWriterTest.java b/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/PhoenixMetricsWriterTest.java
deleted file mode 100644
index f865723..0000000
--- a/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/PhoenixMetricsWriterTest.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.List;
-
-import org.apache.hadoop.metrics2.AbstractMetric;
-import org.apache.hadoop.metrics2.MetricsInfo;
-import org.apache.hadoop.metrics2.MetricsRecord;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.hadoop.metrics2.impl.ExposedMetricCounterLong;
-import org.apache.hadoop.metrics2.impl.ExposedMetricsRecordImpl;
-import org.apache.hadoop.metrics2.lib.ExposedMetricsInfoImpl;
-import org.apache.phoenix.metrics.MetricInfo;
-import org.apache.phoenix.metrics.MetricsWriter;
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-import org.apache.phoenix.metrics.PhoenixMetricTag;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import com.google.common.collect.Lists;
-
-/**
- * Test that we correctly convert between hadoop2 metrics2 and generic phoenix metrics.
- */
-public class PhoenixMetricsWriterTest {
-
-  @Test
-  public void testTranslation() throws Exception {
-    // hook up a sink we can test
-    MetricsWriter mockSink = Mockito.mock(MetricsWriter.class);
-
-    // writer that will translate to the sink (specific to hadoop version used)
-    PhoenixMetricsSink writer = new PhoenixMetricsSink();
-    writer.setWriterForTesting(mockSink);
-
-    // create a simple metrics record
-    final long traceid = 987654;
-    MetricsInfo info = new ExposedMetricsInfoImpl(TracingCompat.getTraceMetricName(traceid),
-        "Some generic trace");
-    // setup some metrics for the span
-    long spanid = 10;
-    AbstractMetric span = new ExposedMetricCounterLong(new ExposedMetricsInfoImpl(
-        MetricInfo.SPAN.traceName, ""), spanid);
-    long parentid = 11;
-    AbstractMetric parent = new ExposedMetricCounterLong(new ExposedMetricsInfoImpl(
-        MetricInfo.PARENT.traceName, ""), parentid);
-    long startTime = 12;
-    AbstractMetric start = new ExposedMetricCounterLong(new ExposedMetricsInfoImpl(
-        MetricInfo.START.traceName, ""), startTime);
-    long endTime = 13;
-    AbstractMetric end = new ExposedMetricCounterLong(new ExposedMetricsInfoImpl(
-        MetricInfo.END.traceName, ""), endTime);
-    final List<AbstractMetric> metrics = Lists.newArrayList(span, parent, start, end);
-
-    // create an annotation as well
-    String annotation = "test annotation for a span";
-    MetricsTag tag = new MetricsTag(
-        new ExposedMetricsInfoImpl(MetricInfo.ANNOTATION.traceName, "0"), annotation);
-    String hostnameValue = "host-name.value";
-    MetricsTag hostname = new MetricsTag(new ExposedMetricsInfoImpl(MetricInfo.HOSTNAME.traceName,
-        ""), hostnameValue);
-    final List<MetricsTag> tags = Lists.newArrayList(hostname, tag);
-
-    MetricsRecord record = new ExposedMetricsRecordImpl(info, System.currentTimeMillis(), tags,
-        metrics);
-
-    // setup the mocking/validation for the sink
-    Mockito.doAnswer(new Answer<Void>() {
-
-      @Override
-      public Void answer(InvocationOnMock invocation) throws Throwable {
-        PhoenixMetricsRecord record = (PhoenixMetricsRecord) invocation.getArguments()[0];
-        //validate that we got the right fields in the record
-        assertEquals("phoenix.987654", record.name());
-        assertEquals("Some generic trace", record.description());
-        int count = 0;
-        for (PhoenixAbstractMetric metric : record.metrics()) {
-          count++;
-          //find the matching metric in the list
-          boolean found = false;
-          for(AbstractMetric expected : metrics){
-            if(expected.name().equals(metric.getName())){
-              found = true;
-              // make sure the rest of the info matches
-              assertEquals("Metric value mismatch", expected.value(), metric.value());
-            }
-          }
-          assertTrue("Didn't find an expected metric to match "+metric, found);
-        }
-        assertEquals("Number of metrics is received is wrong", metrics.size(), count);
-
-        count = 0;
-        for (PhoenixMetricTag tag : record.tags()) {
-          count++;
-          // find the matching metric in the list
-          boolean found = false;
-          for (MetricsTag expected : tags) {
-            if (expected.name().equals(tag.name())) {
-              found = true;
-              // make sure the rest of the info matches
-              assertEquals("Tag value mismatch", expected.value(), tag.value());
-              assertEquals("Tag description mismatch", expected.description(), tag.description());
-            }
-          }
-          assertTrue("Didn't find an expected metric to match " + tag, found);
-        }
-        assertEquals("Number of tags is received is wrong", tags.size(), count);
-        return null;
-      }
-
-    }).when(mockSink).addMetrics(Mockito.any(PhoenixMetricsRecord.class));
-
-    // actually do the update
-    writer.putMetrics(record);
-    writer.flush();
-
-    Mockito.verify(mockSink).addMetrics(Mockito.any(PhoenixMetricsRecord.class));
-    Mockito.verify(mockSink).flush();
-  }
-}
\ No newline at end of file
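
The doAnswer/verify combination above is a stock Mockito pattern: the Answer
inspects arguments at call time, and verify asserts the call actually happened.
A self-contained sketch (the interface and values are illustrative):

import static org.junit.Assert.assertEquals;

import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class DoAnswerSketchTest {
  interface Writer { void write(String record); }

  @Test
  public void answerInspectsArguments() {
    Writer mock = Mockito.mock(Writer.class);
    Mockito.doAnswer(new Answer<Void>() {
      @Override
      public Void answer(InvocationOnMock invocation) {
        // Runs when the mock is called; arguments are available for assertions.
        assertEquals("hello", invocation.getArguments()[0]);
        return null;
      }
    }).when(mock).write(Mockito.anyString());

    mock.write("hello");
    Mockito.verify(mock).write(Mockito.anyString());
  }
}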


[08/24] git commit: PHOENIX-1379 Wrong MultiIndexWriteFailureException when recovering local index table (Shunsuke Nakamura)

Posted by ja...@apache.org.
PHOENIX-1379 Wrong MultiIndexWriteFailureException when recovering local index table (Shunsuke Nakamura)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/351769c6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/351769c6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/351769c6

Branch: refs/heads/4.2
Commit: 351769c69e1d857fda48c2e8aec851384fcafb0e
Parents: 5093c2f
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 22:26:41 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 22:26:41 2014 -0700

----------------------------------------------------------------------
 .../index/write/recovery/TrackingParallelWriterIndexCommitter.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/351769c6/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
index 9a61191..9171b53 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/write/recovery/TrackingParallelWriterIndexCommitter.java
@@ -158,7 +158,7 @@ public class TrackingParallelWriterIndexCommitter implements IndexCommitter {
                                 if (indexRegion != null) {
                                     throwFailureIfDone();
                                     indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]));
-                                    return null;
+                                    return Boolean.TRUE;
                                 }
                             }
                         } catch (IOException ignord) {
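
The one-line fix suggests the committer inspects each task's Boolean result,
where a null reads as a failed write, so recovery wrongly raised
MultiIndexWriteFailureException for index tables that had in fact succeeded.
A sketch of the general hazard, assuming Callable<Boolean>-style tasks (all
names illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionTrackingSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>();
    tasks.add(new Callable<Boolean>() {
      @Override public Boolean call() { return Boolean.TRUE; } // explicit success
    });
    tasks.add(new Callable<Boolean>() {
      @Override public Boolean call() { return null; }         // ambiguous result
    });
    for (Future<Boolean> f : pool.invokeAll(tasks)) {
      // A tracker that treats anything but TRUE as a failure would flag the
      // second task even though it completed without an exception.
      System.out.println(Boolean.TRUE.equals(f.get())
          ? "success" : "treated as failure");
    }
    pool.shutdown();
  }
}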


[09/24] git commit: Merge branch '4.0' of https://git-wip-us.apache.org/repos/asf/phoenix into 4.0

Posted by ja...@apache.org.
Merge branch '4.0' of https://git-wip-us.apache.org/repos/asf/phoenix into 4.0


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7b571608
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7b571608
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7b571608

Branch: refs/heads/4.2
Commit: 7b5716084be1a462b2ca093c512b7a24476c3e5b
Parents: 351769c 00522cf
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 22:38:06 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 22:38:06 2014 -0700

----------------------------------------------------------------------
 bin/phoenix_utils.py                                               | 2 +-
 .../main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------



[19/24] git commit: PHOENIX-1390 Stats not updated on client after major compaction

Posted by ja...@apache.org.
PHOENIX-1390 Stats not updated on client after major compaction


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/7a8a023a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/7a8a023a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/7a8a023a

Branch: refs/heads/4.2
Commit: 7a8a023a3d2bdb694f02fc1560a0f5eb35294a96
Parents: 851f57a
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 12:44:37 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 12:44:37 2014 -0700

----------------------------------------------------------------------
 .../end2end/BaseClientManagedTimeIT.java        | 15 +++-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |  3 +-
 .../end2end/ClientTimeArithmeticQueryIT.java    | 43 +++++++++++
 .../phoenix/end2end/InMemoryOrderByIT.java      |  4 +-
 .../org/apache/phoenix/end2end/QueryIT.java     | 24 ++++--
 .../apache/phoenix/end2end/ReverseScanIT.java   |  2 +-
 .../org/apache/phoenix/end2end/SequenceIT.java  |  7 +-
 .../phoenix/end2end/SpooledOrderByIT.java       |  4 +-
 .../phoenix/end2end/StatsCollectorIT.java       | 55 +++++++++++++-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |  4 +-
 .../phoenix/compile/ExpressionCompiler.java     |  2 +-
 .../coprocessor/MetaDataEndpointImpl.java       | 35 ++-------
 .../UngroupedAggregateRegionObserver.java       | 21 ++++--
 .../org/apache/phoenix/query/QueryServices.java |  1 +
 .../phoenix/query/QueryServicesOptions.java     |  5 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  4 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  2 +-
 .../phoenix/schema/stats/PTableStats.java       |  7 ++
 .../phoenix/schema/stats/PTableStatsImpl.java   | 12 ++-
 .../schema/stats/StatisticsCollector.java       | 79 ++++++++++++--------
 .../phoenix/schema/stats/StatisticsScanner.java |  1 -
 .../phoenix/schema/stats/StatisticsUtil.java    |  6 +-
 .../phoenix/schema/stats/StatisticsWriter.java  | 39 ++++++----
 23 files changed, 259 insertions(+), 116 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
index 14dffcb..1acd5b3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
@@ -17,16 +17,21 @@
  */
 package org.apache.phoenix.end2end;
 
+import java.util.Map;
+
 import javax.annotation.concurrent.NotThreadSafe;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Maps;
+
 /**
  * Base class for tests that manage their own time stamps
  * We need to separate these from tests that rely on hbase to set
@@ -54,9 +59,17 @@ public abstract class BaseClientManagedTimeIT extends BaseTest {
         deletePriorTables(ts - 1, getUrl());    
     }
     
+    public static Map<String,String> getDefaultProps() {
+        Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+        // Must update config before starting server
+        props.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, Boolean.FALSE.toString());
+        return props;
+    }
+    
     @BeforeClass
     public static void doSetup() throws Exception {
-        setUpTestDriver(ReadOnlyProps.EMPTY_PROPS);
+        Map<String,String> props = getDefaultProps();
+        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
     
     @AfterClass

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index 7a3e86e..f3031f4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -48,7 +48,6 @@ import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 
 
@@ -70,7 +69,7 @@ public abstract class BaseQueryIT extends BaseClientManagedTimeIT {
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+        Map<String,String> props = getDefaultProps();
         props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
         props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
         // Make a small batch size to test multiple calls to reserve sequences

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
index 98b233c..d709b9c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ClientTimeArithmeticQueryIT.java
@@ -49,6 +49,7 @@ import java.util.Properties;
 
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -596,5 +597,47 @@ public class ClientTimeArithmeticQueryIT extends BaseQueryIT {
         }
     }
 
+    @Test
+    public void testDateDateSubtract() throws Exception {
+        String url;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
+        Connection conn = DriverManager.getConnection(url, props);
+        PreparedStatement statement = conn.prepareStatement("UPSERT INTO ATABLE(organization_id,entity_id,a_time) VALUES(?,?,?)");
+        statement.setString(1, getOrganizationId());
+        statement.setString(2, ROW2);
+        statement.setDate(3, date);
+        statement.execute();
+        statement.setString(2, ROW3);
+        statement.setDate(3, date);
+        statement.execute();
+        statement.setString(2, ROW4);
+        statement.setDate(3, new Date(date.getTime() + TestUtil.MILLIS_IN_DAY - 1));
+        statement.execute();
+        statement.setString(2, ROW6);
+        statement.setDate(3, new Date(date.getTime() + TestUtil.MILLIS_IN_DAY - 1));
+        statement.execute();
+        statement.setString(2, ROW9);
+        statement.setDate(3, date);
+        statement.execute();
+        conn.commit();
+        conn.close();
+
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 25);
+        conn = DriverManager.getConnection(url, props);
+        try {
+            statement = conn.prepareStatement("SELECT entity_id, b_string FROM ATABLE WHERE a_date - a_time > 1");
+            ResultSet rs = statement.executeQuery();
+            @SuppressWarnings("unchecked")
+            List<List<Object>> expectedResults = Lists.newArrayList(
+                    Arrays.<Object>asList(ROW3, E_VALUE),
+                    Arrays.<Object>asList( ROW6, E_VALUE), 
+                    Arrays.<Object>asList(ROW9, E_VALUE));
+            assertValuesEqualsResultSet(rs, expectedResults);
+        } finally {
+            conn.close();
+        }
+    }
 
 }
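
The new test leans on Phoenix date arithmetic: subtracting one date/time value
from another yields the difference in days as a DECIMAL, so a_date - a_time > 1
keeps exactly the rows whose a_time is more than one day before a_date. As a
worked example, if a_date is 2014-10-28 12:00:00 and a_time is
2014-10-26 12:00:00, the expression evaluates to 2.0 and the row qualifies; at
2014-10-27 13:00:00 it evaluates to about 0.96 and the row is filtered out.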

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
index 48a0581..533143c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/InMemoryOrderByIT.java
@@ -24,8 +24,6 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Maps;
-
 @Category(ClientManagedTimeTest.class)
 public class InMemoryOrderByIT extends OrderByIT {
 
@@ -35,7 +33,7 @@ public class InMemoryOrderByIT extends OrderByIT {
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
+        Map<String,String> props = getDefaultProps();
         props.put(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, Integer.toString(1024*1024));
         // Must update config before starting server
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
index f45b689..fe65e10 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
@@ -250,7 +250,7 @@ public class QueryIT extends BaseQueryIT {
     @Test
     public void testPointInTimeScan() throws Exception {
         // Override value that was set at creation time
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 1); // Run query at timestamp 5
+        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection upsertConn = DriverManager.getConnection(url, props);
         String upsertStmt =
@@ -267,13 +267,15 @@ public class QueryIT extends BaseQueryIT {
         stmt.setString(2, ROW4);
         stmt.setInt(3, 5);
         stmt.execute(); // should commit too
-        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);
+        Connection conn1 = DriverManager.getConnection(url, props);
         analyzeTable(conn1, "ATABLE");
         conn1.close();
         upsertConn.close();
 
         // Override value again, but should be ignored since it's past the SCN
-        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 3); // Run query at timestamp 5
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 30);
         upsertConn = DriverManager.getConnection(url, props);
         upsertConn.setAutoCommit(true); // Test auto commit
         // Insert all rows at ts
@@ -285,7 +287,7 @@ public class QueryIT extends BaseQueryIT {
         upsertConn.close();
         
         String query = "SELECT organization_id, a_string AS a FROM atable WHERE organization_id=? and a_integer = 5";
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2));
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20));
         Connection conn = DriverManager.getConnection(getUrl(), props);
         PreparedStatement statement = conn.prepareStatement(query);
         statement.setString(1, tenantId);
@@ -394,7 +396,7 @@ public class QueryIT extends BaseQueryIT {
             "    A_TIMESTAMP) " +
             "VALUES (?, ?, ?)";
         // Override value that was set at creation time
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 1);
+        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection upsertConn = DriverManager.getConnection(url, props);
         upsertConn.setAutoCommit(true); // Test auto commit
@@ -405,9 +407,12 @@ public class QueryIT extends BaseQueryIT {
         byte[] ts1 = PDataType.TIMESTAMP.toBytes(tsValue1);
         stmt.setTimestamp(3, tsValue1);
         stmt.execute();
-        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 15);       
+        Connection conn1 = DriverManager.getConnection(url, props);
         analyzeTable(conn1, "ATABLE");
         conn1.close();
+        
         updateStmt = 
             "upsert into " +
             "ATABLE(" +
@@ -426,15 +431,18 @@ public class QueryIT extends BaseQueryIT {
         stmt.setTime(4, new Time(tsValue2.getTime()));
         stmt.execute();
         upsertConn.close();
-        conn1 = DriverManager.getConnection(getUrl(), props);
+        
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 20);       
+        conn1 = DriverManager.getConnection(url, props);
         analyzeTable(conn1, "ATABLE");
         conn1.close();
+        
         analyzeTable(upsertConn, "ATABLE");
         assertTrue(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts2), new ImmutableBytesWritable(ts1)));
         assertFalse(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts1), new ImmutableBytesWritable(ts1)));
 
         String query = "SELECT entity_id, a_timestamp, a_time FROM aTable WHERE organization_id=? and a_timestamp > ?";
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 3)); // Execute at timestamp 2
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 30)); // Execute at timestamp ts + 30
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
             PreparedStatement statement = conn.prepareStatement(query);
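
The pattern repeated throughout this hunk, appending CurrentSCN to the JDBC
URL, pins every statement on that connection to a fixed HBase timestamp:
writes are stamped at the SCN and reads see only cells at or before it. A
minimal sketch (the localhost URL is illustrative; the tests build theirs via
getUrl()):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

import org.apache.phoenix.util.PhoenixRuntime;

public class ScnSketch {
  public static void main(String[] args) throws Exception {
    long ts = System.currentTimeMillis();
    String url = "jdbc:phoenix:localhost;"
        + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 10);
    Connection conn = DriverManager.getConnection(url, new Properties());
    // Statements here read as of ts + 10 and write at ts + 10; a later
    // connection must use a strictly larger SCN to observe these writes.
    conn.close();
  }
}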

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
index 26d6d4b..e279710 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
@@ -50,7 +50,7 @@ import com.google.common.collect.Maps;
 @Category(HBaseManagedTimeTest.class)
 public class ReverseScanIT extends BaseHBaseManagedTimeIT {
     @BeforeClass
-    @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
+    @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
index 4f2b9a9..b4b0b2e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SequenceIT.java
@@ -51,7 +51,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 @Category(ClientManagedTimeTest.class)
 public class SequenceIT extends BaseClientManagedTimeIT {
@@ -63,11 +62,9 @@ public class SequenceIT extends BaseClientManagedTimeIT {
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
-        // Make a small batch size to test multiple calls to reserve sequences
-        props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, Long.toString(BATCH_SIZE));
+        Map<String,String> props = getDefaultProps();
         // Must update config before starting server
+        props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, Long.toString(BATCH_SIZE));
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
index 2533a29..c35ecab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpooledOrderByIT.java
@@ -24,15 +24,13 @@ import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Maps;
-
 @Category(ClientManagedTimeTest.class)
 public class SpooledOrderByIT extends OrderByIT {
 
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
+        Map<String,String> props = getDefaultProps();
         props.put(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, Integer.toString(100));
         // Must update config before starting server
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 51ad543..b9a0e88 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.getAllSplits;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -27,9 +29,15 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
@@ -41,7 +49,8 @@ import com.google.common.collect.Maps;
 
 @Category(NeedsOwnMiniClusterTest.class)
 public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
-    
+    private static final String STATS_TEST_TABLE_NAME = "S";
+        
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
@@ -222,4 +231,48 @@ public class StatsCollectorIT extends BaseOwnClusterHBaseManagedTimeIT {
         return stmt;
     }
 
+    private void compactTable(Connection conn) throws IOException, InterruptedException, SQLException {
+        ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
+        HBaseAdmin admin = services.getAdmin();
+        try {
+            admin.flush(STATS_TEST_TABLE_NAME);
+            admin.majorCompact(STATS_TEST_TABLE_NAME);
+            Thread.sleep(10000); // FIXME: how do we know when compaction is done?
+        } finally {
+            admin.close();
+        }
+        services.clearCache();
+    }
+    
+    @Test
+    public void testCompactUpdatesStats() throws Exception {
+        int nRows = 10;
+        Connection conn;
+        PreparedStatement stmt;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.createStatement().execute("CREATE TABLE " + STATS_TEST_TABLE_NAME + "(k CHAR(1) PRIMARY KEY, v INTEGER) " + HColumnDescriptor.KEEP_DELETED_CELLS + "=" + Boolean.FALSE);
+        stmt = conn.prepareStatement("UPSERT INTO " + STATS_TEST_TABLE_NAME + " VALUES(?,?)");
+        for (int i = 0; i < nRows; i++) {
+            stmt.setString(1, Character.toString((char) ('a' + i)));
+            stmt.setInt(2, i);
+            stmt.executeUpdate();
+        }
+        conn.commit();
+        
+        compactTable(conn);
+        conn = DriverManager.getConnection(getUrl(), props);
+        List<KeyRange>keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+        assertEquals(nRows+1, keyRanges.size());
+        
+        int nDeletedRows = conn.createStatement().executeUpdate("DELETE FROM " + STATS_TEST_TABLE_NAME + " WHERE V < 5");
+        conn.commit();
+        assertEquals(5, nDeletedRows);
+        
+        compactTable(conn);
+        
+        keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME);
+        assertEquals(nRows/2+1, keyRanges.size());
+        
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 642ba62..ac54fe4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -55,8 +55,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.common.collect.Maps;
-
 @Category(ClientManagedTimeTest.class)
 public class UpsertSelectIT extends BaseClientManagedTimeIT {
 	
@@ -64,7 +62,7 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
   @BeforeClass
   @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
   public static void doSetup() throws Exception {
-      Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
+      Map<String,String> props = getDefaultProps();
       props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(500));
       props.put(QueryServices.THREAD_POOL_SIZE_ATTRIB, Integer.toString(64));
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index e06a88f..3876b8a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -891,7 +891,7 @@ public class ExpressionCompiler extends UnsupportedAllParseNodeVisitor<Expressio
                 if (isType1Date || isType2Date) {
                     if (isType1Date && isType2Date) {
                         i = 2;
-                        theType = PDataType.LONG;
+                        theType = PDataType.DECIMAL;
                     } else if (isType1Date && type2 != null
                             && type2.isCoercibleTo(PDataType.DECIMAL)) {
                         i = 2;

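In practical terms, the difference of two DATE values now compiles to DECIMAL (presumably fractional days, per Phoenix's day-based date arithmetic) rather than LONG. A minimal client-side sketch; the table and column names are illustrative, and java.sql/java.math imports are assumed:

    // Hedged sketch: read a DATE - DATE result as BigDecimal, not long.
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT created_date - modified_date FROM T");
    while (rs.next()) {
        BigDecimal days = rs.getBigDecimal(1); // e.g. 1.5 = one and a half days
    }
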
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index b90fb2e..3abd206 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -608,6 +608,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             try {
                 statsHTable = ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
                 stats = StatisticsUtil.readStatistics(statsHTable, physicalTableName.getBytes(), clientTimeStamp);
+                timeStamp = Math.max(timeStamp, stats.getTimestamp()); 
             } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
                 logger.warn(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " not online yet?");
             } finally {
@@ -1264,32 +1265,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
     }
     
-    private PTable incrementTableTimestamp(byte[] key, long clientTimeStamp) throws IOException, SQLException {
-        HRegion region = env.getRegion();
-        RowLock lid = region.getRowLock(key);
-        if (lid == null) {
-            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
-        }
-        try {
-            PTable table = doGetTable(key, clientTimeStamp, lid);
-            if (table != null) {
-                long tableTimeStamp = table.getTimeStamp() + 1;
-                List<Mutation> mutations = Lists.newArrayListWithExpectedSize(1);
-                Put p = new Put(key);
-                p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, tableTimeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
-                mutations.add(p);
-                region.mutateRowsWithLocks(mutations, Collections.<byte[]> emptySet());
-                
-                Cache<ImmutableBytesPtr, PTable> metaDataCache = GlobalCache.getInstance(env).getMetaDataCache();
-                ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
-                metaDataCache.invalidate(cacheKey);
-            }
-            return table;
-        } finally {
-            lid.release();
-        }
-    }
-    
     private PTable doGetTable(byte[] key, long clientTimeStamp) throws IOException, SQLException {
         return doGetTable(key, clientTimeStamp, null);
     }
@@ -1711,9 +1686,11 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         byte[] tableName = request.getTableName().toByteArray();
         try {
             byte[] tenantId = request.getTenantId().toByteArray();
-            long clientTimeStamp = request.getClientTimestamp();
-            byte[] tableKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
-            incrementTableTimestamp(tableKey, clientTimeStamp);
+            byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
+            ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
+            Cache<ImmutableBytesPtr, PTable> metaDataCache =
+                    GlobalCache.getInstance(this.env).getMetaDataCache();
+            metaDataCache.invalidate(cacheKey);
         } catch (Throwable t) {
             logger.error("incrementTableTimeStamp failed", t);
             ProtobufUtil.setControllerException(controller,

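The removed timestamp-bump path above is replaced by a plain invalidation of the server-side metadata cache (the RPC itself is renamed to clearTableFromCache later in this series). A condensed restatement of the new path, using the same types as the diff:

    // On the next getTable() the entry is rebuilt from SYSTEM.CATALOG,
    // so bumping the table row's timestamp is no longer needed.
    byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    Cache<ImmutableBytesPtr, PTable> metaDataCache =
            GlobalCache.getInstance(env).getMetaDataCache();
    metaDataCache.invalidate(new ImmutableBytesPtr(key));
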
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 710409f..aba35fe 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -72,6 +72,7 @@ import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.join.HashJoinInfo;
 import org.apache.phoenix.join.TupleProjector;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.ConstraintViolationException;
 import org.apache.phoenix.schema.PColumn;
@@ -459,9 +460,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         if (!table.getNameAsString().equals(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)
                 && scanType.equals(ScanType.COMPACT_DROP_DELETES)) {
             try {
-                // TODO: for users that manage timestamps themselves, we should provide
-                // a means of specifying/getting this.
-                long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
+                boolean useCurrentTime = 
+                        c.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+                                QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+                // Provides a means of clients controlling their timestamps to not use current time
+                // when background tasks are updating stats. Instead we track the max timestamp of
+                // the cells and use that.
+                long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
                 StatisticsCollector stats = new StatisticsCollector(c.getEnvironment(), table.getNameAsString(), clientTimeStamp);
                 internalScan =
                         stats.createCompactionScanner(c.getEnvironment().getRegion(), store, scanners, scanType, earliestPutTs, s);
@@ -485,9 +490,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         if (!table.getNameAsString().equals(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME)) {
             StatisticsCollector stats = null;
             try {
-                // TODO: for users that manage timestamps themselves, we should provide
-                // a means of specifying/getting this.
-                long clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
+                boolean useCurrentTime = 
+                        e.getEnvironment().getConfiguration().getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+                                QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+                // Provides a means of clients controlling their timestamps to not use current time
+                // when background tasks are updating stats. Instead we track the max timestamp of
+                // the cells and use that.
+                long clientTimeStamp = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : StatisticsCollector.NO_TIMESTAMP;
                 stats = new StatisticsCollector(e.getEnvironment(), table.getNameAsString(), clientTimeStamp);
                 stats.collectStatsDuringSplit(e.getEnvironment().getConfiguration(), l, r, region);
             } catch (IOException ioe) { 

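Both hooks are gated by a single boolean. A minimal sketch of opting out of wall-clock stats timestamps, assuming standard HBase Configuration plumbing; the literal key matches the STATS_USE_CURRENT_TIME_ATTRIB constant added below:

    // Hedged sketch: with this set to false, stats writes carry the max cell
    // timestamp tracked by StatisticsCollector instead of wall-clock time.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("phoenix.stats.useCurrentTime", false);
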
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index 7f000c0..72002ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -136,6 +136,7 @@ public interface QueryServices extends SQLCloseable {
     public static final String MIN_STATS_UPDATE_FREQ_MS_ATTRIB = "phoenix.stats.minUpdateFrequency";
     public static final String STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB = "phoenix.stats.guidepost.width";
     public static final String STATS_GUIDEPOST_PER_REGION_ATTRIB = "phoenix.stats.guidepost.per.region";
+    public static final String STATS_USE_CURRENT_TIME_ATTRIB = "phoenix.stats.useCurrentTime";
 
     public static final String SEQUENCE_SALT_BUCKETS_ATTRIB = "phoenix.sequence.saltBuckets";
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 7ee225b..7c8ecd4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -52,6 +52,7 @@ import static org.apache.phoenix.query.QueryServices.SPOOL_DIRECTORY;
 import static org.apache.phoenix.query.QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB;
+import static org.apache.phoenix.query.QueryServices.STATS_USE_CURRENT_TIME_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.THREAD_POOL_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.THREAD_TIMEOUT_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.USE_INDEXES_ATTRIB;
@@ -146,7 +147,8 @@ public class QueryServicesOptions {
     public static final double DEFAULT_TRACING_PROBABILITY_THRESHOLD = 0.05;
 
     public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 15 * 60000; // 15min
-    public static final int DEFAULT_GUIDE_POSTS_PER_REGION = 20;
+    public static final long DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES = 100 * 1024 *1024; // 100MB
+    public static final boolean DEFAULT_STATS_USE_CURRENT_TIME = true;
 
     public static final boolean DEFAULT_USE_REVERSE_SCAN = true;
     
@@ -175,6 +177,7 @@ public class QueryServicesOptions {
     public static QueryServicesOptions withDefaults() {
         Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
         QueryServicesOptions options = new QueryServicesOptions(config)
+            .setIfUnset(STATS_USE_CURRENT_TIME_ATTRIB, DEFAULT_STATS_USE_CURRENT_TIME)
             .setIfUnset(KEEP_ALIVE_MS_ATTRIB, DEFAULT_KEEP_ALIVE_MS)
             .setIfUnset(THREAD_POOL_SIZE_ATTRIB, DEFAULT_THREAD_POOL_SIZE)
             .setIfUnset(QUEUE_SIZE_ATTRIB, DEFAULT_QUEUE_SIZE)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index afe21e8..b763bbb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -613,13 +613,13 @@ public class MetaDataClient {
         Long scn = connection.getSCN();
         // Always invalidate the cache
         long clientTimeStamp = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
-        String query = "SELECT CURRENT_DATE() - " + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
+        String query = "SELECT CURRENT_DATE()," + LAST_STATS_UPDATE_TIME + " FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
                 + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
                 + " IS NULL AND " + REGION_NAME + " IS NULL AND " + LAST_STATS_UPDATE_TIME + " IS NOT NULL";
         ResultSet rs = connection.createStatement().executeQuery(query);
         long msSinceLastUpdate = Long.MAX_VALUE;
         if (rs.next()) {
-            msSinceLastUpdate = rs.getLong(1);
+            msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
         }
         if (msSinceLastUpdate < msMinBetweenUpdates) {
             return 0;

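This presumably pairs with the ExpressionCompiler change above: DATE - DATE now evaluates as DECIMAL, so subtracting in SQL and reading the result with getLong(1) is no longer a safe pattern. Instead both values are selected and differenced client-side; a condensed restatement:

    // Both columns come back as date/time values readable as epoch millis,
    // so the age of the last stats update is computed in Java.
    long msSinceLastUpdate = Long.MAX_VALUE;
    if (rs.next()) {
        msSinceLastUpdate = rs.getLong(1) - rs.getLong(2);
    }
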
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 2448f39..8f85ccc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -886,7 +886,7 @@ public class PTableImpl implements PTable {
             GuidePostsInfo info = new GuidePostsInfo(pTableStatsProto.getGuidePostsByteCount(), value);
             tableGuidePosts.put(pTableStatsProto.getKey().toByteArray(), info);
       }
-      PTableStats stats = new PTableStatsImpl(tableGuidePosts);
+      PTableStats stats = new PTableStatsImpl(tableGuidePosts, timeStamp);
 
       PName dataTableName = null;
       if (table.hasDataTableNameBytes()) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
index 3745487..435fe87 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStats.java
@@ -37,6 +37,11 @@ public interface PTableStats {
         public int getEstimatedSize() {
             return 0;
         }
+
+        @Override
+        public long getTimestamp() {
+            return StatisticsCollector.NO_TIMESTAMP;
+        }
     };
 
     /**
@@ -47,4 +52,6 @@ public interface PTableStats {
     SortedMap<byte[], GuidePostsInfo> getGuidePosts();
 
     int getEstimatedSize();
+    
+    long getTimestamp();
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
index dcf7b00..dc70e86 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/PTableStatsImpl.java
@@ -23,6 +23,7 @@ import java.util.SortedMap;
 import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.util.SizedUtil;
 
 import com.sun.istack.NotNull;
@@ -33,13 +34,15 @@ import com.sun.istack.NotNull;
 public class PTableStatsImpl implements PTableStats {
     private final SortedMap<byte[], GuidePostsInfo> guidePosts;
     private final int estimatedSize;
+    private final long timeStamp;
 
     public PTableStatsImpl() {
-        this(new TreeMap<byte[], GuidePostsInfo>(Bytes.BYTES_COMPARATOR));
+        this(new TreeMap<byte[], GuidePostsInfo>(Bytes.BYTES_COMPARATOR), MetaDataProtocol.MIN_TABLE_TIMESTAMP);
     }
 
-    public PTableStatsImpl(@NotNull SortedMap<byte[], GuidePostsInfo> guidePosts) {
+    public PTableStatsImpl(@NotNull SortedMap<byte[], GuidePostsInfo> guidePosts, long timeStamp) {
         this.guidePosts = guidePosts;
+        this.timeStamp = timeStamp;
         int estimatedSize = SizedUtil.OBJECT_SIZE + SizedUtil.INT_SIZE + SizedUtil.sizeOfTreeMap(guidePosts.size());
         for (Map.Entry<byte[], GuidePostsInfo> entry : guidePosts.entrySet()) {
             byte[] cf = entry.getKey();
@@ -84,4 +87,9 @@ public class PTableStatsImpl implements PTableStats {
     public int getEstimatedSize() {
         return estimatedSize;
     }
+
+    @Override
+    public long getTimestamp() {
+        return timeStamp;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
index 53bd18a..3511d12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollector.java
@@ -23,8 +23,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
@@ -45,12 +43,15 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.TimeKeeper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -64,33 +65,45 @@ import com.google.common.collect.Maps;
  * board for now.
  */
 public class StatisticsCollector {
+    private static final Logger logger = LoggerFactory.getLogger(StatisticsCollector.class);
+    public static final long NO_TIMESTAMP = -1;
 
     private Map<String, byte[]> minMap = Maps.newHashMap();
     private Map<String, byte[]> maxMap = Maps.newHashMap();
     private long guidepostDepth;
+    private boolean useCurrentTime;
+    private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
     private Map<String, Pair<Long,GuidePostsInfo>> guidePostsMap = Maps.newHashMap();
     // Tracks the bytecount per family if it has reached the guidePostsDepth
     private Map<ImmutableBytesPtr, Boolean> familyMap = Maps.newHashMap();
     protected StatisticsWriter statsTable;
-    // Ensures that either analyze or compaction happens at any point of time.
-    private static final Log LOG = LogFactory.getLog(StatisticsCollector.class);
 
     public StatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp) throws IOException {
         Configuration config = env.getConfiguration();
         HTableInterface statsHTable = env.getTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES));
-        long maxFileSize = statsHTable.getTableDescriptor().getMaxFileSize();
-        if (maxFileSize <= 0) { // HBase brain dead API doesn't give you the "real" max file size if it's not set...
-            maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
+        useCurrentTime = 
+            config.getBoolean(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, 
+                    QueryServicesOptions.DEFAULT_STATS_USE_CURRENT_TIME);
+        int guidepostPerRegion = config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, 0);
+        if (guidepostPerRegion > 0) {
+            long maxFileSize = statsHTable.getTableDescriptor().getMaxFileSize();
+            if (maxFileSize <= 0) { // HBase brain dead API doesn't give you the "real" max file size if it's not set...
+                maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
+            }
+            guidepostDepth = maxFileSize / guidepostPerRegion;
+        } else {
+            guidepostDepth = config.getLong(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
+                    QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES);
         }
-        guidepostDepth =
-            config.getLong(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
-                    maxFileSize / config.getInt(QueryServices.STATS_GUIDEPOST_PER_REGION_ATTRIB, 
-                                                QueryServicesOptions.DEFAULT_GUIDE_POSTS_PER_REGION));
         // Get the stats table associated with the current table on which the CP is
         // triggered
         this.statsTable = StatisticsWriter.newWriter(statsHTable, tableName, clientTimeStamp);
     }
     
+    public long getMaxTimeStamp() {
+        return maxTimeStamp;
+    }
+    
     public void close() throws IOException {
         this.statsTable.close();
     }
@@ -99,12 +112,12 @@ public class StatisticsCollector {
         try {
             ArrayList<Mutation> mutations = new ArrayList<Mutation>();
             writeStatsToStatsTable(region, true, mutations, TimeKeeper.SYSTEM.getCurrentTime());
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Committing new stats for the region " + region.getRegionInfo());
+            if (logger.isDebugEnabled()) {
+                logger.debug("Committing new stats for the region " + region.getRegionInfo());
             }
             commitStats(mutations);
         } catch (IOException e) {
-            LOG.error(e);
+            logger.error("Unable to commit new stats", e);
         } finally {
             clear();
         }
@@ -116,20 +129,20 @@ public class StatisticsCollector {
             // update the statistics table
             for (ImmutableBytesPtr fam : familyMap.keySet()) {
                 if (delete) {
-                    if(LOG.isDebugEnabled()) {
-                        LOG.debug("Deleting the stats for the region "+region.getRegionInfo());
+                    if(logger.isDebugEnabled()) {
+                        logger.debug("Deleting the stats for the region "+region.getRegionInfo());
                     }
                     statsTable.deleteStats(region.getRegionInfo().getRegionNameAsString(), this, Bytes.toString(fam.copyBytesIfNecessary()),
                             mutations);
                 }
-                if(LOG.isDebugEnabled()) {
-                    LOG.debug("Adding new stats for the region "+region.getRegionInfo());
+                if(logger.isDebugEnabled()) {
+                    logger.debug("Adding new stats for the region "+region.getRegionInfo());
                 }
                 statsTable.addStats((region.getRegionInfo().getRegionNameAsString()), this, Bytes.toString(fam.copyBytesIfNecessary()),
                         mutations);
             }
         } catch (IOException e) {
-            LOG.error("Failed to update statistics table!", e);
+            logger.error("Failed to update statistics table!", e);
             throw e;
         }
     }
@@ -147,7 +160,7 @@ public class StatisticsCollector {
                         mutations);
             }
         } catch (IOException e) {
-            LOG.error("Failed to delete from statistics table!", e);
+            logger.error("Failed to delete from statistics table!", e);
             throw e;
         }
     }
@@ -195,8 +208,8 @@ public class StatisticsCollector {
                 internalScan = new StoreScanner(store, store.getScanInfo(), scan, scanners, scanType,
                         smallestReadPoint, earliestPutTs);
             }
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Compaction scanner created for stats");
+            if (logger.isDebugEnabled()) {
+                logger.debug("Compaction scanner created for stats");
             }
             InternalScanner scanner = getInternalScanner(region, store, internalScan, store.getColumnFamilyName());
             if (scanner != null) {
@@ -212,22 +225,22 @@ public class StatisticsCollector {
             // Create a delete operation on the parent region
             // Then write the new guide posts for individual regions
             List<Mutation> mutations = Lists.newArrayListWithExpectedSize(3);
-            long currentTime = TimeKeeper.SYSTEM.getCurrentTime();
+            long currentTime = useCurrentTime ? TimeKeeper.SYSTEM.getCurrentTime() : -1;
             deleteStatsFromStatsTable(region, mutations, currentTime);
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Collecting stats for the daughter region " + l.getRegionInfo());
+            if (logger.isDebugEnabled()) {
+                logger.debug("Collecting stats for the daughter region " + l.getRegionInfo());
             }
             collectStatsForSplitRegions(conf, l, mutations, currentTime);
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Collecting stats for the daughter region " + r.getRegionInfo());
+            if (logger.isDebugEnabled()) {
+                logger.debug("Collecting stats for the daughter region " + r.getRegionInfo());
             }
             collectStatsForSplitRegions(conf, r, mutations, currentTime);
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Committing stats for the daughter regions as part of split " + r.getRegionInfo());
+            if (logger.isDebugEnabled()) {
+                logger.debug("Committing stats for the daughter regions as part of split " + r.getRegionInfo());
             }
             commitStats(mutations);
         } catch (IOException e) {
-            LOG.error("Error while capturing stats after split of region "
+            logger.error("Error while capturing stats after split of region "
                     + region.getRegionInfo().getRegionNameAsString(), e);
         }
     }
@@ -244,13 +257,13 @@ public class StatisticsCollector {
             count = scanRegion(scanner, count);
             writeStatsToStatsTable(daughter, false, mutations, currentTime);
         } catch (IOException e) {
-            LOG.error(e);
+            logger.error("Unable to collects stats during split", e);
             toThrow = e;
         } finally {
                 try {
                     if (scanner != null) scanner.close();
                 } catch (IOException e) {
-                    LOG.error(e);
+                    logger.error("Unable to close scanner after split", e);
                     if (toThrow != null) toThrow = e;
                 } finally {
                     if (toThrow != null) throw toThrow;
@@ -278,6 +291,7 @@ public class StatisticsCollector {
         this.minMap.clear();
         this.guidePostsMap.clear();
         this.familyMap.clear();
+        maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
     }
 
     public void updateStatistic(KeyValue kv) {
@@ -302,6 +316,7 @@ public class StatisticsCollector {
                 maxMap.put(fam, row);
             }
         }
+        maxTimeStamp = Math.max(maxTimeStamp, kv.getTimestamp());
         // TODO : This can be moved to an interface so that we could collect guide posts in different ways
         Pair<Long,GuidePostsInfo> gps = guidePostsMap.get(fam);
         if (gps == null) {

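To summarize the sizing rule introduced in the constructor above (a sketch mirroring its logic; the 10GB figure is illustrative):

    // Per-region count wins when set; otherwise the explicit width applies
    // (default 100MB in this commit).
    long maxFileSize = 10L * 1024 * 1024 * 1024; // e.g. region max file size
    int perRegion = config.getInt("phoenix.stats.guidepost.per.region", 0);
    long guidepostDepth = perRegion > 0
            ? maxFileSize / perRegion
            : config.getLong("phoenix.stats.guidepost.width", 100L * 1024 * 1024);
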
http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
index 60b9601..3a84cfc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsScanner.java
@@ -35,7 +35,6 @@ public class StatisticsScanner implements InternalScanner {
 
     public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, HRegion region,
             InternalScanner delegate, byte[] family) {
-        // should there be only one tracker?
         this.tracker = tracker;
         this.stats = stats;
         this.delegate = delegate;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index b8d64bd..eb183e6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -72,6 +72,7 @@ public class StatisticsUtil {
         s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
         ResultScanner scanner = statsHTable.getScanner(s);
         Result result = null;
+        long timeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
         TreeMap<byte[], GuidePostsInfo> guidePostsPerCf = new TreeMap<byte[], GuidePostsInfo>(Bytes.BYTES_COMPARATOR);
         while ((result = scanner.next()) != null) {
             CellScanner cellScanner = result.cellScanner();
@@ -88,10 +89,13 @@ public class StatisticsUtil {
                 if (oldInfo != null) {
                     newInfo.combine(oldInfo);
                 }
+                if (current.getTimestamp() > timeStamp) {
+                    timeStamp = current.getTimestamp();
+                }
             }
         }
         if (!guidePostsPerCf.isEmpty()) {
-            return new PTableStatsImpl(guidePostsPerCf);
+            return new PTableStatsImpl(guidePostsPerCf, timeStamp);
         }
         return PTableStats.EMPTY_STATS;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/7a8a023a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 6da135e..22f0ead 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -62,7 +62,9 @@ public class StatisticsWriter implements Closeable {
             clientTimeStamp = TimeKeeper.SYSTEM.getCurrentTime();
         }
         StatisticsWriter statsTable = new StatisticsWriter(hTable, tableName, clientTimeStamp);
-        statsTable.commitLastStatsUpdatedTime();
+        if (clientTimeStamp != StatisticsCollector.NO_TIMESTAMP) { // Otherwise we do this later as we don't know the ts yet
+            statsTable.commitLastStatsUpdatedTime();
+        }
         return statsTable;
     }
 
@@ -101,26 +103,31 @@ public class StatisticsWriter implements Closeable {
      */
     public void addStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations) throws IOException {
         if (tracker == null) { return; }
-
+        boolean useMaxTimeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP;
+        long timeStamp = clientTimeStamp;
+        if (useMaxTimeStamp) { // When using max timestamp, we write the update time later because we only know the ts now
+            timeStamp = tracker.getMaxTimeStamp();
+            mutations.add(getLastStatsUpdatedTimePut(timeStamp));
+        }
         byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
                 PDataType.VARCHAR.toBytes(regionName));
         Put put = new Put(prefix);
         GuidePostsInfo gp = tracker.getGuidePosts(fam);
         if (gp != null) {
             put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_COUNT_BYTES,
-                    clientTimeStamp, PDataType.LONG.toBytes((gp.getGuidePosts().size())));
+                    timeStamp, PDataType.LONG.toBytes((gp.getGuidePosts().size())));
             put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES,
-                    clientTimeStamp, PDataType.VARBINARY.toBytes(gp.toBytes()));
+                    timeStamp, PDataType.VARBINARY.toBytes(gp.toBytes()));
             put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES,
-                    clientTimeStamp, PDataType.LONG.toBytes(gp.getByteCount()));
+                    timeStamp, PDataType.LONG.toBytes(gp.getByteCount()));
         }
         put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MIN_KEY_BYTES,
-                clientTimeStamp, PDataType.VARBINARY.toBytes(tracker.getMinKey(fam)));
+                timeStamp, PDataType.VARBINARY.toBytes(tracker.getMinKey(fam)));
         put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.MAX_KEY_BYTES,
-                clientTimeStamp, PDataType.VARBINARY.toBytes(tracker.getMaxKey(fam)));
+                timeStamp, PDataType.VARBINARY.toBytes(tracker.getMaxKey(fam)));
         // Add our empty column value so queries behave correctly
         put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES,
-                clientTimeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
+                timeStamp, ByteUtil.EMPTY_BYTE_ARRAY);
         mutations.add(put);
     }
 
@@ -153,21 +160,27 @@ public class StatisticsWriter implements Closeable {
         }
     }
 
-    private void commitLastStatsUpdatedTime() throws IOException {
-        // Always use wallclock time for this, as it's a mechanism to prevent
-        // stats from being collected too often.
+    private Put getLastStatsUpdatedTimePut(long timeStamp) {
         long currentTime = TimeKeeper.SYSTEM.getCurrentTime();
         byte[] prefix = tableName;
         Put put = new Put(prefix);
-        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, clientTimeStamp,
+        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.LAST_STATS_UPDATE_TIME_BYTES, timeStamp,
                 PDataType.DATE.toBytes(new Date(currentTime)));
+        return put;
+    }
+
+    private void commitLastStatsUpdatedTime() throws IOException {
+        // Always use wallclock time for this, as it's a mechanism to prevent
+        // stats from being collected too often.
+        Put put = getLastStatsUpdatedTimePut(clientTimeStamp);
         statisticsTable.put(put);
     }
     
     public void deleteStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations)
             throws IOException {
+        long timeStamp = clientTimeStamp == StatisticsCollector.NO_TIMESTAMP ? tracker.getMaxTimeStamp() : clientTimeStamp;
         byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
                 PDataType.VARCHAR.toBytes(regionName));
-        mutations.add(new Delete(prefix, clientTimeStamp - 1));
+        mutations.add(new Delete(prefix, timeStamp - 1));
     }
 }
\ No newline at end of file
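Putting the two timestamp modes side by side (a summary of the logic above, not new behavior):

    // Default mode: clientTimeStamp is wall-clock time and is used directly;
    // LAST_STATS_UPDATE_TIME is committed up front in newWriter().
    // Client-managed mode (phoenix.stats.useCurrentTime=false): clientTimeStamp
    // is NO_TIMESTAMP, so cells are written at the collector's max cell
    // timestamp and the update-time put is added late, once that ts is known.
    long ts = (clientTimeStamp == StatisticsCollector.NO_TIMESTAMP)
            ? tracker.getMaxTimeStamp()
            : clientTimeStamp;
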


[04/24] git commit: PHOENIX-1366 ORDINAL_POSITION incorrect for multi-tenant table over tenant-specific connection (Bruno Dumon)

Posted by ja...@apache.org.
PHOENIX-1366 ORDINAL_POSITION incorrect for multi-tenant table over tenant-specific connection (Bruno Dumon)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/553fb4b3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/553fb4b3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/553fb4b3

Branch: refs/heads/4.2
Commit: 553fb4b33541531045435ed18e8ed5798edd6017
Parents: 4753c4e
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 20:46:03 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 20:46:03 2014 -0700

----------------------------------------------------------------------
 .../end2end/TenantSpecificTablesDDLIT.java      | 23 ++++++++++-----
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   | 31 ++++++++++++++++----
 2 files changed, 42 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/553fb4b3/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 589e963..42fe5b8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -22,6 +22,8 @@ import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_DROP_PK;
 import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MODIFY_VIEW_PK;
 import static org.apache.phoenix.exception.SQLExceptionCode.CANNOT_MUTATE_TABLE;
 import static org.apache.phoenix.exception.SQLExceptionCode.TABLE_UNDEFINED;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
@@ -554,21 +556,22 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
             // make sure tenants see parent table's columns and their own
             rs = meta.getColumns(null, null, StringUtil.escapeLike(TENANT_TABLE_NAME) + "%", null);
             assertTrue(rs.next());
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "user");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "user", 1);
             assertTrue(rs.next());
             // (tenant_id column is not visible in tenant-specific connection)
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_type_id");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_type_id", 2);
+            assertEquals(1, rs.getInt(KEY_SEQ));
             assertTrue(rs.next());
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "id");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "id", 3);
             assertTrue(rs.next());
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_col");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_col", 4);
             assertTrue(rs.next());
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "user");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "user", 1);
             assertTrue(rs.next());
             // (tenant_id column is not visible in tenant-specific connection)
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "id");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "id", 2);
             assertTrue(rs.next());
-            assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "tenant_col");
+            assertColumnMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "tenant_col", 3);
             assertFalse(rs.next()); 
         }
         finally {
@@ -587,4 +590,10 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
         assertEquals(table, rs.getString("TABLE_NAME"));
         assertEquals(SchemaUtil.normalizeIdentifier(column), rs.getString("COLUMN_NAME"));
     }
+
+    private void assertColumnMetaData(ResultSet rs, String schema, String table, String column, int ordinalPosition)
+            throws SQLException {
+        assertColumnMetaData(rs, schema, table, column);
+        assertEquals(ordinalPosition, rs.getInt(ORDINAL_POSITION));
+    }
 }
\ No newline at end of file
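A hedged illustration of what the fixed metadata looks like from a tenant-specific connection: the hidden tenant_id PK column is skipped, and the remaining columns now report a gap-free 1..N ordinal sequence (connection and table names are illustrative):

    DatabaseMetaData meta = tenantConn.getMetaData();
    ResultSet rs = meta.getColumns(null, null, "TENANT_TABLE", null);
    while (rs.next()) {
        // e.g. user -> 1, tenant_type_id -> 2, id -> 3, tenant_col -> 4
        System.out.println(rs.getString("COLUMN_NAME") + " -> "
                + rs.getInt("ORDINAL_POSITION"));
    }
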

http://git-wip-us.apache.org/repos/asf/phoenix/blob/553fb4b3/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index a4976cb..ba77f6d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -26,8 +26,12 @@ import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.ExpressionProjector;
@@ -56,6 +60,7 @@ import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ByteUtil;
@@ -400,7 +405,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 SQL_DATA_TYPE + "," +
                 SQL_DATETIME_SUB + "," +
                 CHAR_OCTET_LENGTH + "," +
-                ORDINAL_POSITION + "," +
+                "CASE WHEN TENANT_POS_SHIFT THEN ORDINAL_POSITION-1 ELSE ORDINAL_POSITION END AS " + ORDINAL_POSITION + "," +
                 "CASE " + NULLABLE + " WHEN " + DatabaseMetaData.attributeNoNulls +  " THEN '" + Boolean.FALSE.toString() + "' WHEN " + DatabaseMetaData.attributeNullable + " THEN '" + Boolean.TRUE.toString() + "' END AS " + IS_NULLABLE + "," +
                 SCOPE_CATALOG + "," +
                 SCOPE_SCHEMA + "," +
@@ -412,8 +417,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 DATA_TYPE + " " + TYPE_ID + "," +// raw type id for potential internal consumption
                 VIEW_CONSTANT + "," +
                 MULTI_TENANT + "," +
-                KEY_SEQ +
-                " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS);
+                "CASE WHEN TENANT_POS_SHIFT THEN KEY_SEQ-1 ELSE KEY_SEQ END AS " + KEY_SEQ +
+                " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(TENANT_POS_SHIFT BOOLEAN)");
         StringBuilder where = new StringBuilder();
         addTenantIdFilter(where, catalog);
         if (schemaPattern != null) {
@@ -458,7 +463,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
         } else {
             buf.append(" where " + where);
         }
-        buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + ORDINAL_POSITION);
+        buf.append(" order by " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + SYSTEM_CATALOG_ALIAS + "." + ORDINAL_POSITION);
 
         Statement stmt;
         if (isTenantSpecificConnection) {
@@ -494,6 +499,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
         private final int multiTenantIndex;
         private final int keySeqIndex;
         private boolean inMultiTenantTable;
+        private boolean tenantColumnSkipped;
 
         private TenantColumnFilteringIterator(ResultIterator delegate, RowProjector rowProjector) throws SQLException {
             super(delegate);
@@ -512,15 +518,30 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                     && getColumn(tuple, columnFamilyIndex) == null && getColumn(tuple, columnNameIndex) == null) {
                 // new table, check if it is multitenant
                 inMultiTenantTable = getColumn(tuple, multiTenantIndex) == Boolean.TRUE;
+                tenantColumnSkipped = false;
                 // skip row representing table
                 tuple = super.next();
             }
 
-            if (tuple != null && inMultiTenantTable && new Short((short)1).equals(getColumn(tuple, keySeqIndex))) {
+            if (tuple != null && inMultiTenantTable && !tenantColumnSkipped
+                    && new Long(1L).equals(getColumn(tuple, keySeqIndex))) {
+                tenantColumnSkipped = true;
                 // skip tenant id primary key column
                 return next();
             }
 
+            if (tuple != null && tenantColumnSkipped) {
+                ResultTuple resultTuple = (ResultTuple)tuple;
+                List<Cell> cells = resultTuple.getResult().listCells();
+                KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
+                        Bytes.toBytes("TENANT_POS_SHIFT"), PDataType.TRUE_BYTES);
+                List<Cell> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
+                newCells.addAll(cells);
+                newCells.add(kv);
+                Collections.sort(newCells, KeyValue.COMPARATOR);
+                resultTuple.setResult(Result.create(newCells));
+            }
+
             return tuple;
         }
 

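The (TENANT_POS_SHIFT BOOLEAN) suffix uses Phoenix's dynamic-column syntax: the column is not declared in SYSTEM.CATALOG, but the iterator above injects a cell for it into each row that follows a skipped tenant column, so the CASE expressions shift those positions down by one. A generic sketch of the same syntax against a hypothetical table MY_TABLE:

    // DYN_COL exists only in this query, not in MY_TABLE's schema; rows
    // without a stored cell for it simply return null.
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT k, DYN_COL FROM MY_TABLE(DYN_COL BOOLEAN)");
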

[20/24] git commit: Rename method in MetaDataService

Posted by ja...@apache.org.
Rename method in MetaDataService


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b3542285
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b3542285
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b3542285

Branch: refs/heads/4.2
Commit: b3542285f2960cf23f7d7dc7d26321c556aa75eb
Parents: 7a8a023
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 13:01:47 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 13:01:47 2014 -0700

----------------------------------------------------------------------
 .../coprocessor/MetaDataEndpointImpl.java       |   8 +-
 .../coprocessor/generated/MetaDataProtos.java   | 351 +++++++++----------
 .../query/ConnectionQueryServicesImpl.java      |  14 +-
 phoenix-protocol/src/main/MetaDataService.proto |   8 +-
 4 files changed, 190 insertions(+), 191 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3542285/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 3abd206..7604663 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -94,14 +94,14 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.AddColumnRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.DropColumnRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.DropTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
@@ -1680,8 +1680,8 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
     
     @Override
-    public void incrementTableTimeStamp(RpcController controller, IncrementTableTimeStampRequest request,
-            RpcCallback<IncrementTableTimeStampResponse> done) {
+    public void clearTableFromCache(RpcController controller, ClearTableFromCacheRequest request,
+            RpcCallback<ClearTableFromCacheResponse> done) {
         byte[] schemaName = request.getSchemaName().toByteArray();
         byte[] tableName = request.getTableName().toByteArray();
         try {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3542285/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
index 8d153b2..f49a35e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/MetaDataProtos.java
@@ -6316,7 +6316,7 @@ public final class MetaDataProtos {
     // @@protoc_insertion_point(class_scope:GetVersionResponse)
   }
 
-  public interface IncrementTableTimeStampRequestOrBuilder
+  public interface ClearTableFromCacheRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
     // required bytes tenantId = 1;
@@ -6360,24 +6360,24 @@ public final class MetaDataProtos {
     long getClientTimestamp();
   }
   /**
-   * Protobuf type {@code IncrementTableTimeStampRequest}
+   * Protobuf type {@code ClearTableFromCacheRequest}
    */
-  public static final class IncrementTableTimeStampRequest extends
+  public static final class ClearTableFromCacheRequest extends
       com.google.protobuf.GeneratedMessage
-      implements IncrementTableTimeStampRequestOrBuilder {
-    // Use IncrementTableTimeStampRequest.newBuilder() to construct.
-    private IncrementTableTimeStampRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements ClearTableFromCacheRequestOrBuilder {
+    // Use ClearTableFromCacheRequest.newBuilder() to construct.
+    private ClearTableFromCacheRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private IncrementTableTimeStampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private ClearTableFromCacheRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final IncrementTableTimeStampRequest defaultInstance;
-    public static IncrementTableTimeStampRequest getDefaultInstance() {
+    private static final ClearTableFromCacheRequest defaultInstance;
+    public static ClearTableFromCacheRequest getDefaultInstance() {
       return defaultInstance;
     }
 
-    public IncrementTableTimeStampRequest getDefaultInstanceForType() {
+    public ClearTableFromCacheRequest getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -6387,7 +6387,7 @@ public final class MetaDataProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private IncrementTableTimeStampRequest(
+    private ClearTableFromCacheRequest(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -6444,28 +6444,28 @@ public final class MetaDataProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_descriptor;
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheRequest_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_fieldAccessorTable
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheRequest_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.Builder.class);
+              org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<IncrementTableTimeStampRequest> PARSER =
-        new com.google.protobuf.AbstractParser<IncrementTableTimeStampRequest>() {
-      public IncrementTableTimeStampRequest parsePartialFrom(
+    public static com.google.protobuf.Parser<ClearTableFromCacheRequest> PARSER =
+        new com.google.protobuf.AbstractParser<ClearTableFromCacheRequest>() {
+      public ClearTableFromCacheRequest parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new IncrementTableTimeStampRequest(input, extensionRegistry);
+        return new ClearTableFromCacheRequest(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<IncrementTableTimeStampRequest> getParserForType() {
+    public com.google.protobuf.Parser<ClearTableFromCacheRequest> getParserForType() {
       return PARSER;
     }
 
@@ -6622,10 +6622,10 @@ public final class MetaDataProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)) {
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest)) {
         return super.equals(obj);
       }
-      org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest) obj;
+      org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest) obj;
 
       boolean result = true;
       result = result && (hasTenantId() == other.hasTenantId());
@@ -6682,53 +6682,53 @@ public final class MetaDataProtos {
       return hash;
     }
 
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(byte[] data)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseDelimitedFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -6737,7 +6737,7 @@ public final class MetaDataProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest prototype) {
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -6749,24 +6749,24 @@ public final class MetaDataProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code IncrementTableTimeStampRequest}
+     * Protobuf type {@code ClearTableFromCacheRequest}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequestOrBuilder {
+       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequestOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheRequest_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_fieldAccessorTable
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheRequest_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.Builder.class);
+                org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.Builder.class);
       }
 
-      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.newBuilder()
+      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -6803,23 +6803,23 @@ public final class MetaDataProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampRequest_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheRequest_descriptor;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest getDefaultInstanceForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.getDefaultInstance();
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest build() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest result = buildPartial();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest build() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest buildPartial() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest(this);
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest buildPartial() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -6844,16 +6844,16 @@ public final class MetaDataProtos {
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest) {
-          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)other);
+        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest other) {
-        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest other) {
+        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.getDefaultInstance()) return this;
         if (other.hasTenantId()) {
           setTenantId(other.getTenantId());
         }
@@ -6894,11 +6894,11 @@ public final class MetaDataProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest parsedMessage = null;
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -7050,39 +7050,39 @@ public final class MetaDataProtos {
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:IncrementTableTimeStampRequest)
+      // @@protoc_insertion_point(builder_scope:ClearTableFromCacheRequest)
     }
 
     static {
-      defaultInstance = new IncrementTableTimeStampRequest(true);
+      defaultInstance = new ClearTableFromCacheRequest(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:IncrementTableTimeStampRequest)
+    // @@protoc_insertion_point(class_scope:ClearTableFromCacheRequest)
   }
 
-  public interface IncrementTableTimeStampResponseOrBuilder
+  public interface ClearTableFromCacheResponseOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
   }
   /**
-   * Protobuf type {@code IncrementTableTimeStampResponse}
+   * Protobuf type {@code ClearTableFromCacheResponse}
    */
-  public static final class IncrementTableTimeStampResponse extends
+  public static final class ClearTableFromCacheResponse extends
       com.google.protobuf.GeneratedMessage
-      implements IncrementTableTimeStampResponseOrBuilder {
-    // Use IncrementTableTimeStampResponse.newBuilder() to construct.
-    private IncrementTableTimeStampResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements ClearTableFromCacheResponseOrBuilder {
+    // Use ClearTableFromCacheResponse.newBuilder() to construct.
+    private ClearTableFromCacheResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private IncrementTableTimeStampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private ClearTableFromCacheResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final IncrementTableTimeStampResponse defaultInstance;
-    public static IncrementTableTimeStampResponse getDefaultInstance() {
+    private static final ClearTableFromCacheResponse defaultInstance;
+    public static ClearTableFromCacheResponse getDefaultInstance() {
       return defaultInstance;
     }
 
-    public IncrementTableTimeStampResponse getDefaultInstanceForType() {
+    public ClearTableFromCacheResponse getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -7092,7 +7092,7 @@ public final class MetaDataProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private IncrementTableTimeStampResponse(
+    private ClearTableFromCacheResponse(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -7128,28 +7128,28 @@ public final class MetaDataProtos {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_descriptor;
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheResponse_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_fieldAccessorTable
+      return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheResponse_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.Builder.class);
+              org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<IncrementTableTimeStampResponse> PARSER =
-        new com.google.protobuf.AbstractParser<IncrementTableTimeStampResponse>() {
-      public IncrementTableTimeStampResponse parsePartialFrom(
+    public static com.google.protobuf.Parser<ClearTableFromCacheResponse> PARSER =
+        new com.google.protobuf.AbstractParser<ClearTableFromCacheResponse>() {
+      public ClearTableFromCacheResponse parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new IncrementTableTimeStampResponse(input, extensionRegistry);
+        return new ClearTableFromCacheResponse(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<IncrementTableTimeStampResponse> getParserForType() {
+    public com.google.protobuf.Parser<ClearTableFromCacheResponse> getParserForType() {
       return PARSER;
     }
 
@@ -7193,10 +7193,10 @@ public final class MetaDataProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse)) {
+      if (!(obj instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse)) {
         return super.equals(obj);
       }
-      org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) obj;
+      org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse other = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse) obj;
 
       boolean result = true;
       result = result &&
@@ -7217,53 +7217,53 @@ public final class MetaDataProtos {
       return hash;
     }
 
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(byte[] data)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseDelimitedFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parseFrom(
+    public static org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -7272,7 +7272,7 @@ public final class MetaDataProtos {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse prototype) {
+    public static Builder newBuilder(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -7284,24 +7284,24 @@ public final class MetaDataProtos {
       return builder;
     }
     /**
-     * Protobuf type {@code IncrementTableTimeStampResponse}
+     * Protobuf type {@code ClearTableFromCacheResponse}
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponseOrBuilder {
+       implements org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponseOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheResponse_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_fieldAccessorTable
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheResponse_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.Builder.class);
+                org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.class, org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.Builder.class);
       }
 
-      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.newBuilder()
+      // Construct using org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -7330,38 +7330,38 @@ public final class MetaDataProtos {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_IncrementTableTimeStampResponse_descriptor;
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.internal_static_ClearTableFromCacheResponse_descriptor;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse getDefaultInstanceForType() {
-        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse getDefaultInstanceForType() {
+        return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance();
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse build() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse result = buildPartial();
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse build() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse buildPartial() {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse(this);
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse buildPartial() {
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse result = new org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse(this);
         onBuilt();
         return result;
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) {
-          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse)other);
+        if (other instanceof org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse) {
+          return mergeFrom((org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse other) {
-        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse other) {
+        if (other == org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance()) return this;
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -7374,11 +7374,11 @@ public final class MetaDataProtos {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse parsedMessage = null;
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -7388,15 +7388,15 @@ public final class MetaDataProtos {
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:IncrementTableTimeStampResponse)
+      // @@protoc_insertion_point(builder_scope:ClearTableFromCacheResponse)
     }
 
     static {
-      defaultInstance = new IncrementTableTimeStampResponse(true);
+      defaultInstance = new ClearTableFromCacheResponse(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:IncrementTableTimeStampResponse)
+    // @@protoc_insertion_point(class_scope:ClearTableFromCacheResponse)
   }
 
   /**
@@ -7472,12 +7472,12 @@ public final class MetaDataProtos {
           com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse> done);
 
       /**
-       * <code>rpc incrementTableTimeStamp(.IncrementTableTimeStampRequest) returns (.IncrementTableTimeStampResponse);</code>
+       * <code>rpc clearTableFromCache(.ClearTableFromCacheRequest) returns (.ClearTableFromCacheResponse);</code>
        */
-      public abstract void incrementTableTimeStamp(
+      public abstract void clearTableFromCache(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
-          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done);
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest request,
+          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse> done);
 
     }
 
@@ -7549,11 +7549,11 @@ public final class MetaDataProtos {
         }
 
         @java.lang.Override
-        public  void incrementTableTimeStamp(
+        public  void clearTableFromCache(
             com.google.protobuf.RpcController controller,
-            org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
-            com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done) {
-          impl.incrementTableTimeStamp(controller, request, done);
+            org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest request,
+            com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse> done) {
+          impl.clearTableFromCache(controller, request, done);
         }
 
       };
@@ -7595,7 +7595,7 @@ public final class MetaDataProtos {
             case 7:
               return impl.getVersion(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest)request);
             case 8:
-              return impl.incrementTableTimeStamp(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)request);
+              return impl.clearTableFromCache(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -7627,7 +7627,7 @@ public final class MetaDataProtos {
             case 7:
               return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest.getDefaultInstance();
             case 8:
-              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance();
+              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -7659,7 +7659,7 @@ public final class MetaDataProtos {
             case 7:
               return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse.getDefaultInstance();
             case 8:
-              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance();
+              return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -7733,12 +7733,12 @@ public final class MetaDataProtos {
         com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse> done);
 
     /**
-     * <code>rpc incrementTableTimeStamp(.IncrementTableTimeStampRequest) returns (.IncrementTableTimeStampResponse);</code>
+     * <code>rpc clearTableFromCache(.ClearTableFromCacheRequest) returns (.ClearTableFromCacheResponse);</code>
      */
-    public abstract void incrementTableTimeStamp(
+    public abstract void clearTableFromCache(
         com.google.protobuf.RpcController controller,
-        org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
-        com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done);
+        org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest request,
+        com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse> done);
 
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
@@ -7803,8 +7803,8 @@ public final class MetaDataProtos {
               done));
           return;
         case 8:
-          this.incrementTableTimeStamp(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest)request,
-            com.google.protobuf.RpcUtil.<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse>specializeCallback(
+          this.clearTableFromCache(controller, (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest)request,
+            com.google.protobuf.RpcUtil.<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse>specializeCallback(
               done));
           return;
         default:
@@ -7838,7 +7838,7 @@ public final class MetaDataProtos {
         case 7:
           return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest.getDefaultInstance();
         case 8:
-          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest.getDefaultInstance();
+          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -7870,7 +7870,7 @@ public final class MetaDataProtos {
         case 7:
           return org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse.getDefaultInstance();
         case 8:
-          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance();
+          return org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -8012,19 +8012,19 @@ public final class MetaDataProtos {
             org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse.getDefaultInstance()));
       }
 
-      public  void incrementTableTimeStamp(
+      public  void clearTableFromCache(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request,
-          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse> done) {
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest request,
+          com.google.protobuf.RpcCallback<org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse> done) {
         channel.callMethod(
           getDescriptor().getMethods().get(8),
           controller,
           request,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance(),
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance(),
           com.google.protobuf.RpcUtil.generalizeCallback(
             done,
-            org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.class,
-            org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance()));
+            org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.class,
+            org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance()));
       }
     }
 
@@ -8074,9 +8074,9 @@ public final class MetaDataProtos {
           org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest request)
           throws com.google.protobuf.ServiceException;
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse incrementTableTimeStamp(
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse clearTableFromCache(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request)
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest request)
           throws com.google.protobuf.ServiceException;
     }
 
@@ -8183,15 +8183,15 @@ public final class MetaDataProtos {
       }
 
 
-      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse incrementTableTimeStamp(
+      public org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse clearTableFromCache(
           com.google.protobuf.RpcController controller,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest request)
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest request)
           throws com.google.protobuf.ServiceException {
-        return (org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse) channel.callBlockingMethod(
+        return (org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse) channel.callBlockingMethod(
           getDescriptor().getMethods().get(8),
           controller,
           request,
-          org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse.getDefaultInstance());
+          org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse.getDefaultInstance());
       }
 
     }
@@ -8255,15 +8255,15 @@ public final class MetaDataProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_GetVersionResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_IncrementTableTimeStampRequest_descriptor;
+    internal_static_ClearTableFromCacheRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_IncrementTableTimeStampRequest_fieldAccessorTable;
+      internal_static_ClearTableFromCacheRequest_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_IncrementTableTimeStampResponse_descriptor;
+    internal_static_ClearTableFromCacheResponse_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_IncrementTableTimeStampResponse_fieldAccessorTable;
+      internal_static_ClearTableFromCacheResponse_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -8291,33 +8291,32 @@ public final class MetaDataProtos {
       "st\022\036\n\026tableMetadataMutations\030\001 \003(\014\"\023\n\021Cl" +
       "earCacheRequest\"\024\n\022ClearCacheResponse\"\023\n" +
       "\021GetVersionRequest\"%\n\022GetVersionResponse" +
-      "\022\017\n\007version\030\001 \002(\003\"r\n\036IncrementTableTimeS" +
-      "tampRequest\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaN",
-      "ame\030\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTi" +
-      "mestamp\030\004 \002(\003\"!\n\037IncrementTableTimeStamp" +
-      "Response*\212\002\n\014MutationCode\022\030\n\024TABLE_ALREA" +
-      "DY_EXISTS\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLU" +
-      "MN_NOT_FOUND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020" +
-      "\003\022\035\n\031CONCURRENT_TABLE_MUTATION\020\004\022\027\n\023TABL" +
-      "E_NOT_IN_REGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006" +
-      "\022\034\n\030UNALLOWED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_" +
-      "COLUMNS\020\010\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t2\241\004" +
-      "\n\017MetaDataService\022/\n\010getTable\022\020.GetTable",
-      "Request\032\021.MetaDataResponse\0225\n\013createTabl" +
-      "e\022\023.CreateTableRequest\032\021.MetaDataRespons" +
-      "e\0221\n\tdropTable\022\021.DropTableRequest\032\021.Meta" +
-      "DataResponse\0221\n\taddColumn\022\021.AddColumnReq" +
-      "uest\032\021.MetaDataResponse\0223\n\ndropColumn\022\022." +
-      "DropColumnRequest\032\021.MetaDataResponse\022?\n\020" +
-      "updateIndexState\022\030.UpdateIndexStateReque" +
-      "st\032\021.MetaDataResponse\0225\n\nclearCache\022\022.Cl" +
-      "earCacheRequest\032\023.ClearCacheResponse\0225\n\n" +
-      "getVersion\022\022.GetVersionRequest\032\023.GetVers",
-      "ionResponse\022\\\n\027incrementTableTimeStamp\022\037" +
-      ".IncrementTableTimeStampRequest\032 .Increm" +
-      "entTableTimeStampResponseBB\n(org.apache." +
-      "phoenix.coprocessor.generatedB\016MetaDataP" +
-      "rotosH\001\210\001\001\240\001\001"
+      "\022\017\n\007version\030\001 \002(\003\"n\n\032ClearTableFromCache" +
+      "Request\022\020\n\010tenantId\030\001 \002(\014\022\022\n\nschemaName\030",
+      "\002 \002(\014\022\021\n\ttableName\030\003 \002(\014\022\027\n\017clientTimest" +
+      "amp\030\004 \002(\003\"\035\n\033ClearTableFromCacheResponse" +
+      "*\212\002\n\014MutationCode\022\030\n\024TABLE_ALREADY_EXIST" +
+      "S\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\024\n\020COLUMN_NOT_F" +
+      "OUND\020\002\022\031\n\025COLUMN_ALREADY_EXISTS\020\003\022\035\n\031CON" +
+      "CURRENT_TABLE_MUTATION\020\004\022\027\n\023TABLE_NOT_IN" +
+      "_REGION\020\005\022\025\n\021NEWER_TABLE_FOUND\020\006\022\034\n\030UNAL" +
+      "LOWED_TABLE_MUTATION\020\007\022\021\n\rNO_PK_COLUMNS\020" +
+      "\010\022\032\n\026PARENT_TABLE_NOT_FOUND\020\t2\225\004\n\017MetaDa" +
+      "taService\022/\n\010getTable\022\020.GetTableRequest\032",
+      "\021.MetaDataResponse\0225\n\013createTable\022\023.Crea" +
+      "teTableRequest\032\021.MetaDataResponse\0221\n\tdro" +
+      "pTable\022\021.DropTableRequest\032\021.MetaDataResp" +
+      "onse\0221\n\taddColumn\022\021.AddColumnRequest\032\021.M" +
+      "etaDataResponse\0223\n\ndropColumn\022\022.DropColu" +
+      "mnRequest\032\021.MetaDataResponse\022?\n\020updateIn" +
+      "dexState\022\030.UpdateIndexStateRequest\032\021.Met" +
+      "aDataResponse\0225\n\nclearCache\022\022.ClearCache" +
+      "Request\032\023.ClearCacheResponse\0225\n\ngetVersi" +
+      "on\022\022.GetVersionRequest\032\023.GetVersionRespo",
+      "nse\022P\n\023clearTableFromCache\022\033.ClearTableF" +
+      "romCacheRequest\032\034.ClearTableFromCacheRes" +
+      "ponseBB\n(org.apache.phoenix.coprocessor." +
+      "generatedB\016MetaDataProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -8390,17 +8389,17 @@ public final class MetaDataProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_GetVersionResponse_descriptor,
               new java.lang.String[] { "Version", });
-          internal_static_IncrementTableTimeStampRequest_descriptor =
+          internal_static_ClearTableFromCacheRequest_descriptor =
             getDescriptor().getMessageTypes().get(11);
-          internal_static_IncrementTableTimeStampRequest_fieldAccessorTable = new
+          internal_static_ClearTableFromCacheRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_IncrementTableTimeStampRequest_descriptor,
+              internal_static_ClearTableFromCacheRequest_descriptor,
               new java.lang.String[] { "TenantId", "SchemaName", "TableName", "ClientTimestamp", });
-          internal_static_IncrementTableTimeStampResponse_descriptor =
+          internal_static_ClearTableFromCacheResponse_descriptor =
             getDescriptor().getMessageTypes().get(12);
-          internal_static_IncrementTableTimeStampResponse_fieldAccessorTable = new
+          internal_static_ClearTableFromCacheResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_IncrementTableTimeStampResponse_descriptor,
+              internal_static_ClearTableFromCacheResponse_descriptor,
               new java.lang.String[] { });
           return null;
         }

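Everything in MetaDataProtos.java is protoc-generated from MetaDataService.proto (shown at the end of this commit), which is why the rename appears as hundreds of mechanical substitutions rather than a hand edit. A minimal sketch of the generated builder/parse round trip, using made-up schema and table names:

    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest;

    import com.google.protobuf.ByteString;

    public class RequestRoundTrip {
        public static void main(String[] args) throws Exception {
            // All four fields are marked required in the .proto, so build()
            // would throw if any of them were left unset.
            ClearTableFromCacheRequest request = ClearTableFromCacheRequest.newBuilder()
                    .setTenantId(ByteString.EMPTY)
                    .setSchemaName(ByteString.copyFromUtf8("MY_SCHEMA"))
                    .setTableName(ByteString.copyFromUtf8("MY_TABLE"))
                    .setClientTimestamp(System.currentTimeMillis())
                    .build();
            byte[] wire = request.toByteArray();
            // Protobuf serializes field numbers, not names, so the rename
            // leaves the payload's wire format unchanged.
            ClearTableFromCacheRequest parsed = ClearTableFromCacheRequest.parseFrom(wire);
            System.out.println(parsed.getClientTimestamp() == request.getClientTimestamp());
        }
    }

Only the service method name is client-visible across this change; the descriptor lookup sketch after the .proto diff below illustrates that.
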
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3542285/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index c6daaef..4f3a346 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -80,14 +80,14 @@ import org.apache.phoenix.coprocessor.generated.MetaDataProtos;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.AddColumnRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest;
+import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.DropColumnRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.DropTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetTableRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionRequest;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampRequest;
-import org.apache.phoenix.coprocessor.generated.MetaDataProtos.IncrementTableTimeStampResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
@@ -1895,17 +1895,17 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             HTableInterface htable = this.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
             try {
                 htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-                        new Batch.Call<MetaDataService, IncrementTableTimeStampResponse>() {
+                        new Batch.Call<MetaDataService, ClearTableFromCacheResponse>() {
                             @Override
-                            public IncrementTableTimeStampResponse call(MetaDataService instance) throws IOException {
+                            public ClearTableFromCacheResponse call(MetaDataService instance) throws IOException {
                                 ServerRpcController controller = new ServerRpcController();
-                                BlockingRpcCallback<IncrementTableTimeStampResponse> rpcCallback = new BlockingRpcCallback<IncrementTableTimeStampResponse>();
-                                IncrementTableTimeStampRequest.Builder builder = IncrementTableTimeStampRequest.newBuilder();
+                                BlockingRpcCallback<ClearTableFromCacheResponse> rpcCallback = new BlockingRpcCallback<ClearTableFromCacheResponse>();
+                                ClearTableFromCacheRequest.Builder builder = ClearTableFromCacheRequest.newBuilder();
                                 builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantId));
                                 builder.setTableName(HBaseZeroCopyByteString.wrap(tableName));
                                 builder.setSchemaName(HBaseZeroCopyByteString.wrap(schemaName));
                                 builder.setClientTimestamp(clientTS);
-                                instance.incrementTableTimeStamp(controller, builder.build(), rpcCallback);
+                                instance.clearTableFromCache(controller, builder.build(), rpcCallback);
                                 if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
                                 return rpcCallback.get();
                             }

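The Batch.Call pattern above fans the RPC out across every region of SYSTEM.CATALOG. For a single region, the generated blocking stub offers a more compact alternative; a sketch under the assumption that `table` is an open HTableInterface on SYSTEM.CATALOG and `row` picks the target region:

    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheRequest;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse;
    import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;

    public class BlockingStubSketch {
        // `table` and `row` are assumptions; the row key determines which
        // region's endpoint instance services the call.
        static ClearTableFromCacheResponse clearOneRegion(HTableInterface table,
                byte[] row, ClearTableFromCacheRequest request) throws Exception {
            CoprocessorRpcChannel channel = table.coprocessorService(row);
            MetaDataService.BlockingInterface service =
                    MetaDataService.newBlockingStub(channel);
            // A null controller is acceptable for the blocking stub; errors
            // surface as ServiceException instead.
            return service.clearTableFromCache(null, request);
        }
    }

Phoenix uses the batched form because the metadata cache must be cleared on every region server hosting a piece of SYSTEM.CATALOG, not just one.
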
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b3542285/phoenix-protocol/src/main/MetaDataService.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/MetaDataService.proto b/phoenix-protocol/src/main/MetaDataService.proto
index accd5f2..a8d72da 100644
--- a/phoenix-protocol/src/main/MetaDataService.proto
+++ b/phoenix-protocol/src/main/MetaDataService.proto
@@ -92,14 +92,14 @@ message GetVersionResponse {
   required int64 version = 1;
 }
 
-message IncrementTableTimeStampRequest {
+message ClearTableFromCacheRequest {
   required bytes tenantId = 1;
   required bytes schemaName  = 2;
   required bytes tableName = 3;
   required int64 clientTimestamp = 4;
 }
 
-message IncrementTableTimeStampResponse {
+message ClearTableFromCacheResponse {
 }
 
 service MetaDataService {
@@ -127,6 +127,6 @@ service MetaDataService {
    rpc getVersion(GetVersionRequest)
     returns (GetVersionResponse);
    
-   rpc incrementTableTimeStamp(IncrementTableTimeStampRequest)
-    returns (IncrementTableTimeStampResponse);
+   rpc clearTableFromCache(ClearTableFromCacheRequest)
+    returns (ClearTableFromCacheResponse);
 }


[06/24] git commit: PHOENIX-1366 Use static constants

Posted by ja...@apache.org.
PHOENIX-1366 Use static constants


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ba96d70e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ba96d70e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ba96d70e

Branch: refs/heads/4.2
Commit: ba96d70e59e10714949eb847a4fe4b4cb7c0213e
Parents: 553fb4b
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 22:13:31 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 22:14:26 2014 -0700

----------------------------------------------------------------------
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   | 32 +++++++++++---------
 1 file changed, 18 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ba96d70e/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index ba77f6d..a03cda4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -29,7 +29,6 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.lang.StringEscapeUtils;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -259,6 +258,9 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     public static final String PARENT_TENANT_ID = "PARENT_TENANT_ID";
     public static final byte[] PARENT_TENANT_ID_BYTES = Bytes.toBytes(PARENT_TENANT_ID);
         
+    private static final String TENANT_POS_SHIFT = "TENANT_POS_SHIFT";
+    private static final byte[] TENANT_POS_SHIFT_BYTES = Bytes.toBytes(TENANT_POS_SHIFT);
+    
     private final PhoenixConnection connection;
     private final ResultSet emptyResultSet;
 
@@ -405,7 +407,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 SQL_DATA_TYPE + "," +
                 SQL_DATETIME_SUB + "," +
                 CHAR_OCTET_LENGTH + "," +
-                "CASE WHEN TENANT_POS_SHIFT THEN ORDINAL_POSITION-1 ELSE ORDINAL_POSITION END AS " + ORDINAL_POSITION + "," +
+                "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + ORDINAL_POSITION + "-1 ELSE " + ORDINAL_POSITION + " END AS " + ORDINAL_POSITION + "," +
                 "CASE " + NULLABLE + " WHEN " + DatabaseMetaData.attributeNoNulls +  " THEN '" + Boolean.FALSE.toString() + "' WHEN " + DatabaseMetaData.attributeNullable + " THEN '" + Boolean.TRUE.toString() + "' END AS " + IS_NULLABLE + "," +
                 SCOPE_CATALOG + "," +
                 SCOPE_SCHEMA + "," +
@@ -417,8 +419,8 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 DATA_TYPE + " " + TYPE_ID + "," +// raw type id for potential internal consumption
                 VIEW_CONSTANT + "," +
                 MULTI_TENANT + "," +
-                "CASE WHEN TENANT_POS_SHIFT THEN KEY_SEQ-1 ELSE KEY_SEQ END AS " + KEY_SEQ +
-                " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(TENANT_POS_SHIFT BOOLEAN)");
+                "CASE WHEN " + TENANT_POS_SHIFT + " THEN " + KEY_SEQ + "-1 ELSE " + KEY_SEQ + " END AS " + KEY_SEQ +
+                " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS + "(" + TENANT_POS_SHIFT + " BOOLEAN)");
         StringBuilder where = new StringBuilder();
         addTenantIdFilter(where, catalog);
         if (schemaPattern != null) {
@@ -523,23 +525,25 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
                 tuple = super.next();
             }
 
-            if (tuple != null && inMultiTenantTable && !tenantColumnSkipped
-                    && new Long(1L).equals(getColumn(tuple, keySeqIndex))) {
-                tenantColumnSkipped = true;
-                // skip tenant id primary key column
-                return next();
+            if (tuple != null && inMultiTenantTable && !tenantColumnSkipped) {
+                Object value = getColumn(tuple, keySeqIndex);
+                if (value != null && ((Number)value).longValue() == 1L) {
+                    tenantColumnSkipped = true;
+                    // skip tenant id primary key column
+                    return next();
+                }
             }
 
             if (tuple != null && tenantColumnSkipped) {
                 ResultTuple resultTuple = (ResultTuple)tuple;
-                List<Cell> cells = resultTuple.getResult().listCells();
-                KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
-                        Bytes.toBytes("TENANT_POS_SHIFT"), PDataType.TRUE_BYTES);
-                List<Cell> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
+                List<KeyValue> cells = resultTuple.getResult().list();
+                KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), TABLE_FAMILY_BYTES,
+                        TENANT_POS_SHIFT_BYTES, PDataType.TRUE_BYTES);
+                List<KeyValue> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
                 newCells.addAll(cells);
                 newCells.add(kv);
                 Collections.sort(newCells, KeyValue.COMPARATOR);
-                resultTuple.setResult(Result.create(newCells));
+                resultTuple.setResult(new Result(newCells));
             }
 
             return tuple;


[07/24] git commit: PHOENIX-1366 Use static constants

Posted by ja...@apache.org.
PHOENIX-1366 Use static constants


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5093c2f2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5093c2f2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5093c2f2

Branch: refs/heads/4.2
Commit: 5093c2f262480d04f8495c55d23960b89a674c6e
Parents: ba96d70
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Oct 26 22:25:32 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Sun Oct 26 22:25:32 2014 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5093c2f2/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index a03cda4..8edc7ae 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -29,6 +29,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.lang.StringEscapeUtils;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -536,14 +537,14 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
 
             if (tuple != null && tenantColumnSkipped) {
                 ResultTuple resultTuple = (ResultTuple)tuple;
-                List<KeyValue> cells = resultTuple.getResult().list();
+                List<Cell> cells = resultTuple.getResult().listCells();
                 KeyValue kv = new KeyValue(resultTuple.getResult().getRow(), TABLE_FAMILY_BYTES,
                         TENANT_POS_SHIFT_BYTES, PDataType.TRUE_BYTES);
-                List<KeyValue> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
+                List<Cell> newCells = Lists.newArrayListWithCapacity(cells.size() + 1);
                 newCells.addAll(cells);
                 newCells.add(kv);
                 Collections.sort(newCells, KeyValue.COMPARATOR);
-                resultTuple.setResult(new Result(newCells));
+                resultTuple.setResult(Result.create(newCells));
             }
 
             return tuple;
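
For context on the behavior being corrected here: over a tenant-specific
connection the tenant-id PK column is hidden, so the positions reported by
DatabaseMetaData must be shifted down by one. A minimal sketch of observing
this through plain JDBC (the URL, tenant id, and table name below are
illustrative, not taken from this patch):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.util.Properties;

    public class TenantOrdinalPositionSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // "TenantId" is PhoenixRuntime.TENANT_ID_ATTRIB; the value is made up
            props.setProperty("TenantId", "acme");
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            DatabaseMetaData md = conn.getMetaData();
            // For a multi-tenant table, the tenant-id column is skipped and the
            // remaining columns' ORDINAL_POSITION/KEY_SEQ come back shifted by one.
            ResultSet rs = md.getColumns(null, null, "MULTI_TENANT_TABLE", null);
            while (rs.next()) {
                System.out.println(rs.getString("COLUMN_NAME") + " -> "
                        + rs.getInt("ORDINAL_POSITION"));
            }
            rs.close();
            conn.close();
        }
    }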


[11/24] git commit: PHOENIX-1381 NPE in CellUtil.matchingFamily() for IndexedKeyValue (Jeffrey Zhong)

Posted by ja...@apache.org.
PHOENIX-1381 NPE in CellUtil.matchingFamily() for IndexedKeyValue (Jeffrey Zhong)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b84b91ee
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b84b91ee
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b84b91ee

Branch: refs/heads/4.2
Commit: b84b91eeebc31fafced60e96a23c9e484500d44c
Parents: f5a49bf
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 27 09:13:11 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Oct 27 09:13:11 2014 -0700

----------------------------------------------------------------------
 .../hbase/index/wal/IndexedKeyValue.java        | 46 +++++++++++++++++++-
 1 file changed, 44 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b84b91ee/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
index 0be1a6b..0270de5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/wal/IndexedKeyValue.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 
 public class IndexedKeyValue extends KeyValue {
-    public static final byte [] COLUMN_FAMILY = Bytes.toBytes("INDEXEDKEYVALUE_FAKED_FAMILY");
+    public static final byte [] COLUMN_QUALIFIER = Bytes.toBytes("INDEXEDKEYVALUE_FAKED_COLUMN");
   
     private static int calcHashCode(ImmutableBytesPtr indexTableName, Mutation mutation) {
         final int prime = 31;
@@ -71,9 +71,51 @@ public class IndexedKeyValue extends KeyValue {
      */
     @Override
     public byte [] getFamily() {
-      return COLUMN_FAMILY;
+      return WALEdit.METAFAMILY;
     }
     
+    @Override
+    public byte[] getFamilyArray() {
+        return WALEdit.METAFAMILY;
+    }
+
+    /**
+     * @return Family offset
+     */
+    @Override
+    public int getFamilyOffset() {
+        return 0;
+    }
+
+    /**
+     * @return Family length
+     */
+    @Override
+    public byte getFamilyLength() {
+        return (byte) WALEdit.METAFAMILY.length;
+    }
+
+    @Override
+    public byte[] getQualifierArray() {
+        return COLUMN_QUALIFIER;
+    }
+
+    /**
+     * @return Qualifier offset
+     */
+    @Override
+    public int getQualifierOffset() {
+        return 0;
+    }
+
+    /**
+     * @return Qualifier length
+     */
+    @Override
+    public int getQualifierLength() {
+        return COLUMN_QUALIFIER.length;
+    }
+
     /**
      * This is a KeyValue that shouldn't actually be replayed/replicated, so we always mark it as 
      * an {@link WALEdit#METAFAMILY} so it isn't replayed/replicated via the normal replay mechanism
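
The root cause, roughly: CellUtil works against the Cell interface accessors
(getFamilyArray()/getFamilyOffset()/getFamilyLength()) rather than the
deprecated getFamily(), so a KeyValue subclass that only overrides getFamily()
leaves those accessors reading an empty backing buffer. A minimal sketch of
the kind of check that was failing, using a plain KeyValue as a stand-in for
IndexedKeyValue:

    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MatchingFamilySketch {
        public static void main(String[] args) {
            KeyValue kv = new KeyValue(Bytes.toBytes("row"), WALEdit.METAFAMILY,
                    Bytes.toBytes("q"), Bytes.toBytes("v"));
            // matchingFamily() reads getFamilyArray()/Offset/Length; with the
            // overrides added in this patch, IndexedKeyValue answers these
            // consistently instead of tripping over the missing family bytes.
            System.out.println(CellUtil.matchingFamily(kv, WALEdit.METAFAMILY)); // true
        }
    }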


[24/24] git commit: Update CHANGES and fix Apache RAT warning

Posted by ja...@apache.org.
Update CHANGES and fix Apache RAT warning


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5614dbd5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5614dbd5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5614dbd5

Branch: refs/heads/4.2
Commit: 5614dbd5eb196cd13ee8c8ea75ee7927cd6644a3
Parents: 6477e0f
Author: Mujtaba <mu...@apache.org>
Authored: Tue Oct 28 16:17:13 2014 -0700
Committer: Mujtaba <mu...@apache.org>
Committed: Tue Oct 28 16:17:13 2014 -0700

----------------------------------------------------------------------
 CHANGES                                            | 14 ++++++++++++++
 .../org/apache/phoenix/trace/TracingTestUtil.java  | 17 +++++++++++++++++
 2 files changed, 31 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5614dbd5/CHANGES
----------------------------------------------------------------------
diff --git a/CHANGES b/CHANGES
index 840a91a..745a7c6 100644
--- a/CHANGES
+++ b/CHANGES
@@ -6,6 +6,8 @@ Release Notes - Phoenix - Version 4.2
     * [PHOENIX-943] - Handle pushed down post-filters for subquery in joins with limit and non-groupby aggregation
     * [PHOENIX-945] - Support correlated subqueries in comparison without ANY/SOME/ALL
     * [PHOENIX-1085] - Commonize logic for adding salt byte and adding region start key in ParallelIterators
+    * [PHOENIX-1168] - Support non-correlated sub-queries in where clause having a comparison operator with no modifier or a comparison operator modified by ANY, SOME or ALL
+    * [PHOENIX-1170] - Change status of local index during splitting to prevent usage when slower than query through data table
     * [PHOENIX-1249] - Support local immutable index 
     * [PHOENIX-1259] - Perform partial scan for ANALYZE when table salted or local index
     * [PHOENIX-1263] - Only cache guideposts on physical PTable
@@ -23,7 +25,9 @@ Release Notes - Phoenix - Version 4.2
 
 ** Bug
     * [PHOENIX-105] - Remove org.apache.commons.csv source once available in Maven repo
+    * [PHOENIX-897] - psql command doesn't allow using certain characters in invocation
     * [PHOENIX-941] - Parallelize within regions to prevent rpc timeout
+    * [PHOENIX-944] - Support derived tables in FROM clause that needs extra steps of client-side aggregation or other processing
     * [PHOENIX-973] - Lexer skips unexpected characters
     * [PHOENIX-1044] - Phoenix-Pig: No results returned unless all used columns are selected
     * [PHOENIX-1095] - Cannot add column to SYSTEM.CATALOG using Alter Table 
@@ -63,6 +67,7 @@ Release Notes - Phoenix - Version 4.2
     * [PHOENIX-1281] - Each MultiKeyValueTuple.setKeyValues creates a new immutable list object
     * [PHOENIX-1284] - Override config properties for unit tests not making it to server
     * [PHOENIX-1285] - Override default for histogram depth in QueryServicesTestImpl
+    * [PHOENIX-1286] - Remove hadoop2 compat modules
     * [PHOENIX-1288] - Selecting more than 2 array elements via index fails with ArrayIndexOutOfBoundsException
     * [PHOENIX-1289] - Drop index during upsert may abort RS
     * [PHOENIX-1298] - Queries on fixed width type columns that have an index declared on them don't use that index
@@ -92,9 +97,18 @@ Release Notes - Phoenix - Version 4.2
     * [PHOENIX-1361] - Sequence value goes backwards if sequence validated before reserved
     * [PHOENIX-1364] - Close tracing scope to stop excessive tracing
     * [PHOENIX-1365] - Make sequence salt buckets configurable
+    * [PHOENIX-1366] - ORDINAL_POSITION incorrect for multi-tenant table over tenant-specific connection
     * [PHOENIX-1368] - Persist link from VIEW back to its child VIEW
     * [PHOENIX-1369] - Add back encode/decode methods as deprecated
     * [PHOENIX-1370] - Allow query timeout to differ from RPC timeout
+    * [PHOENIX-1376] - java.lang.NullPointerException occurs in JDBC driver
+    * [PHOENIX-1379] - Wrong MultiIndexWriteFailureException when recovering local index table
+    * [PHOENIX-1381] - NPE in CellUtil.matchingFamily() for IndexedKeyValue
+    * [PHOENIX-1382] - Phoenix 4.2 RC Issue
+    * [PHOENIX-1385] - Adding, dropping and adding columns fails with NPE
+    * [PHOENIX-1386] - ANY function only works with absolute value and doesn't work with other parameters  
+    * [PHOENIX-1390] - Stats not updated on client after major compaction
+    * [PHOENIX-1391] - Remove obsolete hint
 
 ** Improvement
     * [PHOENIX-619] - Support DELETE over table with immutable index when possible

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5614dbd5/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
index d502175..91c9962 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.phoenix.trace;
 
 import org.apache.hadoop.metrics2.MetricsSink;


[12/24] git commit: PHOENIX-1385 Adding, dropping and adding columns fails with NPE (Samarth Jain, James Taylor)

Posted by ja...@apache.org.
PHOENIX-1385 Adding, dropping and adding columns fails with NPE (Samarth Jain, James Taylor)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f4d8bb01
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f4d8bb01
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f4d8bb01

Branch: refs/heads/4.2
Commit: f4d8bb01374eae5ab2ded67d225de4028797a5ab
Parents: b84b91e
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 27 13:35:49 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Oct 27 13:35:49 2014 -0700

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    | 22 +++++++-
 .../apache/phoenix/jdbc/PhoenixConnection.java  |  8 +--
 .../query/ConnectionQueryServicesImpl.java      |  4 +-
 .../query/ConnectionlessQueryServicesImpl.java  |  6 +--
 .../query/DelegateConnectionQueryServices.java  |  6 +--
 .../apache/phoenix/query/MetaDataMutated.java   |  2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  6 +--
 .../apache/phoenix/schema/PMetaDataImpl.java    | 53 +++++++++++---------
 8 files changed, 64 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 98a98d2..5745bf0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -892,4 +892,24 @@ public class AlterTableIT extends BaseHBaseManagedTimeIT {
         pstmt2.close();
         conn1.close();
     }
-}
+    
+    @Test
+    public void testAddColumnsUsingNewConnection() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String ddl = "CREATE TABLE T (\n"
+                +"ID1 VARCHAR(15) NOT NULL,\n"
+                +"ID2 VARCHAR(15) NOT NULL,\n"
+                +"CREATED_DATE DATE,\n"
+                +"CREATION_TIME BIGINT,\n"
+                +"LAST_USED DATE,\n"
+                +"CONSTRAINT PK PRIMARY KEY (ID1, ID2))";
+        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        conn1.createStatement().execute(ddl);
+        ddl = "ALTER TABLE T ADD STRING VARCHAR, STRING_DATA_TYPES VARCHAR";
+        conn1.createStatement().execute(ddl);
+        ddl = "ALTER TABLE T DROP COLUMN STRING, STRING_DATA_TYPES";
+        conn1.createStatement().execute(ddl);
+        ddl = "ALTER TABLE T ADD STRING_ARRAY1 VARCHAR[]";
+        conn1.createStatement().execute(ddl);
+        conn1.close();
+    }}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
index 9a01018..4c57d09 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixConnection.java
@@ -742,11 +742,11 @@ public class PhoenixConnection implements Connection, org.apache.phoenix.jdbc.Jd
     }
 
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
-            long tableTimeStamp, long tableSeqNum) throws SQLException {
-        metaData = metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+            long tableSeqNum) throws SQLException {
+        metaData = metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
         //Cascade through to connectionQueryServices too
-        getQueryServices().removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+        getQueryServices().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
         return metaData;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index d46497d..c6daaef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -512,12 +512,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     @Override
-    public PMetaData removeColumn(final PName tenantId, final String tableName, final String familyName, final String columnName, final long tableTimeStamp, final long tableSeqNum) throws SQLException {
+    public PMetaData removeColumn(final PName tenantId, final String tableName, final List<PColumn> columnsToRemove, final long tableTimeStamp, final long tableSeqNum) throws SQLException {
         return metaDataMutated(tenantId, tableName, tableSeqNum, new Mutator() {
             @Override
             public PMetaData mutate(PMetaData metaData) throws SQLException {
                 try {
-                    return metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+                    return metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
                 } catch (TableNotFoundException e) {
                     // The DROP TABLE may have been processed first, so just ignore.
                     return metaData;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 6ecb6d1..386050c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -152,9 +152,9 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
-            long tableTimeStamp, long tableSeqNum) throws SQLException {
-        return metaData = metaData.removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+            long tableSeqNum) throws SQLException {
+        return metaData = metaData.removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
     }
 
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index bb4bb33..defad5b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -88,9 +88,9 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
 
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName,
-            long tableTimeStamp, long tableSeqNum) throws SQLException {
-        return getDelegate().removeColumn(tenantId, tableName, familyName, columnName, tableTimeStamp, tableSeqNum);
+    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp,
+            long tableSeqNum) throws SQLException {
+        return getDelegate().removeColumn(tenantId, tableName, columnsToRemove, tableTimeStamp, tableSeqNum);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
index 1b8ebda..cd4e2de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/MetaDataMutated.java
@@ -37,5 +37,5 @@ public interface MetaDataMutated {
     PMetaData addTable(PTable table) throws SQLException;
     PMetaData removeTable(PName tenantId, String tableName, String parentTableName, long tableTimeStamp) throws SQLException;
     PMetaData addColumn(PName tenantId, String tableName, List<PColumn> columns, long tableTimeStamp, long tableSeqNum, boolean isImmutableRows) throws SQLException;
-    PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException;
+    PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum) throws SQLException;
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 1f26274..afe21e8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -2308,10 +2308,8 @@ public class MetaDataClient {
                     // If we've done any index metadata updates, don't bother trying to update
                     // client-side cache as it would be too painful. Just let it pull it over from
                     // the server when needed.
-                    if (columnsToDrop.size() > 0 && indexesToDrop.isEmpty()) {
-                        for(PColumn columnToDrop : tableColumnsToDrop) {
-                            connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName) , columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString(), result.getMutationTime(), seqNum);
-                        }
+                    if (tableColumnsToDrop.size() > 0 && indexesToDrop.isEmpty()) {
+                        connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName) , tableColumnsToDrop, result.getMutationTime(), seqNum);
                     }
                     // If we have a VIEW, then only delete the metadata, and leave the table data alone
                     if (table.getType() != PTableType.VIEW) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f4d8bb01/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
index 8b26709..0d75aa2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PMetaDataImpl.java
@@ -365,38 +365,41 @@ public class PMetaDataImpl implements PMetaData {
     }
     
     @Override
-    public PMetaData removeColumn(PName tenantId, String tableName, String familyName, String columnName, long tableTimeStamp, long tableSeqNum) throws SQLException {
+    public PMetaData removeColumn(PName tenantId, String tableName, List<PColumn> columnsToRemove, long tableTimeStamp, long tableSeqNum) throws SQLException {
         PTableRef tableRef = metaData.get(new PTableKey(tenantId, tableName));
         if (tableRef == null) {
             return this;
         }
         PTable table = tableRef.table;
         PTableCache tables = metaData.clone();
-        PColumn column;
-        if (familyName == null) {
-            column = table.getPKColumn(columnName);
-        } else {
-            column = table.getColumnFamily(familyName).getColumn(columnName);
-        }
-        int positionOffset = 0;
-        int position = column.getPosition();
-        List<PColumn> oldColumns = table.getColumns();
-        if (table.getBucketNum() != null) {
-            position--;
-            positionOffset = 1;
-            oldColumns = oldColumns.subList(positionOffset, oldColumns.size());
-        }
-        List<PColumn> columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1);
-        columns.addAll(oldColumns.subList(0, position));
-        // Update position of columns that follow removed column
-        for (int i = position+1; i < oldColumns.size(); i++) {
-            PColumn oldColumn = oldColumns.get(i);
-            PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced());
-            columns.add(newColumn);
+        for (PColumn columnToRemove : columnsToRemove) {
+            PColumn column;
+            String familyName = columnToRemove.getFamilyName().getString();
+            if (familyName == null) {
+                column = table.getPKColumn(columnToRemove.getName().getString());
+            } else {
+                column = table.getColumnFamily(familyName).getColumn(columnToRemove.getName().getString());
+            }
+            int positionOffset = 0;
+            int position = column.getPosition();
+            List<PColumn> oldColumns = table.getColumns();
+            if (table.getBucketNum() != null) {
+                position--;
+                positionOffset = 1;
+                oldColumns = oldColumns.subList(positionOffset, oldColumns.size());
+            }
+            List<PColumn> columns = Lists.newArrayListWithExpectedSize(oldColumns.size() - 1);
+            columns.addAll(oldColumns.subList(0, position));
+            // Update position of columns that follow removed column
+            for (int i = position+1; i < oldColumns.size(); i++) {
+                PColumn oldColumn = oldColumns.get(i);
+                PColumn newColumn = new PColumnImpl(oldColumn.getName(), oldColumn.getFamilyName(), oldColumn.getDataType(), oldColumn.getMaxLength(), oldColumn.getScale(), oldColumn.isNullable(), i-1+positionOffset, oldColumn.getSortOrder(), oldColumn.getArraySize(), oldColumn.getViewConstant(), oldColumn.isViewReferenced());
+                columns.add(newColumn);
+            }
+            
+            table = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
         }
-        
-        PTable newTable = PTableImpl.makePTable(table, tableTimeStamp, tableSeqNum, columns);
-        tables.put(newTable.getKey(), newTable);
+        tables.put(table.getKey(), table);
         return new PMetaDataImpl(tables);
     }
 


[02/24] git commit: PHOENIX-1373: Ctrl-C out of sqlline causes terminal to be useless

Posted by ja...@apache.org.
PHOENIX-1373: Ctrl-C out of sqlline causes terminal to be useless


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e8a0355a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e8a0355a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e8a0355a

Branch: refs/heads/4.2
Commit: e8a0355a4734beaf69998754f617172974b46670
Parents: 6a28b7d
Author: Jeffrey Zhong <je...@apache.org>
Authored: Thu Oct 23 17:34:43 2014 -0700
Committer: Jeffrey Zhong <je...@apache.org>
Committed: Thu Oct 23 17:34:43 2014 -0700

----------------------------------------------------------------------
 bin/sqlline.py | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e8a0355a/bin/sqlline.py
----------------------------------------------------------------------
diff --git a/bin/sqlline.py b/bin/sqlline.py
index d41c2e7..f48e527 100755
--- a/bin/sqlline.py
+++ b/bin/sqlline.py
@@ -31,6 +31,8 @@ def kill_child():
     if childProc is not None:
         childProc.terminate()
         childProc.kill()
+        if os.name != 'nt':
+            os.system("reset")
 atexit.register(kill_child)
 
 phoenix_utils.setPath()


[03/24] git commit: PHOENIX-910 Filter should override hasFilterRow() when filterRow() is overridden. (Ted)

Posted by ja...@apache.org.
PHOENIX-910 Filter should override hasFilterRow() when filterRow() is overridden. (Ted)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4753c4ef
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4753c4ef
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4753c4ef

Branch: refs/heads/4.2
Commit: 4753c4efea1d50c290ba84dca1601d873ef2ad6d
Parents: e8a0355
Author: anoopsjohn <an...@gmail.com>
Authored: Fri Oct 24 13:42:45 2014 +0530
Committer: anoopsjohn <an...@gmail.com>
Committed: Fri Oct 24 13:42:45 2014 +0530

----------------------------------------------------------------------
 .../org/apache/phoenix/filter/BooleanExpressionFilter.java     | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4753c4ef/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
index e0caf9f..c5b36b2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/BooleanExpressionFilter.java
@@ -37,6 +37,7 @@ import org.apache.phoenix.util.ServerUtil;
  * 
  * Base class for filter that evaluates a WHERE clause expression.
  *
+ * Subclass is expected to implement filterRow() method
  * 
  * @since 0.1
  */
@@ -57,6 +58,11 @@ abstract public class BooleanExpressionFilter extends FilterBase implements Writ
     }
     
     @Override
+    public boolean hasFilterRow() {
+      return true;
+    }
+
+    @Override
     public int hashCode() {
         final int prime = 31;
         int result = 1;
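
For background: the region server only consults filterRow() on filters whose
hasFilterRow() returns true, so overriding one without the other silently
disables the row-level check. A minimal illustration of the contract (the
class and field names here are made up):

    import org.apache.hadoop.hbase.filter.FilterBase;

    public class RowLevelFilterSketch extends FilterBase {
        private boolean rowMatched = false;

        @Override
        public boolean hasFilterRow() {
            return true; // without this, filterRow() below is never invoked
        }

        @Override
        public boolean filterRow() {
            return !rowMatched; // true means "exclude the whole row"
        }
    }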


[16/24] git commit: PHOENIX-1286 Remove hadoop2 compat modules

Posted by ja...@apache.org.
PHOENIX-1286 Remove hadoop2 compat modules

The metrics/tracing tools used reflection and wrapper classes to support both
Hadoop1 and Hadoop2 (though hadoop1 support was never completed). This removes
that extra code now that hadoop1 no longer needs to be supported.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b48ca7b5
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b48ca7b5
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b48ca7b5

Branch: refs/heads/4.2
Commit: b48ca7b5c3c97fe00c7e89978fb387d7013be320
Parents: b2c5ffa
Author: Jesse Yates <jy...@apache.org>
Authored: Mon Sep 22 15:00:00 2014 -0700
Committer: Jesse Yates <jy...@apache.org>
Committed: Mon Oct 27 13:56:29 2014 -0700

----------------------------------------------------------------------
 phoenix-assembly/pom.xml                        |  10 +-
 phoenix-core/pom.xml                            |  23 +-
 .../apache/phoenix/trace/BaseTracingTestIT.java | 112 ++++---
 .../phoenix/trace/DisableableMetricsWriter.java |  27 +-
 .../trace/Hadoop1TracingTestEnabler.java        |  84 ------
 .../apache/phoenix/trace/PhoenixMetricImpl.java |  44 ---
 .../phoenix/trace/PhoenixMetricRecordImpl.java  |  71 -----
 .../trace/PhoenixTableMetricsWriterIT.java      |  28 +-
 .../apache/phoenix/trace/PhoenixTagImpl.java    |  22 +-
 .../phoenix/trace/PhoenixTraceReaderIT.java     |  61 ++--
 .../phoenix/trace/PhoenixTracingEndToEndIT.java |  59 ++--
 .../apache/phoenix/trace/TracingTestUtil.java   |  14 +
 .../org/apache/phoenix/hbase/index/Indexer.java |   4 +-
 .../org/apache/phoenix/metrics/MetricInfo.java  |  51 ++++
 .../org/apache/phoenix/metrics/Metrics.java     |  66 ++++
 .../apache/phoenix/trace/MetricsInfoImpl.java   |  63 ++++
 .../phoenix/trace/PhoenixMetricsSink.java       | 298 +++++++++++++++++++
 .../trace/PhoenixTableMetricsWriter.java        | 278 -----------------
 .../apache/phoenix/trace/TraceMetricSource.java | 188 ++++++++++++
 .../org/apache/phoenix/trace/TraceReader.java   |  12 +-
 .../org/apache/phoenix/trace/TracingUtils.java  |  63 ++++
 .../org/apache/phoenix/trace/util/Tracing.java  |   5 +-
 .../metrics2/impl/ExposedMetricCounterLong.java |  36 +++
 .../metrics2/impl/ExposedMetricsRecordImpl.java |  42 +++
 .../metrics2/lib/ExposedMetricsInfoImpl.java    |  34 +++
 .../org/apache/phoenix/metrics/LoggingSink.java |  60 ++++
 .../phoenix/trace/TraceMetricsSourceTest.java   |  96 ++++++
 phoenix-hadoop-compat/pom.xml                   |  89 ------
 .../org/apache/phoenix/metrics/MetricInfo.java  |  51 ----
 .../org/apache/phoenix/metrics/Metrics.java     |  80 -----
 .../apache/phoenix/metrics/MetricsManager.java  |  58 ----
 .../apache/phoenix/metrics/MetricsWriter.java   |  31 --
 .../phoenix/metrics/PhoenixAbstractMetric.java  |  30 --
 .../phoenix/metrics/PhoenixMetricTag.java       |  27 --
 .../phoenix/metrics/PhoenixMetricsRecord.java   |  35 ---
 .../phoenix/trace/PhoenixSpanReceiver.java      |  26 --
 .../phoenix/trace/TestableMetricsWriter.java    |  30 --
 .../org/apache/phoenix/trace/TracingCompat.java |  89 ------
 .../org/apache/phoenix/metrics/LoggingSink.java |  56 ----
 .../phoenix/metrics/TracingTestCompat.java      |  45 ---
 phoenix-hadoop2-compat/pom.xml                  |  77 -----
 .../phoenix/metrics/MetricsManagerImpl.java     |  71 -----
 .../apache/phoenix/trace/MetricsInfoImpl.java   |  63 ----
 .../phoenix/trace/PhoenixMetricsSink.java       | 191 ------------
 .../apache/phoenix/trace/TraceMetricSource.java | 197 ------------
 .../org.apache.phoenix.metrics.MetricsManager   |   1 -
 ...org.apache.phoenix.trace.PhoenixSpanReceiver |   1 -
 ...g.apache.phoenix.trace.TestableMetricsWriter |   1 -
 .../metrics2/impl/ExposedMetricCounterLong.java |  35 ---
 .../metrics2/impl/ExposedMetricsRecordImpl.java |  43 ---
 .../metrics2/lib/ExposedMetricsInfoImpl.java    |  32 --
 .../phoenix/trace/PhoenixMetricsWriterTest.java | 142 ---------
 .../phoenix/trace/TraceMetricsSourceTest.java   |  96 ------
 .../org/apache/phoenix/trace/TracingTest.java   |  34 ---
 pom.xml                                         |  27 --
 55 files changed, 1156 insertions(+), 2353 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index fe02636..b0cbac4 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -138,14 +138,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop-compat</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop2-compat</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-flume</artifactId>
     </dependency>
         <dependency>
@@ -153,4 +145,4 @@
       <artifactId>phoenix-pig</artifactId>
     </dependency>
   </dependencies>
-</project>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/pom.xml
----------------------------------------------------------------------
diff --git a/phoenix-core/pom.xml b/phoenix-core/pom.xml
index 539c38b..194ed58 100644
--- a/phoenix-core/pom.xml
+++ b/phoenix-core/pom.xml
@@ -205,17 +205,6 @@
   </build>
 
   <dependencies>
-    <!-- Intra project dependencies -->
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop-compat</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop-compat</artifactId>
-      <classifier>tests</classifier>
-      <scope>test</scope>
-    </dependency>
     <!-- Make sure we have all the antlr dependencies -->
     <dependency>
       <groupId>org.antlr</groupId>
@@ -409,15 +398,5 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop2-compat</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.phoenix</groupId>
-      <artifactId>phoenix-hadoop2-compat</artifactId>
-      <classifier>tests</classifier>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
-</project>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
index 0f8a666..f504d12 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
@@ -17,25 +17,18 @@
  */
 package org.apache.phoenix.trace;
 
-import static org.apache.phoenix.util.PhoenixRuntime.ANNOTATION_ATTRIB_PREFIX;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Properties;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.ExposedMetricCounterLong;
+import org.apache.hadoop.metrics2.impl.ExposedMetricsRecordImpl;
+import org.apache.hadoop.metrics2.lib.ExposedMetricsInfoImpl;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
 import org.apache.phoenix.end2end.HBaseManagedTimeTest;
 import org.apache.phoenix.metrics.MetricInfo;
-import org.apache.phoenix.metrics.Metrics;
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-import org.apache.phoenix.metrics.PhoenixMetricTag;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.trace.util.Tracing;
@@ -45,6 +38,14 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Before;
 import org.junit.experimental.categories.Category;
 
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.*;
+
+import static org.apache.phoenix.util.PhoenixRuntime.ANNOTATION_ATTRIB_PREFIX;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+
 /**
  * Base test for tracing tests - helps manage getting tracing/non-tracing
  * connections, as well as any supporting utils.
@@ -53,36 +54,17 @@ import org.junit.experimental.categories.Category;
 public class BaseTracingTestIT extends BaseHBaseManagedTimeIT {
     private static final Log LOG = LogFactory.getLog(BaseTracingTestIT.class);
 
-    /**
-     * Hadoop1 doesn't yet support tracing (need metrics library support) so we just skip those
-     * tests for the moment
-     * @return <tt>true</tt> if the test should exit because some necessary classes are missing, or
-     *         <tt>false</tt> if the tests can continue normally
-     */
-    static boolean shouldEarlyExitForHadoop1Test() {
-        try {
-            // get a receiver for the spans
-            TracingCompat.newTraceMetricSource();
-            // which also needs to a source for the metrics system
-            Metrics.getManager();
-            return false;
-        } catch (RuntimeException e) {
-            LOG.error("Shouldn't run test because can't instantiate necessary metrics/tracing classes!");
-        }
-
-        return true;
-    }
-
     @Before
     public void resetTracingTableIfExists() throws Exception {
         Connection conn = getConnectionWithoutTracing();
         conn.setAutoCommit(true);
         try {
-            conn.createStatement().executeUpdate("DELETE FROM " + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+            conn.createStatement().executeUpdate(
+                    "DELETE FROM " + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
         } catch (TableNotFoundException ignore) {
         }
     }
-    
+
     public static Connection getConnectionWithoutTracing() throws SQLException {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         return getConnectionWithoutTracing(props);
@@ -93,18 +75,19 @@ public class BaseTracingTestIT extends BaseHBaseManagedTimeIT {
         conn.setAutoCommit(false);
         return conn;
     }
-    
-    public static Connection getTracingConnection() throws Exception { 
-    	return getTracingConnection(Collections.<String, String>emptyMap(), null);
+
+    public static Connection getTracingConnection() throws Exception {
+        return getTracingConnection(Collections.<String, String>emptyMap(), null);
     }
 
-    public static Connection getTracingConnection(Map<String, String> customAnnotations, String tenantId) throws Exception {
+    public static Connection getTracingConnection(Map<String, String> customAnnotations,
+            String tenantId) throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         for (Map.Entry<String, String> annot : customAnnotations.entrySet()) {
-        	props.put(ANNOTATION_ATTRIB_PREFIX + annot.getKey(), annot.getValue());
+            props.put(ANNOTATION_ATTRIB_PREFIX + annot.getKey(), annot.getValue());
         }
         if (tenantId != null) {
-        	props.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+            props.put(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
         }
         return getConnectionWithTracingFrequency(props, Tracing.Frequency.ALWAYS);
     }
@@ -115,34 +98,49 @@ public class BaseTracingTestIT extends BaseHBaseManagedTimeIT {
         return DriverManager.getConnection(getUrl(), props);
     }
 
-    public static PhoenixMetricsRecord createRecord(long traceid, long parentid, long spanid,
+    public static MetricsRecord createRecord(long traceid, long parentid, long spanid,
             String desc, long startTime, long endTime, String hostname, String... tags) {
-        PhoenixMetricRecordImpl record =
-                new PhoenixMetricRecordImpl(TracingCompat.getTraceMetricName(traceid), desc);
-        PhoenixAbstractMetric span = new PhoenixMetricImpl(MetricInfo.SPAN.traceName, spanid);
-        record.addMetric(span);
 
-        PhoenixAbstractMetric parent = new PhoenixMetricImpl(MetricInfo.PARENT.traceName, parentid);
-        record.addMetric(parent);
+        List<AbstractMetric> metrics = new ArrayList<AbstractMetric>();
+        AbstractMetric span = new ExposedMetricCounterLong(asInfo(MetricInfo
+                .SPAN.traceName),
+                spanid);
+        metrics.add(span);
 
-        PhoenixAbstractMetric start = new PhoenixMetricImpl(MetricInfo.START.traceName, startTime);
-        record.addMetric(start);
+        AbstractMetric parent = new ExposedMetricCounterLong(asInfo(MetricInfo.PARENT.traceName),
+                parentid);
+        metrics.add(parent);
 
-        PhoenixAbstractMetric end = new PhoenixMetricImpl(MetricInfo.END.traceName, endTime);
-        record.addMetric(end);
+        AbstractMetric start = new ExposedMetricCounterLong(asInfo(MetricInfo.START.traceName),
+                startTime);
+        metrics.add(start);
 
+        AbstractMetric
+                end =
+                new ExposedMetricCounterLong(asInfo(MetricInfo.END.traceName), endTime);
+        metrics.add(end);
+
+        List<MetricsTag> tagsList = new ArrayList<MetricsTag>();
         int tagCount = 0;
         for (String annotation : tags) {
-            PhoenixMetricTag tag =
+            MetricsTag tag =
                     new PhoenixTagImpl(MetricInfo.ANNOTATION.traceName,
                             Integer.toString(tagCount++), annotation);
-            record.addTag(tag);
+            tagsList.add(tag);
         }
         String hostnameValue = "host-name.value";
-        PhoenixMetricTag hostnameTag =
+        MetricsTag hostnameTag =
                 new PhoenixTagImpl(MetricInfo.HOSTNAME.traceName, "", hostnameValue);
-        record.addTag(hostnameTag);
+        tagsList.add(hostnameTag);
 
+        MetricsRecord record =
+                new ExposedMetricsRecordImpl(new ExposedMetricsInfoImpl(TracingUtils
+                        .getTraceMetricName(traceid), desc), System.currentTimeMillis(),
+                        tagsList, metrics);
         return record;
     }
+
+    private static MetricsInfo asInfo(String name) {
+        return new ExposedMetricsInfoImpl(name, "");
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/DisableableMetricsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/DisableableMetricsWriter.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/DisableableMetricsWriter.java
index a054bf2..875717c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/DisableableMetricsWriter.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/DisableableMetricsWriter.java
@@ -17,31 +17,32 @@
  */
 package org.apache.phoenix.trace;
 
-import java.sql.SQLException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
+import org.apache.commons.configuration.SubsetConfiguration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.phoenix.metrics.MetricsWriter;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+
+import java.sql.SQLException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  *
  */
-public class DisableableMetricsWriter implements MetricsWriter {
+public class DisableableMetricsWriter implements MetricsSink {
 
     private static final Log LOG = LogFactory.getLog(DisableableMetricsWriter.class);
-    private PhoenixTableMetricsWriter writer;
+    private PhoenixMetricsSink writer;
     private AtomicBoolean disabled = new AtomicBoolean(false);
 
-    public DisableableMetricsWriter(PhoenixTableMetricsWriter writer) {
+    public DisableableMetricsWriter(PhoenixMetricsSink writer) {
         this.writer = writer;
     }
 
     @Override
-    public void initialize() {
+    public void init(SubsetConfiguration config) {
         if (this.disabled.get()) return;
-        writer.initialize();
+        writer.init(config);
     }
 
     @Override
@@ -55,9 +56,9 @@ public class DisableableMetricsWriter implements MetricsWriter {
     }
 
     @Override
-    public void addMetrics(PhoenixMetricsRecord record) {
+    public void putMetrics(MetricsRecord record) {
         if (this.disabled.get()) return;
-        writer.addMetrics(record);
+        writer.putMetrics(record);
     }
 
     public void disable() {
@@ -77,7 +78,7 @@ public class DisableableMetricsWriter implements MetricsWriter {
         }
     }
 
-    public PhoenixTableMetricsWriter getDelegate() {
+    public PhoenixMetricsSink getDelegate() {
         return this.writer;
     }
 }

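The wrapper above gives the integration tests a kill switch over the real sink: each MetricsSink method checks the AtomicBoolean before delegating. A minimal sketch of the pattern ("record" is an arbitrary MetricsRecord, assumed in scope):

    PhoenixMetricsSink delegate = new PhoenixMetricsSink();
    DisableableMetricsWriter sink = new DisableableMetricsWriter(delegate);

    sink.disable();             // flips the flag
    sink.putMetrics(record);    // dropped: returns before delegating
    sink.getDelegate().flush(); // the wrapped sink itself is still reachable
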
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/Hadoop1TracingTestEnabler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/Hadoop1TracingTestEnabler.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/Hadoop1TracingTestEnabler.java
deleted file mode 100644
index 9a592d3..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/Hadoop1TracingTestEnabler.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-import org.junit.runner.notification.RunNotifier;
-import org.junit.runners.BlockJUnit4ClassRunner;
-import org.junit.runners.model.FrameworkMethod;
-import org.junit.runners.model.InitializationError;
-
-/**
- * Test runner to run classes that depend on Hadoop1 compatibility that may not be present for the
- * feature
- */
-public class Hadoop1TracingTestEnabler extends BlockJUnit4ClassRunner {
-
-    public Hadoop1TracingTestEnabler(Class<?> klass) throws InitializationError {
-        super(klass);
-    }
-
-    @Override
-    public void runChild(FrameworkMethod method, RunNotifier notifier) {
-        // if the class is already disabled, then we can disable on the class level, otherwise we
-        // just check the per-method
-        Hadoop1Disabled condition =
-                getTestClass().getJavaClass().getAnnotation(Hadoop1Disabled.class);
-        if (condition == null) {
-            condition = method
-                        .getAnnotation(Hadoop1Disabled.class);
-        }
-
-        // if this has the flag, then we want to disable it if hadoop1 is not enabled for that
-        // feature
-        if (condition != null && getEnabled(condition.value())) {
-            super.runChild(method, notifier);
-        } else {
-            notifier.fireTestIgnored(describeChild(method));
-        }
-    }
-
-    /**
-     * Simple check that just uses if-else logic. We can move to something more complex, policy
-     * based later when this gets more complex.
-     * @param feature name of the feature to check
-     * @return <tt>true</tt> if the test method is enabled for the given feature, <tt>false</tt>
-     *         otherwise
-     */
-    private boolean getEnabled(String feature) {
-        if (feature.equals("tracing")) {
-            return !BaseTracingTestIT.shouldEarlyExitForHadoop1Test();
-        }
-        return true;
-    }
-
-    /**
-     * Marker that a class/method should be disabled if hadoop1 features are not enabled. It takes a
-     * value for the Hadoop1 feature on which this class/method depends, for instance "tracing" is
-     * not supported in Hadoop1 (yet).
-     */
-    @Target({ ElementType.TYPE, ElementType.METHOD })
-    @Retention(RetentionPolicy.RUNTIME)
-    public static @interface Hadoop1Disabled {
-        String value();
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricImpl.java
deleted file mode 100644
index 985504f..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricImpl.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-
-/**
- * Simple metric implementation for testing
- */
-public class PhoenixMetricImpl implements PhoenixAbstractMetric {
-
-    private String name;
-    private Number value;
-
-    public PhoenixMetricImpl(String name, Number value) {
-        this.name = name;
-        this.value = value;
-    }
-
-    @Override
-    public String getName() {
-        return name;
-    }
-
-    @Override
-    public Number value() {
-        return value;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricRecordImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricRecordImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricRecordImpl.java
deleted file mode 100644
index 45cabf0..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixMetricRecordImpl.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-import org.apache.phoenix.metrics.PhoenixMetricTag;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
-
-import com.google.common.collect.Lists;
-
-/**
- *
- */
-public class PhoenixMetricRecordImpl implements PhoenixMetricsRecord {
-
-    private String name;
-    private String description;
-    private final List<PhoenixAbstractMetric> metrics = Lists.newArrayList();
-    private final List<PhoenixMetricTag> tags = Lists.newArrayList();
-
-    public PhoenixMetricRecordImpl(String name, String description) {
-        this.name = name;
-        this.description = description;
-    }
-
-    public void addMetric(PhoenixAbstractMetric metric) {
-        this.metrics.add(metric);
-    }
-
-    public void addTag(PhoenixMetricTag tag) {
-        this.tags.add(tag);
-    }
-
-    @Override
-    public String name() {
-        return this.name;
-    }
-
-    @Override
-    public String description() {
-        return this.description;
-    }
-
-    @Override
-    public Iterable<PhoenixAbstractMetric> metrics() {
-        return metrics;
-    }
-
-    @Override
-    public Collection<PhoenixMetricTag> tags() {
-        return tags;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
index ecac21b..533b6f8 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
@@ -17,25 +17,21 @@
  */
 package org.apache.phoenix.trace;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.util.Collection;
-
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
+import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.trace.Hadoop1TracingTestEnabler.Hadoop1Disabled;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
 import org.junit.Test;
-import org.junit.runner.RunWith;
+
+import java.sql.Connection;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * Test that the logging sink stores the expected metrics/stats
  */
-@RunWith(Hadoop1TracingTestEnabler.class)
-@Hadoop1Disabled("tracing")
 public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
 
     /**
@@ -45,7 +41,7 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
      */
     @Test
     public void testCreatesTable() throws Exception {
-        PhoenixTableMetricsWriter sink = new PhoenixTableMetricsWriter();
+        PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
         sink.initForTesting(conn);
 
@@ -69,13 +65,13 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
 
     /**
      * Simple metrics writing and reading check, that uses the standard wrapping in the
-     * {@link PhoenixMetricsWriter}
+     * {@link PhoenixMetricsSink}
      * @throws Exception on failure
      */
     @Test
     public void writeMetrics() throws Exception {
         // hook up a phoenix sink
-        PhoenixTableMetricsWriter sink = new PhoenixTableMetricsWriter();
+        PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
         sink.initForTesting(conn);
 
@@ -88,12 +84,12 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
         long endTime = 13;
         String annotation = "test annotation for a span";
         String hostnameValue = "host-name.value";
-        PhoenixMetricsRecord record =
+        MetricsRecord record =
                 createRecord(traceid, parentid, spanid, description, startTime, endTime,
                     hostnameValue, annotation);
 
         // actually write the record to the table
-        sink.addMetrics(record);
+        sink.putMetrics(record);
         sink.flush();
 
         // make sure we only get expected stat entry (matching the trace id), otherwise we could the

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
index c8e2219..a911a2c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
@@ -17,36 +17,22 @@
  */
 package org.apache.phoenix.trace;
 
-import org.apache.phoenix.metrics.PhoenixMetricTag;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsTag;
 
 /**
  * Simple Tag implementation for testing
  */
-public class PhoenixTagImpl implements PhoenixMetricTag {
+public class PhoenixTagImpl extends MetricsTag {
 
     private final String name;
     private final String description;
     private final String value;
 
     public PhoenixTagImpl(String name, String description, String value) {
-        super();
+        super(new MetricsInfoImpl(name, description), value);
         this.name = name;
         this.description = description;
         this.value = value;
     }
-
-    @Override
-    public String name() {
-        return name;
-    }
-
-    @Override
-    public String description() {
-        return description;
-    }
-
-    @Override
-    public String value() {
-        return value;
-    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
index f0a47bb..d75e281 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
@@ -17,40 +17,31 @@
  */
 package org.apache.phoenix.trace;
 
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Properties;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.phoenix.end2end.HBaseManagedTimeTest;
 import org.apache.phoenix.metrics.MetricInfo;
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-import org.apache.phoenix.metrics.PhoenixMetricTag;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
-import org.apache.phoenix.trace.Hadoop1TracingTestEnabler.Hadoop1Disabled;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
 import org.cloudera.htrace.Span;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.*;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 
 /**
  * Test that the {@link TraceReader} will correctly read traces written by the
- * {@link PhoenixTableMetricsWriter}
+ * {@link org.apache.phoenix.trace.PhoenixMetricsSink}
  */
-@RunWith(Hadoop1TracingTestEnabler.class)
-@Hadoop1Disabled("tracing")
 @Category(HBaseManagedTimeTest.class)
 public class PhoenixTraceReaderIT extends BaseTracingTestIT {
 
@@ -58,14 +49,14 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
 
     @Test
     public void singleSpan() throws Exception {
-        PhoenixTableMetricsWriter sink = new PhoenixTableMetricsWriter();
+        PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Properties props = new Properties(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         sink.initForTesting(conn);
 
         // create a simple metrics record
         long traceid = 987654;
-        PhoenixMetricsRecord record =
+        MetricsRecord record =
                 createAndFlush(sink, traceid, Span.ROOT_SPAN_ID, 10, "root", 12, 13,
                     "host-name.value", "test annotation for a span");
 
@@ -73,12 +64,12 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
         validateTraces(Collections.singletonList(record), conn, traceid);
     }
 
-    private PhoenixMetricsRecord createAndFlush(PhoenixTableMetricsWriter sink, long traceid,
+    private MetricsRecord createAndFlush(PhoenixMetricsSink sink, long traceid,
             long parentid, long spanid, String desc, long startTime, long endTime, String hostname,
             String... tags) {
-        PhoenixMetricsRecord record =
+        MetricsRecord record =
                 createRecord(traceid, parentid, spanid, desc, startTime, endTime, hostname, tags);
-        sink.addMetrics(record);
+        sink.putMetrics(record);
         sink.flush();
         return record;
     }
@@ -91,14 +82,14 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
     @Test
     public void testMultipleSpans() throws Exception {
         // hook up a phoenix sink
-        PhoenixTableMetricsWriter sink = new PhoenixTableMetricsWriter();
+        PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
         sink.initForTesting(conn);
 
         // create a simple metrics record
         long traceid = 12345;
-        List<PhoenixMetricsRecord> records = new ArrayList<PhoenixMetricsRecord>();
-        PhoenixMetricsRecord record =
+        List<MetricsRecord> records = new ArrayList<MetricsRecord>();
+        MetricsRecord record =
                 createAndFlush(sink, traceid, Span.ROOT_SPAN_ID, 7777, "root", 10, 30,
                     "hostname.value", "root-span tag");
         records.add(record);
@@ -128,7 +119,7 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
         validateTraces(records, conn, traceid);
     }
 
-    private void validateTraces(List<PhoenixMetricsRecord> records, Connection conn, long traceid)
+    private void validateTraces(List<MetricsRecord> records, Connection conn, long traceid)
             throws Exception {
         TraceReader reader = new TraceReader(conn);
         Collection<TraceHolder> traces = reader.readAll(1);
@@ -145,13 +136,13 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
      * @param records
      * @param trace
      */
-    private void validateTrace(List<PhoenixMetricsRecord> records, TraceHolder trace) {
+    private void validateTrace(List<MetricsRecord> records, TraceHolder trace) {
         // drop each span into a sorted list so we get the expected ordering
         Iterator<SpanInfo> spanIter = trace.spans.iterator();
-        for (PhoenixMetricsRecord record : records) {
+        for (MetricsRecord record : records) {
             SpanInfo spanInfo = spanIter.next();
             LOG.info("Checking span:\n" + spanInfo);
-            Iterator<PhoenixAbstractMetric> metricIter = record.metrics().iterator();
+            Iterator<AbstractMetric> metricIter = record.metrics().iterator();
             assertEquals("Got an unexpected span id", metricIter.next().value(), spanInfo.id);
             long parentId = (Long) metricIter.next().value();
             if (parentId == Span.ROOT_SPAN_ID) {
@@ -162,12 +153,12 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
             assertEquals("Got an unexpected start time", metricIter.next().value(), spanInfo.start);
             assertEquals("Got an unexpected end time", metricIter.next().value(), spanInfo.end);
 
-            Iterator<PhoenixMetricTag> tags = record.tags().iterator();
+            Iterator<MetricsTag> tags = record.tags().iterator();
 
             int annotationCount = 0;
             while (tags.hasNext()) {
                 // hostname is a tag, so we differentiate it
-                PhoenixMetricTag tag = tags.next();
+                MetricsTag tag = tags.next();
                 if (tag.name().equals(MetricInfo.HOSTNAME.traceName)) {
                     assertEquals("Didn't store correct hostname value", tag.value(),
                         spanInfo.hostname);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 87d80da..f4cf0d1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -17,46 +17,37 @@
  */
 package org.apache.phoenix.trace;
 
-import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Collection;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.end2end.HBaseManagedTimeTest;
 import org.apache.phoenix.metrics.Metrics;
-import org.apache.phoenix.metrics.TracingTestCompat;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.trace.Hadoop1TracingTestEnabler.Hadoop1Disabled;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
-import org.cloudera.htrace.Sampler;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.SpanReceiver;
-import org.cloudera.htrace.Trace;
-import org.cloudera.htrace.TraceScope;
+import org.cloudera.htrace.*;
 import org.junit.After;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
 
-import com.google.common.collect.ImmutableMap;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collection;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Test that the logging sink stores the expected metrics/stats
  */
-@RunWith(Hadoop1TracingTestEnabler.class)
-@Hadoop1Disabled("tracing")
 @Category(HBaseManagedTimeTest.class)
 public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
@@ -69,15 +60,12 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
 
     @BeforeClass
     public static void setupMetrics() throws Exception {
-        if (shouldEarlyExitForHadoop1Test()) {
-            return;
-        }
-        PhoenixTableMetricsWriter pWriter = new PhoenixTableMetricsWriter();
+        PhoenixMetricsSink pWriter = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
         pWriter.initForTesting(conn);
         sink = new DisableableMetricsWriter(pWriter);
 
-        TracingTestCompat.registerSink(sink);
+        TracingTestUtil.registerSink(sink);
     }
 
     @After
@@ -112,10 +100,10 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     @Test
     public void testWriteSpans() throws Exception {
         // get a receiver for the spans
-        SpanReceiver receiver = TracingCompat.newTraceMetricSource();
+        SpanReceiver receiver = new TraceMetricSource();
         // which also needs to a source for the metrics system
-        Metrics.getManager().registerSource("testWriteSpans-source", "source for testWriteSpans",
-            receiver);
+        Metrics.initialize().register("testWriteSpans-source", "source for testWriteSpans",
+                (MetricsSource) receiver);
 
         // watch our sink so we know when commits happen
         CountDownLatch latch = new CountDownLatch(1);
@@ -128,7 +116,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         // add a child with some annotations
         Span child = span.child("child 1");
         child.addTimelineAnnotation("timeline annotation");
-        TracingCompat.addAnnotation(child, "test annotation", 10);
+        TracingUtils.addAnnotation(child, "test annotation", 10);
         child.stop();
 
         // sleep a little bit to get some time difference
@@ -230,10 +218,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
                 if (traceInfo.contains(QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME)) {
                     return false;
                 }
-                if (traceInfo.contains("Completing index")) {
-                    return true;
-                }
-                return false;
+                return traceInfo.contains("Completing index");
             }
         });
 
@@ -467,4 +452,4 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
         }
 
     }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
new file mode 100644
index 0000000..d502175
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/TracingTestUtil.java
@@ -0,0 +1,14 @@
+package org.apache.phoenix.trace;
+
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.phoenix.metrics.Metrics;
+
+/**
+ *
+ */
+public class TracingTestUtil {
+
+    public static void registerSink(MetricsSink sink){
+        Metrics.initialize().register("phoenix", "test sink gets logged", sink);
+    }
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index d55dfbf..9c48a8d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -64,7 +64,7 @@ import org.apache.phoenix.hbase.index.write.IndexWriter;
 import org.apache.phoenix.hbase.index.write.recovery.PerRegionIndexWriteCache;
 import org.apache.phoenix.hbase.index.write.recovery.StoreFailuresInCachePolicy;
 import org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter;
-import org.apache.phoenix.trace.TracingCompat;
+import org.apache.phoenix.trace.TracingUtils;
 import org.apache.phoenix.trace.util.NullSpan;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
@@ -276,7 +276,7 @@ public class Indexer extends BaseRegionObserver {
         this.builder.getIndexUpdate(miniBatchOp, mutations.values());
 
         current.addTimelineAnnotation("Built index updates, doing preStep");
-        TracingCompat.addAnnotation(current, "index update count", indexUpdates.size());
+        TracingUtils.addAnnotation(current, "index update count", indexUpdates.size());
 
     // write them, either to WAL or the index tables
     doPre(indexUpdates, edit, durability);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/metrics/MetricInfo.java b/phoenix-core/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
new file mode 100644
index 0000000..e6ad976
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/metrics/MetricInfo.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+/**
+ * Metrics and their conversion from the trace name to the name we store in the stats table
+ */
+public enum MetricInfo {
+
+    TRACE("", "trace_id"),
+    SPAN("span_id", "span_id"),
+    PARENT("parent_id", "parent_id"),
+    START("start_time", "start_time"),
+    END("end_time", "end_time"),
+    TAG("phoenix.tag", "t"),
+    ANNOTATION("phoenix.annotation", "a"),
+    HOSTNAME("Hostname", "hostname"),
+    DESCRIPTION("", "description");
+
+    public final String traceName;
+    public final String columnName;
+
+    private MetricInfo(String traceName, String columnName) {
+        this.traceName = traceName;
+        this.columnName = columnName;
+    }
+
+    public static String getColumnName(String traceName) {
+        for (MetricInfo info : MetricInfo.values()) {
+            if (info.traceName.equals(traceName)) {
+                return info.columnName;
+            }
+        }
+        throw new IllegalArgumentException("Unknown tracename: " + traceName);
+    }
+}

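The enum above is the single mapping between htrace-side names and the stats-table columns; getColumnName() scans the values and fails fast on anything unrecognized, so a bogus trace name can never become a column. For example:

    MetricInfo.getColumnName(MetricInfo.SPAN.traceName); // "span_id"
    MetricInfo.getColumnName("Hostname");                // "hostname"
    MetricInfo.getColumnName("no-such-name");            // IllegalArgumentException
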
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
new file mode 100644
index 0000000..24950c4
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/metrics/Metrics.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.metrics;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+
+public class Metrics {
+
+    private static final Log LOG = LogFactory.getLog(Metrics.class);
+
+    private static volatile MetricsSystem manager = DefaultMetricsSystem.instance();
+
+    private static boolean initialized;
+
+    /** This must match the prefix that we are using in the hadoop-metrics2 config on the client */
+    public static final String METRICS_SYSTEM_NAME = "phoenix";
+    public static MetricsSystem initialize() {
+        // if the jars aren't on the classpath, then we don't start the metrics system
+        if (manager == null) {
+            LOG.warn("Phoenix metrics could not be initialized - no MetricsManager found!");
+            return null;
+        }
+        // only initialize the metrics system once
+        synchronized (Metrics.class) {
+            if (!initialized) {
+                LOG.info("Initializing metrics system: " + Metrics.METRICS_SYSTEM_NAME);
+                manager.init(Metrics.METRICS_SYSTEM_NAME);
+                initialized = true;
+            }
+        }
+        return manager;
+    }
+
+    private static volatile boolean sinkInitialized = false;
+
+    /**
+     * Mark that the metrics/tracing sink has been initialized
+     */
+    public static void markSinkInitialized() {
+        sinkInitialized = true;
+    }
+
+    public static void ensureConfigured() {
+        if (!sinkInitialized) {
+            LOG.warn("Phoenix metrics2/tracing sink was not started. Should be it be?");
+        }
+    }
+}
\ No newline at end of file

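All callers share the one DefaultMetricsSystem instance behind initialize(); the synchronized block means repeated calls return the already-started system instead of re-running init(). That is the pattern the tests in this patch rely on, e.g. (given some MetricsSink sink):

    MetricsSystem system = Metrics.initialize(); // starts "phoenix" on first call
    system.register("tracing-test-sink", "test sink gets logged", sink);
    // later calls hand back the same, already-initialized system
    MetricsSystem same = Metrics.initialize();
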
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java
new file mode 100644
index 0000000..47c1dda
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/MetricsInfoImpl.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import com.google.common.base.Objects;
+import static com.google.common.base.Preconditions.*;
+import org.apache.hadoop.metrics2.MetricsInfo;
+
+/**
+ * Making implementing metric info a little easier
+ * <p>
+ * Just a copy of the same from Hadoop, but exposed for usage.
+ */
+public class MetricsInfoImpl implements MetricsInfo {
+  private final String name, description;
+
+  MetricsInfoImpl(String name, String description) {
+    this.name = checkNotNull(name, "name");
+    this.description = checkNotNull(description, "description");
+  }
+
+  @Override public String name() {
+    return name;
+  }
+
+  @Override public String description() {
+    return description;
+  }
+
+  @Override public boolean equals(Object obj) {
+    if (obj instanceof MetricsInfo) {
+      MetricsInfo other = (MetricsInfo) obj;
+      return Objects.equal(name, other.name()) &&
+             Objects.equal(description, other.description());
+    }
+    return false;
+  }
+
+  @Override public int hashCode() {
+    return Objects.hashCode(name, description);
+  }
+
+  @Override public String toString() {
+    return Objects.toStringHelper(this)
+        .add("name", name).add("description", description)
+        .toString();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
new file mode 100644
index 0000000..265fc78
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterators;
+import org.apache.commons.configuration.Configuration;
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.phoenix.metrics.*;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.trace.util.Tracing;
+import org.apache.phoenix.util.QueryUtil;
+
+import javax.annotation.Nullable;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.*;
+
+import static org.apache.phoenix.metrics.MetricInfo.*;
+import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME;
+
+/**
+ * Write the metrics to a phoenix table.
+ * Generally, this class is instantiated via hadoop-metrics2 property files.
+ * Specifically, you would create this class by adding the following to your
+ * hadoop-metrics2 properties file.
+ * The sink class would be set as: <code>
+ * [prefix].sink.[some instance name].class=org.apache.phoenix.trace.PhoenixMetricsSink
+ * </code>, where <tt>prefix</tt> is either:
+ * <ol>
+ * <li>"phoenix", for the client</li>
+ * <li>"hbase", for the server</li>
+ * </ol>
+ * and
+ * <tt>some instance name</tt> is any unique name, so that properties can be
+ * differentiated when multiple sinks of the same type are created.
+ */
+public class PhoenixMetricsSink implements MetricsSink {
+
+    private static final Log LOG = LogFactory.getLog(PhoenixMetricsSink.class);
+
+    private static final String VARIABLE_VALUE = "?";
+
+    private static final Joiner COLUMN_JOIN = Joiner.on(".");
+    static final String TAG_FAMILY = "tags";
+    /**
+     * Count of the number of tags we are storing for this row
+     */
+    static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count");
+
+    static final String ANNOTATION_FAMILY = "annotations";
+    static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count");
+
+    /**
+     * Join strings on a comma
+     */
+    private static final Joiner COMMAS = Joiner.on(',');
+
+    private Connection conn;
+
+    private String table;
+
+    public PhoenixMetricsSink() {
+        LOG.info("Writing tracing metrics to phoenix table");
+
+    }
+
+    @Override
+    public void init(SubsetConfiguration config) {
+        Metrics.markSinkInitialized();
+        LOG.info("Phoenix tracing writer started");
+    }
+
+    /**
+     * Initialize <tt>this</tt> only when we need it
+     */
+    private void lazyInitialize() {
+        synchronized (this) {
+            if (this.conn != null) {
+                return;
+            }
+            try {
+                // create the phoenix connection
+                Properties props = new Properties();
+                props.setProperty(QueryServices.TRACING_FREQ_ATTRIB,
+                        Tracing.Frequency.NEVER.getKey());
+                org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create();
+                Connection conn = QueryUtil.getConnection(props, conf);
+                // enable bulk loading when we have enough data
+                conn.setAutoCommit(true);
+
+                String tableName =
+                        conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
+                                QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+
+                initializeInternal(conn, tableName);
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    private void initializeInternal(Connection conn, String tableName) throws SQLException {
+        this.conn = conn;
+
+        // ensure that the target table already exists
+        createTable(conn, tableName);
+    }
+
+    /**
+     * Used for <b>TESTING ONLY</b>
+     * Initialize the connection and setup the table to use the
+     * {@link org.apache.phoenix.query.QueryServicesOptions#DEFAULT_TRACING_STATS_TABLE_NAME}
+     *
+     * @param conn to store for upserts and to create the table (if necessary)
+     * @throws SQLException if any phoenix operation fails
+     */
+    @VisibleForTesting
+    public void initForTesting(Connection conn) throws SQLException {
+        initializeInternal(conn, QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+    }
+
+    /**
+     * Create a stats table with the given name. Stores the name for use later when creating upsert
+     * statements
+     *
+     * @param conn  connection to use when creating the table
+     * @param table name of the table to create
+     * @throws SQLException if any phoenix operations fails
+     */
+    private void createTable(Connection conn, String table) throws SQLException {
+        // only primary-key columns can be marked non-null
+        String ddl =
+                "create table if not exists " + table + "( " +
+                        TRACE.columnName + " bigint not null, " +
+                        PARENT.columnName + " bigint not null, " +
+                        SPAN.columnName + " bigint not null, " +
+                        DESCRIPTION.columnName + " varchar, " +
+                        START.columnName + " bigint, " +
+                        END.columnName + " bigint, " +
+                        HOSTNAME.columnName + " varchar, " +
+                        TAG_COUNT + " smallint, " +
+                        ANNOTATION_COUNT + " smallint" +
+                        "  CONSTRAINT pk PRIMARY KEY (" + TRACE.columnName + ", "
+                        + PARENT.columnName + ", " + SPAN.columnName + "))\n";
+        PreparedStatement stmt = conn.prepareStatement(ddl);
+        stmt.execute();
+        this.table = table;
+    }
+
+    @Override
+    public void flush() {
+        try {
+            this.conn.commit();
+            this.conn.rollback();
+        } catch (SQLException e) {
+            LOG.error("Failed to commit changes to table", e);
+        }
+    }
+
+    /**
+     * Add a new metric record to be written.
+     *
+     * @param record
+     */
+    @Override
+    public void putMetrics(MetricsRecord record) {
+        // it's not a tracing record, so we are done. This could also be handled by filters, but safer
+        // to do it here, in case it gets misconfigured
+        if (!record.name().startsWith(TracingUtils.METRIC_SOURCE_KEY)) {
+            return;
+        }
+
+        // don't initialize until we actually have something to write
+        lazyInitialize();
+
+        String stmt = "UPSERT INTO " + table + " (";
+        // drop it into the queue of things that should be written
+        List<String> keys = new ArrayList<String>();
+        List<Object> values = new ArrayList<Object>();
+        // we need to keep variable values in a separate set since they may have spaces, which
+        // causes the parser to barf. Instead, we need to add them after the statement is prepared
+        List<String> variableValues = new ArrayList<String>(record.tags().size());
+        keys.add(TRACE.columnName);
+        values.add(
+                Long.parseLong(record.name().substring(TracingUtils.METRIC_SOURCE_KEY.length())));
+
+        keys.add(DESCRIPTION.columnName);
+        values.add(VARIABLE_VALUE);
+        variableValues.add(record.description());
+
+        // add each of the metrics
+        for (AbstractMetric metric : record.metrics()) {
+            // name of the metric is also the column name to which we write
+            keys.add(MetricInfo.getColumnName(metric.name()));
+            values.add(metric.value());
+        }
+
+        // get the tags out so we can set them later (otherwise, need to be a single value)
+        int annotationCount = 0;
+        int tagCount = 0;
+        for (MetricsTag tag : record.tags()) {
+            if (tag.name().equals(ANNOTATION.traceName)) {
+                addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION,
+                        annotationCount);
+                annotationCount++;
+            } else if (tag.name().equals(TAG.traceName)) {
+                addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount);
+                tagCount++;
+            } else if (tag.name().equals(HOSTNAME.traceName)) {
+                keys.add(HOSTNAME.columnName);
+                values.add(VARIABLE_VALUE);
+                variableValues.add(tag.value());
+            } else if (tag.name().equals("Context")) {
+                // ignored
+            } else {
+                LOG.error("Got an unexpected tag: " + tag);
+            }
+        }
+
+        // add the tag count, now that we know it
+        keys.add(TAG_COUNT);
+        // ignore the hostname in the tags, if we know it
+        values.add(tagCount);
+
+        keys.add(ANNOTATION_COUNT);
+        values.add(annotationCount);
+
+        // compile the statement together
+        stmt += COMMAS.join(keys);
+        stmt += ") VALUES (" + COMMAS.join(values) + ")";
+
+        if (LOG.isTraceEnabled()) {
+            LOG.trace("Logging metrics to phoenix table via: " + stmt);
+            LOG.trace("With tags: " + variableValues);
+        }
+        try {
+            PreparedStatement ps = conn.prepareStatement(stmt);
+            // add everything that wouldn't/may not parse
+            int index = 1;
+            for (String tag : variableValues) {
+                ps.setString(index++, tag);
+            }
+            ps.execute();
+        } catch (SQLException e) {
+            LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt,
+                    e);
+        }
+    }
+
+    public static String getDynamicColumnName(String family, String column, int count) {
+        return COLUMN_JOIN.join(family, column) + count;
+    }
+
+    private void addDynamicEntry(List<String> keys, List<Object> values,
+            List<String> variableValues, String family, MetricsTag tag,
+            MetricInfo metric, int count) {
+        // <family><.dynColumn><count> <VARCHAR>
+        keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR");
+
+        // build the annotation value
+        String val = tag.description() + " - " + tag.value();
+        values.add(VARIABLE_VALUE);
+        variableValues.add(val);
+    }
+
+    @VisibleForTesting
+    public void clearForTesting() throws SQLException {
+        this.conn.rollback();
+    }
+}
\ No newline at end of file

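Per the javadoc above, the sink is wired up through a hadoop-metrics2 properties file whose prefix must match Metrics.METRICS_SYSTEM_NAME ("phoenix") on the client, or "hbase" on the server. A minimal sketch; the file name and the instance name "tracing" are illustrative, not prescribed by this patch:

    # hadoop-metrics2-phoenix.properties (client side; file name assumed)
    phoenix.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink

    # server-side equivalent
    hbase.sink.tracing.class=org.apache.phoenix.trace.PhoenixMetricsSink
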
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java
deleted file mode 100644
index 7fcb92d..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixTableMetricsWriter.java
+++ /dev/null
@@ -1,278 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import static org.apache.phoenix.metrics.MetricInfo.ANNOTATION;
-import static org.apache.phoenix.metrics.MetricInfo.DESCRIPTION;
-import static org.apache.phoenix.metrics.MetricInfo.END;
-import static org.apache.phoenix.metrics.MetricInfo.HOSTNAME;
-import static org.apache.phoenix.metrics.MetricInfo.PARENT;
-import static org.apache.phoenix.metrics.MetricInfo.SPAN;
-import static org.apache.phoenix.metrics.MetricInfo.START;
-import static org.apache.phoenix.metrics.MetricInfo.TAG;
-import static org.apache.phoenix.metrics.MetricInfo.TRACE;
-
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.phoenix.metrics.MetricInfo;
-import org.apache.phoenix.metrics.MetricsWriter;
-import org.apache.phoenix.metrics.PhoenixAbstractMetric;
-import org.apache.phoenix.metrics.PhoenixMetricTag;
-import org.apache.phoenix.metrics.PhoenixMetricsRecord;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.trace.util.Tracing;
-import org.apache.phoenix.util.QueryUtil;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Joiner;
-
-/**
- * Sink that writes phoenix metrics to a phoenix table
- * <p>
- * Each metric record should only correspond to a single completed span. Each span is only updated
- * in the phoenix table <i>once</i>
- */
-public class PhoenixTableMetricsWriter implements MetricsWriter {
-
-    private static final String VARIABLE_VALUE = "?";
-
-    public static final Log LOG = LogFactory.getLog(PhoenixTableMetricsWriter.class);
-
-    private static final Joiner COLUMN_JOIN = Joiner.on(".");
-    static final String TAG_FAMILY = "tags";
-    /** Count of the number of tags we are storing for this row */
-    static final String TAG_COUNT = COLUMN_JOIN.join(TAG_FAMILY, "count");
-
-    static final String ANNOTATION_FAMILY = "annotations";
-    static final String ANNOTATION_COUNT = COLUMN_JOIN.join(ANNOTATION_FAMILY, "count");
-
-    /** Join strings on a comma */
-    private static final Joiner COMMAS = Joiner.on(',');
-
-    private Connection conn;
-
-    private String table;
-
-    @Override
-    public void initialize() {
-        LOG.info("Phoenix tracing writer started");
-    }
-
-    /**
-     * Initialize <tt>this</tt> only when we need it
-     */
-    private void lazyInitialize() {
-        synchronized (this) {
-            if (this.conn != null) {
-                return;
-            }
-            try {
-                // create the phoenix connection
-                Properties props = new Properties();
-                props.setProperty(QueryServices.TRACING_FREQ_ATTRIB,
-                    Tracing.Frequency.NEVER.getKey());
-                Configuration conf = HBaseConfiguration.create();
-                Connection conn = QueryUtil.getConnection(props, conf);
-                // enable bulk loading when we have enough data
-                conn.setAutoCommit(true);
-
-                String tableName =
-                        conf.get(QueryServices.TRACING_STATS_TABLE_NAME_ATTRIB,
-                            QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
-
-                initializeInternal(conn, tableName);
-            } catch (Exception e) {
-                throw new RuntimeException(e);
-            }
-        }
-    }
-
-    private void initializeInternal(Connection conn, String tableName) throws SQLException {
-        this.conn = conn;
-
-        // ensure that the target table already exists
-        createTable(conn, tableName);
-    }
-
-    /**
-     * Used for <b>TESTING ONLY</b>
-     * <p>
-     * Initialize the connection and setup the table to use the
-     * {@link TracingCompat#DEFAULT_TRACING_STATS_TABLE_NAME}
-     * @param conn to store for upserts and to create the table (if necessary)
-     * @throws SQLException if any phoenix operation fails
-     */
-    @VisibleForTesting
-    public void initForTesting(Connection conn) throws SQLException {
-        initializeInternal(conn, QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
-    }
-
-    /**
-     * Create a stats table with the given name. Stores the name for use later when creating upsert
-     * statements
-     * @param conn connection to use when creating the table
-     * @param table name of the table to create
-     * @throws SQLException if any phoenix operations fails
-     */
-    private void createTable(Connection conn, String table) throws SQLException {
-        // only primary-key columns can be marked non-null
-        String ddl =
-                "create table if not exists " + table + "( " + 
-                        TRACE.columnName + " bigint not null, " +
-                        PARENT.columnName + " bigint not null, " +
-                        SPAN.columnName + " bigint not null, " +
-                        DESCRIPTION.columnName + " varchar, " +
-                        START.columnName + " bigint, " +
-                        END.columnName + " bigint, " +
-                        HOSTNAME.columnName + " varchar, " +
-                        TAG_COUNT + " smallint, " +
-                        ANNOTATION_COUNT + " smallint" +
-                        "  CONSTRAINT pk PRIMARY KEY (" + TRACE.columnName + ", "
-                            + PARENT.columnName + ", " + SPAN.columnName + "))\n";
-        PreparedStatement stmt = conn.prepareStatement(ddl);
-        stmt.execute();
-        this.table = table;
-    }
-
-    @Override
-    public void flush() {
-        try {
-            this.conn.commit();
-            this.conn.rollback();
-        } catch (SQLException e) {
-            LOG.error("Failed to commit changes to table", e);
-        }
-    }
-
-    /**
-     * Add a new metric record to be written.
-     * @param record
-     */
-    @Override
-    public void addMetrics(PhoenixMetricsRecord record) {
-        // its not a tracing record, we are done. This could also be handled by filters, but safer
-        // to do it here, in case it gets misconfigured
-        if (!record.name().startsWith(TracingCompat.METRIC_SOURCE_KEY)) {
-            return;
-        }
-
-        // don't initialize until we actually have something to write
-        lazyInitialize();
-
-        String stmt = "UPSERT INTO " + table + " (";
-        // drop it into the queue of things that should be written
-        List<String> keys = new ArrayList<String>();
-        List<Object> values = new ArrayList<Object>();
-        // we need to keep variable values in a separate set since they may have spaces, which
-        // causes the parser to barf. Instead, we need to add them after the statement is prepared
-        List<String> variableValues = new ArrayList<String>(record.tags().size());
-        keys.add(TRACE.columnName);
-        values.add(Long.parseLong(record.name().substring(TracingCompat.METRIC_SOURCE_KEY.length())));
-
-        keys.add(DESCRIPTION.columnName);
-        values.add(VARIABLE_VALUE);
-        variableValues.add(record.description());
-
-        // add each of the metrics
-        for (PhoenixAbstractMetric metric : record.metrics()) {
-            // name of the metric is also the column name to which we write
-            keys.add(MetricInfo.getColumnName(metric.getName()));
-            values.add(metric.value());
-        }
-
-        // get the tags out so we can set them later (otherwise, need to be a single value)
-        int annotationCount = 0;
-        int tagCount = 0;
-        for (PhoenixMetricTag tag : record.tags()) {
-            if (tag.name().equals(ANNOTATION.traceName)) {
-                addDynamicEntry(keys, values, variableValues, ANNOTATION_FAMILY, tag, ANNOTATION,
-                    annotationCount);
-                annotationCount++;
-            } else if (tag.name().equals(TAG.traceName)) {
-                addDynamicEntry(keys, values, variableValues, TAG_FAMILY, tag, TAG, tagCount);
-                tagCount++;
-            } else if (tag.name().equals(HOSTNAME.traceName)) {
-                keys.add(HOSTNAME.columnName);
-                values.add(VARIABLE_VALUE);
-                variableValues.add(tag.value());
-            } else if (tag.name().equals("Context")) {
-                // ignored
-            } else {
-                LOG.error("Got an unexpected tag: " + tag);
-            }
-        }
-
-        // add the tag count, now that we know it
-        keys.add(TAG_COUNT);
-        // ignore the hostname in the tags, if we know it
-        values.add(tagCount);
-
-        keys.add(ANNOTATION_COUNT);
-        values.add(annotationCount);
-
-        // compile the statement together
-        stmt += COMMAS.join(keys);
-        stmt += ") VALUES (" + COMMAS.join(values) + ")";
-
-        if (LOG.isTraceEnabled()) {
-            LOG.trace("Logging metrics to phoenix table via: " + stmt);
-            LOG.trace("With tags: " + variableValues);
-        }
-        try {
-            PreparedStatement ps = conn.prepareStatement(stmt);
-            // add everything that wouldn't/may not parse
-            int index = 1;
-            for (String tag : variableValues) {
-                ps.setString(index++, tag);
-            }
-            ps.execute();
-        } catch (SQLException e) {
-            LOG.error("Could not write metric: \n" + record + " to prepared statement:\n" + stmt, e);
-        }
-    }
-
-    public static String getDynamicColumnName(String family, String column, int count) {
-        return COLUMN_JOIN.join(family, column) + count;
-    }
-
-    private void addDynamicEntry(List<String> keys, List<Object> values,
-            List<String> variableValues, String family, PhoenixMetricTag tag,
-            MetricInfo metric, int count) {
-        // <family><.dynColumn><count> <VARCHAR>
-        keys.add(getDynamicColumnName(family, metric.columnName, count) + " VARCHAR");
-
-        // build the annotation value
-        String val = tag.description() + " - " + tag.value();
-        values.add(VARIABLE_VALUE);
-        variableValues.add(val);
-    }
-
-    public void clearForTesting() throws SQLException {
-        this.conn.rollback();
-    }
-}
\ No newline at end of file
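
For context on the statement-building above: Phoenix dynamic columns let a statement declare typed columns, such as family.column0 VARCHAR, directly in the UPSERT column list without them being part of the table DDL, which is how each tag and annotation gets its own indexed column. A minimal JDBC sketch of that statement shape (the table and column names here are illustrative placeholders, not the actual MetricInfo values):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    public class DynamicColumnUpsertSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 // Each tag becomes a dynamic VARCHAR column suffixed with its index;
                 // free-form values are bound, not inlined, since they may contain spaces.
                 PreparedStatement ps = conn.prepareStatement(
                     "UPSERT INTO TRACE_STATS (trace_id, parent_id, span_id, description, "
                         + "tags.t0 VARCHAR, tag_count) VALUES (123, 0, 456, ?, ?, 1)")) {
                ps.setString(1, "full scan over MY_TABLE");
                ps.setString(2, "region moved - retried");
                ps.execute();
                conn.commit();
            }
        }
    }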

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
new file mode 100644
index 0000000..1b9e31a
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceMetricSource.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.trace;
+
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.metrics2.*;
+import org.apache.hadoop.metrics2.lib.Interns;
+import org.apache.phoenix.metrics.MetricInfo;
+import org.apache.phoenix.metrics.Metrics;
+import org.cloudera.htrace.HTraceConfiguration;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.SpanReceiver;
+import org.cloudera.htrace.TimelineAnnotation;
+import org.cloudera.htrace.impl.MilliSpan;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import static org.apache.phoenix.metrics.MetricInfo.*;
+
+/**
+ * Sink for request traces ({@link SpanReceiver}) that pushes writes to {@link MetricsSource} in a
+ * format that we can more easily consume.
+ * <p>
+ * <p>
+ * Rather than write directly to a phoenix table, we drop it into the metrics queue so we can more
+ * cleanly handle it asynchronously. Currently, {@link MilliSpan} submits the span in a synchronized
+ * block to all the receivers, which could have a lot of overhead if we are submitting to multiple
+ * receivers.
+ * <p>
+ * The format of the generated metrics is this:
+ * <ol>
+ *   <li>All Metrics from the same span have the same name (allowing correlation in the sink)</li>
+ *   <li>The description of the metric describes what it contains. For instance,
+ *   <ul>
+ *     <li>{@link MetricInfo#PARENT} is the id of the parent of this span. (Root span is
+ *     {@link Span#ROOT_SPAN_ID}).</li>
+ *     <li>{@value MetricInfo#START} is the start time of the span</li>
+ *     <li>{@value MetricInfo#END} is the end time of the span</li>
+ *   </ul></li>
+ *   <li>Each span's messages are contained in a {@link MetricsTag} with the same name as above and a
+ *   generic counter for the number of messages (to differentiate messages and provide timeline
+ *   ordering).</li>
+ * </ol>
+ * <p>
+ * <i>So why even submit to the metrics2 framework if we only have a single source?</i>
+ * <p>
+ * This allows us to make the updates in batches. We might have spans that finish before other spans
+ * (for instance in the same parent). By batching the updates we can lessen the overhead on the
+ * client, which is also busy doing 'real' work. <br>
+ * We could make our own queue and manage batching and filtering and dropping extra metrics, but
+ * that starts to get complicated fast (it's not as easy as it sounds), so we use metrics2 to abstract
+ * out that pipeline, which also gives us the flexibility to dump metrics to other sources.
+ * <p>
+ * This is a somewhat rough implementation - we do excessive locking for correctness,
+ * rather than trying to make it fast, for the moment.
+ */
+public class TraceMetricSource implements SpanReceiver, MetricsSource {
+
+  private static final String EMPTY_STRING = "";
+
+  private static final String CONTEXT = "tracing";
+
+  private List<Metric> spans = new ArrayList<Metric>();
+
+  public TraceMetricSource() {
+
+    MetricsSystem manager = Metrics.initialize();
+
+    // Register this instance.
+    // For right now, we ignore the MBean registration issues that show up in DEBUG logs. Basically,
+    // we need a Jmx MBean compliant name. We'll get to a better name when we want that later
+    manager.register(CONTEXT, "Phoenix call tracing", this);
+  }
+
+  @Override
+  public void receiveSpan(Span span) {
+    Metric builder = new Metric(span);
+    // add all the metrics for the span
+    builder.addCounter(Interns.info(SPAN.traceName, EMPTY_STRING), span.getSpanId());
+    builder.addCounter(Interns.info(PARENT.traceName, EMPTY_STRING), span.getParentId());
+    builder.addCounter(Interns.info(START.traceName, EMPTY_STRING), span.getStartTimeMillis());
+    builder.addCounter(Interns.info(END.traceName, EMPTY_STRING), span.getStopTimeMillis());
+    // add the tags to the span. They were written in order received so we mark them as such
+    for (TimelineAnnotation ta : span.getTimelineAnnotations()) {
+      builder.add(new MetricsTag(Interns.info(TAG.traceName, Long.toString(ta.getTime())), ta
+          .getMessage()));
+    }
+
+    // add the annotations. We assume they are serialized as strings and integers, but that can
+    // change in the future
+    Map<byte[], byte[]> annotations = span.getKVAnnotations();
+    for (Entry<byte[], byte[]> annotation : annotations.entrySet()) {
+      Pair<String, String> val =
+          TracingUtils.readAnnotation(annotation.getKey(), annotation.getValue());
+      builder.add(new MetricsTag(Interns.info(ANNOTATION.traceName, val.getFirst()), val
+          .getSecond()));
+    }
+
+    // add the span to the list we care about
+    synchronized (this) {
+      spans.add(builder);
+    }
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    // add a marker record so we know how many spans are used
+    // this is also necessary to ensure that we register the metrics source as an MBean (avoiding a
+    // runtime warning)
+    MetricsRecordBuilder marker = collector.addRecord(TracingUtils.METRICS_MARKER_CONTEXT);
+    marker.add(new MetricsTag(new MetricsInfoImpl("stat", "num spans"), Integer
+        .toString(spans.size())));
+
+    // actually convert the known spans into metric records as well
+    synchronized (this) {
+      for (Metric span : spans) {
+        MetricsRecordBuilder builder = collector.addRecord(new MetricsInfoImpl(TracingUtils
+            .getTraceMetricName(span.id), span.desc));
+        builder.setContext(TracingUtils.METRICS_CONTEXT);
+        for (Pair<MetricsInfo, Long> metric : span.counters) {
+          builder.addCounter(metric.getFirst(), metric.getSecond());
+        }
+        for (MetricsTag tag : span.tags) {
+          builder.add(tag);
+        }
+      }
+      // reset the spans so we don't keep a big chunk of memory around
+      spans = new ArrayList<Metric>();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    // noop
+  }
+
+  @Override
+  public void configure(HTraceConfiguration conf) {
+    // noop
+  }
+
+  private static class Metric {
+
+    List<Pair<MetricsInfo, Long>> counters = new ArrayList<Pair<MetricsInfo, Long>>();
+    List<MetricsTag> tags = new ArrayList<MetricsTag>();
+    private String id;
+    private String desc;
+
+    public Metric(Span span) {
+      this.id = Long.toString(span.getTraceId());
+      this.desc = span.getDescription();
+    }
+
+    /**
+     * @param metricsInfoImpl metric key (name and description) under which the value is stored
+     * @param startTimeMillis counter value, e.g. the start time of the span in milliseconds
+     */
+    public void addCounter(MetricsInfo metricsInfoImpl, long startTimeMillis) {
+      counters.add(new Pair<MetricsInfo, Long>(metricsInfoImpl, startTimeMillis));
+    }
+
+    /**
+     * @param metricsTag tag (timeline message or key/value annotation) to attach to the span
+     */
+    public void add(MetricsTag metricsTag) {
+      tags.add(metricsTag);
+    }
+  }
+}
\ No newline at end of file
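
To make the record layout described in the class javadoc concrete, here is a dependency-free sketch of what a single span turns into (the names and the metric-name prefix are illustrative; the real prefix comes from TracingUtils):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SpanRecordSketch {
        public static void main(String[] args) {
            // All metrics of one span share a record name derived from the trace id.
            String recordName = "phoenix.trace.1234";
            Map<String, Long> counters = new LinkedHashMap<String, Long>();
            counters.put("span_id", 456L);
            counters.put("parent_id", 0L);            // root spans carry ROOT_SPAN_ID
            counters.put("start_time", 1414540800000L);
            counters.put("end_time", 1414540800042L);
            // Timeline messages are tagged with their arrival time for ordering;
            // key/value annotations are tagged under the annotation key.
            Map<String, String> tags = new LinkedHashMap<String, String>();
            tags.put("1414540800001", "opened scanner");
            tags.put("client", "sqlline");
            System.out.println(recordName + " " + counters + " " + tags);
        }
    }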

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index 3d6eb9b..f3fc81d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -40,7 +40,7 @@ import com.google.common.base.Joiner;
 import com.google.common.primitives.Longs;
 
 /**
- * Read the traces written to phoenix tables by the {@link PhoenixTableMetricsWriter}.
+ * Read the traces written to phoenix tables by the {@link PhoenixMetricsSink}.
  */
 public class TraceReader {
 
@@ -54,8 +54,8 @@ public class TraceReader {
                 comma.join(MetricInfo.TRACE.columnName, MetricInfo.PARENT.columnName,
                     MetricInfo.SPAN.columnName, MetricInfo.DESCRIPTION.columnName,
                     MetricInfo.START.columnName, MetricInfo.END.columnName,
-                    MetricInfo.HOSTNAME.columnName, PhoenixTableMetricsWriter.TAG_COUNT,
-                    PhoenixTableMetricsWriter.ANNOTATION_COUNT);
+                    MetricInfo.HOSTNAME.columnName, PhoenixMetricsSink.TAG_COUNT,
+                        PhoenixMetricsSink.ANNOTATION_COUNT);
     }
 
     private Connection conn;
@@ -181,13 +181,13 @@ public class TraceReader {
     private Collection<? extends String> getTags(long traceid, long parent, long span, int count)
             throws SQLException {
         return getDynamicCountColumns(traceid, parent, span, count,
-            PhoenixTableMetricsWriter.TAG_FAMILY, MetricInfo.TAG.columnName);
+                PhoenixMetricsSink.TAG_FAMILY, MetricInfo.TAG.columnName);
     }
 
     private Collection<? extends String> getAnnotations(long traceid, long parent, long span,
             int count) throws SQLException {
         return getDynamicCountColumns(traceid, parent, span, count,
-            PhoenixTableMetricsWriter.ANNOTATION_FAMILY, MetricInfo.ANNOTATION.columnName);
+                PhoenixMetricsSink.ANNOTATION_FAMILY, MetricInfo.ANNOTATION.columnName);
     }
 
     private Collection<? extends String> getDynamicCountColumns(long traceid, long parent,
@@ -199,7 +199,7 @@ public class TraceReader {
         // build the column strings, family.column<index>
         String[] parts = new String[count];
         for (int i = 0; i < count; i++) {
-            parts[i] = PhoenixTableMetricsWriter.getDynamicColumnName(family, columnName, i);
+            parts[i] = PhoenixMetricsSink.getDynamicColumnName(family, columnName, i);
         }
         // join the columns together
         String columns = comma.join(parts);


[13/24] git commit: PHOENIX-1376 java.lang.NullPointerException occurs in JDBC driver

Posted by ja...@apache.org.
PHOENIX-1376 java.lang.NullPointerException occurs in JDBC driver


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b2c5ffa9
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b2c5ffa9
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b2c5ffa9

Branch: refs/heads/4.2
Commit: b2c5ffa9cfa4962da675df2e9a9480b9d9887c1a
Parents: f4d8bb0
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 27 13:40:47 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Oct 27 13:40:47 2014 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java | 5 -----
 .../main/java/org/apache/phoenix/schema/tuple/ResultTuple.java  | 2 +-
 2 files changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2c5ffa9/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
index e662a3f..8a6cf64 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixResultSet.java
@@ -1232,9 +1232,4 @@ public class PhoenixResultSet implements ResultSet, SQLCloseable, org.apache.pho
     public <T> T getObject(String columnLabel, Class<T> type) throws SQLException {
         return (T) getObject(columnLabel); // Just ignore type since we only support built-in types
     }
-
-    @Override
-    public String toString(){
-      return "ResultSet:\n"+ "\tclosed: "+this.isClosed+"\n\tcurrent row: "+currentRow;
-    }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2c5ffa9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
index a7f411c..c28a2bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
@@ -65,7 +65,7 @@ public class ResultTuple extends BaseTuple {
     public String toString() {
       StringBuilder sb = new StringBuilder();
       sb.append("keyvalues=");
-      if(this.result.isEmpty()) {
+      if(this.result == null || this.result.isEmpty()) {
         sb.append("NONE");
         return sb.toString();
       }
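
The shape of the fix, isolated: toString() must be safe to call in any object state, so every dereference of a field that can legitimately be null gets guarded. A self-contained sketch (the class and field here are illustrative stand-ins for the Phoenix types):

    import java.util.Arrays;
    import java.util.List;

    public class TupleToStringSketch {
        private final List<String> result; // may be null until a row is available

        public TupleToStringSketch(List<String> result) { this.result = result; }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder("keyvalues=");
            if (result == null || result.isEmpty()) {
                return sb.append("NONE").toString();
            }
            return sb.append(result).toString();
        }

        public static void main(String[] args) {
            System.out.println(new TupleToStringSketch(null));                  // keyvalues=NONE
            System.out.println(new TupleToStringSketch(Arrays.asList("a=1")));  // keyvalues=[a=1]
        }
    }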


[21/24] git commit: Serialize table timestamp of when stats were calculated

Posted by ja...@apache.org.
Serialize table timestamp of when stats were calculated


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/346891be
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/346891be
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/346891be

Branch: refs/heads/4.2
Commit: 346891be76a5307b41912af408f8adeef1283ae0
Parents: b354228
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 13:16:12 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 13:17:01 2014 -0700

----------------------------------------------------------------------
 .../coprocessor/generated/PTableProtos.java     | 103 +++++++++++++++++--
 .../org/apache/phoenix/schema/PTableImpl.java   |   5 +-
 phoenix-protocol/src/main/PTable.proto          |   1 +
 3 files changed, 101 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/346891be/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
index f1b3be1..ff2cfe4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/PTableProtos.java
@@ -2722,6 +2722,16 @@ public final class PTableProtos {
      * <code>optional bytes indexType = 22;</code>
      */
     com.google.protobuf.ByteString getIndexType();
+
+    // optional int64 statsTimeStamp = 23;
+    /**
+     * <code>optional int64 statsTimeStamp = 23;</code>
+     */
+    boolean hasStatsTimeStamp();
+    /**
+     * <code>optional int64 statsTimeStamp = 23;</code>
+     */
+    long getStatsTimeStamp();
   }
   /**
    * Protobuf type {@code PTable}
@@ -2902,6 +2912,11 @@ public final class PTableProtos {
               indexType_ = input.readBytes();
               break;
             }
+            case 184: {
+              bitField0_ |= 0x00040000;
+              statsTimeStamp_ = input.readInt64();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3400,6 +3415,22 @@ public final class PTableProtos {
       return indexType_;
     }
 
+    // optional int64 statsTimeStamp = 23;
+    public static final int STATSTIMESTAMP_FIELD_NUMBER = 23;
+    private long statsTimeStamp_;
+    /**
+     * <code>optional int64 statsTimeStamp = 23;</code>
+     */
+    public boolean hasStatsTimeStamp() {
+      return ((bitField0_ & 0x00040000) == 0x00040000);
+    }
+    /**
+     * <code>optional int64 statsTimeStamp = 23;</code>
+     */
+    public long getStatsTimeStamp() {
+      return statsTimeStamp_;
+    }
+
     private void initFields() {
       schemaNameBytes_ = com.google.protobuf.ByteString.EMPTY;
       tableNameBytes_ = com.google.protobuf.ByteString.EMPTY;
@@ -3423,6 +3454,7 @@ public final class PTableProtos {
       tenantId_ = com.google.protobuf.ByteString.EMPTY;
       viewIndexId_ = 0;
       indexType_ = com.google.protobuf.ByteString.EMPTY;
+      statsTimeStamp_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -3556,6 +3588,9 @@ public final class PTableProtos {
       if (((bitField0_ & 0x00020000) == 0x00020000)) {
         output.writeBytes(22, indexType_);
       }
+      if (((bitField0_ & 0x00040000) == 0x00040000)) {
+        output.writeInt64(23, statsTimeStamp_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -3658,6 +3693,10 @@ public final class PTableProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBytesSize(22, indexType_);
       }
+      if (((bitField0_ & 0x00040000) == 0x00040000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeInt64Size(23, statsTimeStamp_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -3779,6 +3818,11 @@ public final class PTableProtos {
         result = result && getIndexType()
             .equals(other.getIndexType());
       }
+      result = result && (hasStatsTimeStamp() == other.hasStatsTimeStamp());
+      if (hasStatsTimeStamp()) {
+        result = result && (getStatsTimeStamp()
+            == other.getStatsTimeStamp());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -3880,6 +3924,10 @@ public final class PTableProtos {
         hash = (37 * hash) + INDEXTYPE_FIELD_NUMBER;
         hash = (53 * hash) + getIndexType().hashCode();
       }
+      if (hasStatsTimeStamp()) {
+        hash = (37 * hash) + STATSTIMESTAMP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getStatsTimeStamp());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -4048,6 +4096,8 @@ public final class PTableProtos {
         bitField0_ = (bitField0_ & ~0x00100000);
         indexType_ = com.google.protobuf.ByteString.EMPTY;
         bitField0_ = (bitField0_ & ~0x00200000);
+        statsTimeStamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00400000);
         return this;
       }
 
@@ -4180,6 +4230,10 @@ public final class PTableProtos {
           to_bitField0_ |= 0x00020000;
         }
         result.indexType_ = indexType_;
+        if (((from_bitField0_ & 0x00400000) == 0x00400000)) {
+          to_bitField0_ |= 0x00040000;
+        }
+        result.statsTimeStamp_ = statsTimeStamp_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -4340,6 +4394,9 @@ public final class PTableProtos {
         if (other.hasIndexType()) {
           setIndexType(other.getIndexType());
         }
+        if (other.hasStatsTimeStamp()) {
+          setStatsTimeStamp(other.getStatsTimeStamp());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -5878,6 +5935,39 @@ public final class PTableProtos {
         return this;
       }
 
+      // optional int64 statsTimeStamp = 23;
+      private long statsTimeStamp_ ;
+      /**
+       * <code>optional int64 statsTimeStamp = 23;</code>
+       */
+      public boolean hasStatsTimeStamp() {
+        return ((bitField0_ & 0x00400000) == 0x00400000);
+      }
+      /**
+       * <code>optional int64 statsTimeStamp = 23;</code>
+       */
+      public long getStatsTimeStamp() {
+        return statsTimeStamp_;
+      }
+      /**
+       * <code>optional int64 statsTimeStamp = 23;</code>
+       */
+      public Builder setStatsTimeStamp(long value) {
+        bitField0_ |= 0x00400000;
+        statsTimeStamp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int64 statsTimeStamp = 23;</code>
+       */
+      public Builder clearStatsTimeStamp() {
+        bitField0_ = (bitField0_ & ~0x00400000);
+        statsTimeStamp_ = 0L;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:PTable)
     }
 
@@ -5922,7 +6012,7 @@ public final class PTableProtos {
       "ed\030\013 \001(\010\"w\n\013PTableStats\022\013\n\003key\030\001 \002(\014\022\016\n\006" +
       "values\030\002 \003(\014\022\033\n\023guidePostsByteCount\030\003 \001(" +
       "\003\022\025\n\rkeyBytesCount\030\004 \001(\003\022\027\n\017guidePostsCo" +
-      "unt\030\005 \001(\005\"\212\004\n\006PTable\022\027\n\017schemaNameBytes\030",
+      "unt\030\005 \001(\005\"\242\004\n\006PTable\022\027\n\017schemaNameBytes\030",
       "\001 \002(\014\022\026\n\016tableNameBytes\030\002 \002(\014\022\036\n\ttableTy" +
       "pe\030\003 \002(\0162\013.PTableType\022\022\n\nindexState\030\004 \001(" +
       "\t\022\026\n\016sequenceNumber\030\005 \002(\003\022\021\n\ttimeStamp\030\006" +
@@ -5935,10 +6025,11 @@ public final class PTableProtos {
       "Tenant\030\020 \002(\010\022\020\n\010viewType\030\021 \001(\014\022\025\n\rviewSt",
       "atement\030\022 \001(\014\022\025\n\rphysicalNames\030\023 \003(\014\022\020\n\010" +
       "tenantId\030\024 \001(\014\022\023\n\013viewIndexId\030\025 \001(\005\022\021\n\ti" +
-      "ndexType\030\026 \001(\014*A\n\nPTableType\022\n\n\006SYSTEM\020\000" +
-      "\022\010\n\004USER\020\001\022\010\n\004VIEW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020" +
-      "\004B@\n(org.apache.phoenix.coprocessor.gene" +
-      "ratedB\014PTableProtosH\001\210\001\001\240\001\001"
+      "ndexType\030\026 \001(\014\022\026\n\016statsTimeStamp\030\027 \001(\003*A" +
+      "\n\nPTableType\022\n\n\006SYSTEM\020\000\022\010\n\004USER\020\001\022\010\n\004VI" +
+      "EW\020\002\022\t\n\005INDEX\020\003\022\010\n\004JOIN\020\004B@\n(org.apache." +
+      "phoenix.coprocessor.generatedB\014PTablePro" +
+      "tosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -5962,7 +6053,7 @@ public final class PTableProtos {
           internal_static_PTable_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_PTable_descriptor,
-              new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", });
+              new java.lang.String[] { "SchemaNameBytes", "TableNameBytes", "TableType", "IndexState", "SequenceNumber", "TimeStamp", "PkNameBytes", "BucketNum", "Columns", "Indexes", "IsImmutableRows", "GuidePosts", "DataTableNameBytes", "DefaultFamilyName", "DisableWAL", "MultiTenant", "ViewType", "ViewStatement", "PhysicalNames", "TenantId", "ViewIndexId", "IndexType", "StatsTimeStamp", });
           return null;
         }
       };

http://git-wip-us.apache.org/repos/asf/phoenix/blob/346891be/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 8f85ccc..66cfa0b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -886,7 +886,7 @@ public class PTableImpl implements PTable {
             GuidePostsInfo info = new GuidePostsInfo(pTableStatsProto.getGuidePostsByteCount(), value);
             tableGuidePosts.put(pTableStatsProto.getKey().toByteArray(), info);
       }
-      PTableStats stats = new PTableStatsImpl(tableGuidePosts, timeStamp);
+      PTableStats stats = new PTableStatsImpl(tableGuidePosts, table.getStatsTimeStamp());
 
       PName dataTableName = null;
       if (table.hasDataTableNameBytes()) {
@@ -979,7 +979,8 @@ public class PTableImpl implements PTable {
          }
          statsBuilder.setGuidePostsByteCount(entry.getValue().getByteCount());
          builder.addGuidePosts(statsBuilder.build());
-       }
+      }
+      builder.setStatsTimeStamp(table.getTableStats().getTimestamp());
 
       if (table.getParentName() != null) {
         builder.setDataTableNameBytes(HBaseZeroCopyByteString.wrap(table.getParentTableName().getBytes()));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/346891be/phoenix-protocol/src/main/PTable.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto
index b622a26..772d51d 100644
--- a/phoenix-protocol/src/main/PTable.proto
+++ b/phoenix-protocol/src/main/PTable.proto
@@ -77,4 +77,5 @@ message PTable {
   optional bytes tenantId = 20;
   optional int32 viewIndexId = 21;
   optional bytes indexType = 22;
+  optional int64 statsTimeStamp = 23;
 }
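
Since the field is optional, the protobuf 2 generated code tracks its presence in the bit-field seen in the Java diff above, and readers should consult hasStatsTimeStamp() before trusting the value (it defaults to 0 when unset, e.g. for messages written by older servers). A hedged sketch of such a defensive read; the fallback to the table timestamp is an assumption for illustration, not what PTableImpl does:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.phoenix.coprocessor.generated.PTableProtos;

    public final class StatsTimestampReader {
        static long statsTimestamp(byte[] serialized) throws InvalidProtocolBufferException {
            PTableProtos.PTable table = PTableProtos.PTable.parseFrom(serialized);
            // Unset on the wire means "no stats recorded yet"; fall back rather than trust 0.
            return table.hasStatsTimeStamp() ? table.getStatsTimeStamp() : table.getTimeStamp();
        }
    }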


[23/24] git commit: Rename method for consistency

Posted by ja...@apache.org.
Rename method for consistency


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6477e0fd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6477e0fd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6477e0fd

Branch: refs/heads/4.2
Commit: 6477e0fd3de14a1f1c351ef2a576ff70b6c7ff5b
Parents: 668d6ee
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 15:58:01 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 15:58:01 2014 -0700

----------------------------------------------------------------------
 .../java/org/apache/phoenix/query/ConnectionQueryServices.java   | 2 +-
 .../org/apache/phoenix/query/ConnectionQueryServicesImpl.java    | 2 +-
 .../apache/phoenix/query/ConnectionlessQueryServicesImpl.java    | 2 +-
 .../apache/phoenix/query/DelegateConnectionQueryServices.java    | 4 ++--
 .../src/main/java/org/apache/phoenix/schema/MetaDataClient.java  | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6477e0fd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
index 8af9310..8826b48 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServices.java
@@ -104,7 +104,7 @@ public interface ConnectionQueryServices extends QueryServices, MetaDataMutated
     public boolean supportsFeature(Feature feature);
     
     public String getUserName();
-    public void incrementTableTimeStamp(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
+    public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, long clientTS) throws SQLException;
 
     public PTableStats getTableStats(byte[] physicalName, long clientTimeStamp) throws SQLException;
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6477e0fd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 4f3a346..21208b5 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -1887,7 +1887,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
     
     @Override
-    public void incrementTableTimeStamp(final byte[] tenantId, final byte[] schemaName, final byte[] tableName,
+    public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName,
             final long clientTS) throws SQLException {
         // clear the meta data cache for the table here
         try {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6477e0fd/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
index 386050c..ec10d5f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionlessQueryServicesImpl.java
@@ -231,7 +231,7 @@ public class ConnectionlessQueryServicesImpl extends DelegateQueryServices imple
     }
     
     @Override
-    public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
+    public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
             throws SQLException {}
     // TODO: share this with ConnectionQueryServicesImpl
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6477e0fd/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
index defad5b..ae0b689 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/DelegateConnectionQueryServices.java
@@ -229,9 +229,9 @@ public class DelegateConnectionQueryServices extends DelegateQueryServices imple
     }
     
     @Override
-    public void incrementTableTimeStamp(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
+    public void clearTableFromCache(byte[] tenantId, byte[] schemaName, byte[] tableName, long clientTS)
             throws SQLException {
-        getDelegate().incrementTableTimeStamp(tenantId, schemaName, tableName, clientTS);
+        getDelegate().clearTableFromCache(tenantId, schemaName, tableName, clientTS);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/6477e0fd/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index b763bbb..5892d14 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -640,7 +640,7 @@ public class MetaDataClient {
 
         // We need to update the stats table so that client will pull the new one with
         // the updated stats.
-        connection.getQueryServices().incrementTableTimeStamp(tenantIdBytes,
+        connection.getQueryServices().clearTableFromCache(tenantIdBytes,
                 Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
                 Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTimeStamp);
         return rowCount;


[10/24] git commit: PHOENIX-897 Quote parameters in psql.py

Posted by ja...@apache.org.
PHOENIX-897 Quote parameters in psql.py

Properly quote supplied command-line parameters in psql.py so that
it's possible to supply any character (including ones that have
special meanings in various shells) as parameters.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f5a49bff
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f5a49bff
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f5a49bff

Branch: refs/heads/4.2
Commit: f5a49bff2c43686fc0381f5262de75f504e366ed
Parents: 7b57160
Author: Gabriel Reid <ga...@ngdata.com>
Authored: Mon Oct 27 10:57:43 2014 +0100
Committer: Gabriel Reid <ga...@ngdata.com>
Committed: Mon Oct 27 10:57:43 2014 +0100

----------------------------------------------------------------------
 bin/psql.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f5a49bff/bin/psql.py
----------------------------------------------------------------------
diff --git a/bin/psql.py b/bin/psql.py
index a8cbe31..34a95df 100755
--- a/bin/psql.py
+++ b/bin/psql.py
@@ -26,11 +26,17 @@ import phoenix_utils
 
 phoenix_utils.setPath()
 
+if os.name == 'nt':
+    args = subprocess.list2cmdline(sys.argv[1:])
+else:
+    import pipes    # pipes module isn't available on Windows
+    args = " ".join([pipes.quote(v) for v in sys.argv[1:]])
+
 # HBase configuration folder path (where hbase-site.xml reside) for
 # HBase/Phoenix client side property override
 java_cmd = 'java -cp "' + phoenix_utils.hbase_conf_path + os.pathsep + phoenix_utils.phoenix_client_jar + \
     '" -Dlog4j.configuration=file:' + \
     os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
-    " org.apache.phoenix.util.PhoenixRuntime " + ' '.join(sys.argv[1:])
+    " org.apache.phoenix.util.PhoenixRuntime " + args 
 
 subprocess.call(java_cmd, shell=True)


[17/24] git commit: PHOENIX-1170 Change status of local index during splitting to prevent usage when slower than query through data table (Rajeshbabu)

Posted by ja...@apache.org.
PHOENIX-1170 Change status of local index during splitting to prevent usage when slower than query through data table (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4c0d00bd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4c0d00bd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4c0d00bd

Branch: refs/heads/4.2
Commit: 4c0d00bdd32e1853e929729f396dda567dc6faeb
Parents: b48ca7b
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Oct 27 14:29:07 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Oct 27 14:29:07 2014 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/QueryIT.java     |  20 ++-
 .../phoenix/end2end/index/LocalIndexIT.java     | 152 ++++++++++++++++---
 .../IndexHalfStoreFileReaderGenerator.java      |  63 ++++++++
 .../hbase/regionserver/LocalIndexSplitter.java  |  40 +++++
 .../java/org/apache/phoenix/query/BaseTest.java |   1 +
 5 files changed, 250 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
index cc431c1..f45b689 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
@@ -50,6 +50,7 @@ import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -60,6 +61,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.SequenceNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -761,14 +763,16 @@ public class QueryIT extends BaseQueryIT {
             HTable htable = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableName);
             htable.clearRegionCache();
             int nRegions = htable.getRegionLocations().size();
-            admin.split(tableName, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A" + Character.valueOf((char) ('3' + nextRunCount())) + ts))); // vary split point with test run
-            int retryCount = 0;
-            do {
-                Thread.sleep(2000);
-                retryCount++;
-                //htable.clearRegionCache();
-            } while (retryCount < 10 && htable.getRegionLocations().size() == nRegions);
-            assertNotEquals(nRegions, htable.getRegionLocations().size());
+            if(!admin.tableExists(TableName.valueOf(MetaDataUtil.getLocalIndexTableName(ATABLE_NAME)))) {
+                admin.split(tableName, ByteUtil.concat(Bytes.toBytes(tenantId), Bytes.toBytes("00A" + Character.valueOf((char) ('3' + nextRunCount())) + ts))); // vary split point with test run
+                int retryCount = 0;
+                do {
+                    Thread.sleep(2000);
+                    retryCount++;
+                    //htable.clearRegionCache();
+                } while (retryCount < 10 && htable.getRegionLocations().size() == nRegions);
+                assertNotEquals(nRegions, htable.getRegionLocations().size());
+            } 
             
             statement.setString(1, tenantId);
             rs = statement.executeQuery();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 019e0fb..7fa69d4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -24,22 +24,31 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CountDownLatch;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.IndexHalfStoreFileReaderGenerator;
+import org.apache.hadoop.hbase.regionserver.LocalIndexSplitter;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.QueryPlan;
@@ -51,12 +60,15 @@ import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -65,6 +77,9 @@ import com.google.common.collect.Maps;
 
 public class LocalIndexIT extends BaseIndexIT {
 
+    private static CountDownLatch latch1 = new CountDownLatch(1);
+    private static CountDownLatch latch2 = new CountDownLatch(1);
+
     @BeforeClass 
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
@@ -651,25 +666,25 @@ public class LocalIndexIT extends BaseIndexIT {
             
             HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
             for (int i = 1; i < 5; i++) {
+                CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
+                admin.split(Bytes.toBytes(DATA_TABLE_NAME), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
+                List<HRegionInfo> regionsOfUserTable =
+                        MetaReader.getTableRegions(ct, TableName.valueOf(DATA_TABLE_NAME), false);
 
-              admin.split(Bytes.toBytes(DATA_TABLE_NAME), ByteUtil.concat(Bytes.toBytes(strings[3*i])));
-              List<HRegionInfo> regionsOfUserTable = admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME));
+                while (regionsOfUserTable.size() != (4+i)) {
+                    Thread.sleep(100);
+                    regionsOfUserTable = MetaReader.getTableRegions(ct, TableName.valueOf(DATA_TABLE_NAME), false);
+                }
+                assertEquals(4+i, regionsOfUserTable.size());
+                TableName indexTable =
+                        TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME));
+                List<HRegionInfo> regionsOfIndexTable =
+                        MetaReader.getTableRegions(ct, indexTable, false);
 
-              while (regionsOfUserTable.size() != (4+i)) {
-                Thread.sleep(100);
-                regionsOfUserTable = admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME)); 
-              }
-              assertEquals(4+i, regionsOfUserTable.size());
-              List<HRegionInfo> regionsOfIndexTable =
-                  admin.getTableRegions(TableName.valueOf(MetaDataUtil
-                    .getLocalIndexTableName(DATA_TABLE_NAME))); 
-
-              while (regionsOfIndexTable.size() != (4+i)) {
-                Thread.sleep(100);
-                regionsOfIndexTable =
-                    admin.getTableRegions(TableName.valueOf(MetaDataUtil
-                      .getLocalIndexTableName(DATA_TABLE_NAME)));
-              }
+                while (regionsOfIndexTable.size() != (4 + i)) {
+                    Thread.sleep(100);
+                    regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+                }
                 assertEquals(4 + i, regionsOfIndexTable.size());
                 String query = "SELECT t_id,k1,v1 FROM " + DATA_TABLE_NAME;
                 rs = conn1.createStatement().executeQuery(query);
@@ -707,4 +722,105 @@ public class LocalIndexIT extends BaseIndexIT {
             conn1.close();
         }
     }
+
+    @Test
+    public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
+        createBaseTable(DATA_TABLE_NAME+"2", null, "('e','j','o')");
+        Connection conn1 = DriverManager.getConnection(getUrl());
+        try{
+            String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
+            for (int i = 0; i < 26; i++) {
+                conn1.createStatement().execute(
+                    "UPSERT INTO " + DATA_TABLE_NAME+"2" + " values('"+strings[i]+"'," + i + ","
+                            + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
+            }
+            conn1.commit();
+            conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_NAME+"2" + "(v1)");
+            conn1.createStatement().execute("CREATE LOCAL INDEX " + INDEX_TABLE_NAME + "_2 ON " + DATA_TABLE_NAME+"2" + "(k3)");
+
+            ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + DATA_TABLE_NAME+"2");
+            assertTrue(rs.next());
+            HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+            HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(DATA_TABLE_NAME+"2"));
+            tableDesc.removeCoprocessor(LocalIndexSplitter.class.getName());
+            tableDesc.addCoprocessor(MockedLocalIndexSplitter.class.getName(), null,
+                1, null);
+            admin.disableTable(tableDesc.getTableName());
+            admin.modifyTable(tableDesc.getTableName(), tableDesc);
+            admin.enableTable(tableDesc.getTableName());
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil.getLocalIndexTableName(DATA_TABLE_NAME+"2"));
+            HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
+            indexTableDesc.removeCoprocessor(IndexHalfStoreFileReaderGenerator.class.getName());
+            indexTableDesc.addCoprocessor(MockedIndexHalfStoreFileReaderGenerator.class.getName(), null,
+                1, null);
+            admin.disableTable(indexTable);
+            admin.modifyTable(indexTable, indexTableDesc);
+            admin.enableTable(indexTable);
+
+            admin.split(Bytes.toBytes(DATA_TABLE_NAME+"2"), ByteUtil.concat(Bytes.toBytes(strings[3])));
+            List<HRegionInfo> regionsOfUserTable =
+                    admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME+"2"));
+
+            while (regionsOfUserTable.size() != 5) {
+                Thread.sleep(100);
+                regionsOfUserTable = admin.getTableRegions(TableName.valueOf(DATA_TABLE_NAME+"2"));
+            }
+            assertEquals(5, regionsOfUserTable.size());
+
+            List<HRegionInfo> regionsOfIndexTable = admin.getTableRegions(indexTable);
+
+            while (regionsOfIndexTable.size() != 5) {
+                Thread.sleep(100);
+                regionsOfIndexTable = admin.getTableRegions(indexTable);
+            }
+
+            assertEquals(5, regionsOfIndexTable.size());
+            latch1.await();
+            // Verify the metadata for index is correct.
+            rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME,
+                    new String[] { PTableType.INDEX.toString() });
+            assertTrue(rs.next());
+            assertEquals(INDEX_TABLE_NAME, rs.getString(3));
+            assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
+            assertFalse(rs.next());
+            rs = conn1.getMetaData().getTables(null, StringUtil.escapeLike(SCHEMA_NAME), INDEX_TABLE_NAME+"_2",
+                new String[] { PTableType.INDEX.toString() });
+            assertTrue(rs.next());
+            assertEquals(INDEX_TABLE_NAME+"_2", rs.getString(3));
+            assertEquals(PIndexState.INACTIVE.toString(), rs.getString("INDEX_STATE"));
+            assertFalse(rs.next());
+
+            String query = "SELECT t_id,k1,v1 FROM " + DATA_TABLE_NAME+"2";
+            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
+            assertEquals("CLIENT PARALLEL " + 1 + "-WAY FULL SCAN OVER " + DATA_TABLE_NAME+"2",
+                QueryUtil.getExplainPlan(rs));
+            latch2.countDown();
+       } finally {
+            conn1.close();
+            latch1.countDown();
+            latch2.countDown();
+        }
+    }
+
+    public static class MockedIndexHalfStoreFileReaderGenerator extends IndexHalfStoreFileReaderGenerator {
+        @Override
+        public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+                StoreFile resultFile) throws IOException {
+            try {
+                latch2.await();
+            } catch (InterruptedException e1) {
+            }
+            super.postCompact(e, store, resultFile);
+        }
+    }
+
+    public static class MockedLocalIndexSplitter extends LocalIndexSplitter {
+        @Override
+        public void preSplitAfterPONR(ObserverContext<RegionCoprocessorEnvironment> ctx)
+                throws IOException {
+            super.preSplitAfterPONR(ctx);
+            latch1.countDown();
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index b04227f..2fbea22 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.sql.SQLException;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -43,9 +44,14 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AlterIndexStatement;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.PTable.IndexType;
@@ -57,6 +63,10 @@ import org.apache.phoenix.util.QueryUtil;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
     
+    int storeFilesCount = 0;
+    int compactedFilesCount = 0;
+    private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
+
     @Override
     public Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
             FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf,
@@ -126,6 +136,59 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         return reader;
     }
     
+    @Override
+    public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+            Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
+            long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
+        InternalScanner internalScanner = super.preCompactScannerOpen(c, store, scanners, scanType, earliestPutTs, s, request);
+        Collection<StoreFile> files = request.getFiles();
+        storeFilesCount = 0;
+        compactedFilesCount = 0;
+        for(StoreFile file:files) {
+            if(!file.isReference()) {
+                return internalScanner;
+            }
+        }
+        storeFilesCount = files.size();
+        return internalScanner;
+    }
+
+    @Override
+    public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+            StoreFile resultFile) throws IOException {
+        super.postCompact(e, store, resultFile);
+        if(storeFilesCount > 0) compactedFilesCount++;
+        if(compactedFilesCount == storeFilesCount) {
+            PhoenixConnection conn = null;
+            try {
+                conn = QueryUtil.getConnection(e.getEnvironment().getConfiguration()).unwrap(
+                    PhoenixConnection.class);
+                MetaDataClient client = new MetaDataClient(conn);
+                String userTableName = MetaDataUtil.getUserTableName(e.getEnvironment().getRegion().getTableDesc().getNameAsString());
+                PTable dataTable = PhoenixRuntime.getTable(conn, userTableName);
+                List<PTable> indexes = dataTable.getIndexes();
+                for (PTable index : indexes) {
+                    if (index.getIndexType() == IndexType.LOCAL) {
+                        AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
+                            org.apache.phoenix.parse.TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                            dataTable.getTableName().getString(), false, PIndexState.ACTIVE);
+                        client.alterIndex(indexStatement);
+                    }
+                }
+                conn.commit();
+            } catch (ClassNotFoundException ex) {
+            } catch (SQLException ex) {
+            } finally {
+                if (conn != null) {
+                    try {
+                        conn.close();
+                    } catch (SQLException ex) {
+                    }
+                }
+            }
+        }
+    }
+
     private byte[][] getViewConstants(PTable dataTable) {
         int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
         byte[][] viewConstants = null;

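The compaction hooks above only flip local indexes back to ACTIVE once a
compaction has consumed nothing but reference (half) files, i.e. the
leftovers of a region split. The core check is isolated below as a sketch;
StoreFile and isReference() are the real HBase types, the wrapper class is
illustrative:

    import java.util.Collection;

    import org.apache.hadoop.hbase.regionserver.StoreFile;

    final class ReferenceOnlyCompactionCheck {
        // Returns the number of files to track for this compaction, or 0
        // when any regular store file is present, in which case the request
        // is irrelevant to index re-activation.
        static int referenceFilesToTrack(Collection<StoreFile> files) {
            for (StoreFile file : files) {
                if (!file.isReference()) {
                    return 0;
                }
            }
            return files.size();
        }
    }

Two caveats worth noting: storeFilesCount and compactedFilesCount are plain
instance fields, so the bookkeeping assumes the region's compactions do not
interleave; and when storeFilesCount is 0 the equality check in postCompact
still passes (0 == 0), so the re-activation path appears to run after
ordinary compactions as well. Guarding that branch on storeFilesCount > 0
would avoid the extra metadata round trip.
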
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index acdb353..9ec5d01 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.sql.SQLException;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -35,9 +36,19 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.IndexSplitTransaction;
 import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.parse.AlterIndexStatement;
+import org.apache.phoenix.parse.ParseNodeFactory;
+import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
 public class LocalIndexSplitter extends BaseRegionObserver {
@@ -46,6 +57,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
 
     private IndexSplitTransaction st = null;
     private PairOfSameType<HRegion> daughterRegions = null;
+    private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
 
     @Override
     public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
@@ -104,6 +116,34 @@ public class LocalIndexSplitter extends BaseRegionObserver {
             throws IOException {
         if (st == null || daughterRegions == null) return;
         RegionCoprocessorEnvironment environment = ctx.getEnvironment();
+        PhoenixConnection conn = null;
+        try {
+            conn = QueryUtil.getConnection(ctx.getEnvironment().getConfiguration()).unwrap(
+                PhoenixConnection.class);
+            MetaDataClient client = new MetaDataClient(conn);
+            String userTableName = ctx.getEnvironment().getRegion().getTableDesc().getNameAsString();
+            PTable dataTable = PhoenixRuntime.getTable(conn, userTableName);
+            List<PTable> indexes = dataTable.getIndexes();
+            for (PTable index : indexes) {
+                if (index.getIndexType() == IndexType.LOCAL) {
+                    AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
+                        TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                        dataTable.getTableName().getString(), false, PIndexState.INACTIVE);
+                    client.alterIndex(indexStatement);
+                }
+            }
+            conn.commit();
+        } catch (ClassNotFoundException ex) {
+        } catch (SQLException ex) {
+        } finally {
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException ex) {
+                }
+            }
+        }
+
         HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
         st.stepsAfterPONR(rs, rs, daughterRegions);
     }

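The split hook above obtains a server-side Phoenix connection through
QueryUtil.getConnection and marks every local index INACTIVE before the
split finishes, closing the connection in a finally block and deliberately
swallowing failures so index metadata can never abort the split itself. The
same pattern with try-with-resources is sketched below; the ALTER INDEX
string is an assumed SQL equivalent of the FACTORY.alterIndex(...) call
(UNUSABLE mapping to PIndexState.INACTIVE in the 4.x grammar), not
something this diff spells out:

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.util.QueryUtil;

    final class MarkLocalIndexInactiveSketch {
        static void markInactive(Configuration conf, String indexName, String dataTable) {
            try (Connection conn = QueryUtil.getConnection(conf);
                 Statement stmt = conn.createStatement()) {
                stmt.execute("ALTER INDEX " + indexName + " ON " + dataTable + " UNUSABLE");
                conn.commit();
            } catch (ClassNotFoundException | SQLException e) {
                // Mirrors the coprocessor: never fail the split over index
                // metadata. A production version should at least log here.
            }
        }
    }
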
http://git-wip-us.apache.org/repos/asf/phoenix/blob/4c0d00bd/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index e0b0a96..449abd6 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -634,6 +634,7 @@ public abstract class BaseTest {
         conf.setInt("hbase.hlog.asyncer.number", 2);
         conf.setInt("hbase.assignment.zkevent.workers", 5);
         conf.setInt("hbase.assignment.threads.max", 5);
+        conf.setInt("hbase.catalogjanitor.interval", 5000);
         return conf;
     }
 

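For context on the one-line test change above: hbase.catalogjanitor.interval
controls how often the master's CatalogJanitor scans hbase:meta to
garbage-collect parent regions left behind by splits, and its default is on
the order of minutes. Lowering it to 5 seconds lets split-related tests
observe that cleanup instead of timing out first. A hedged sketch of the
setting:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class JanitorIntervalSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.catalogjanitor.interval", 5000); // ms between janitor runs
            System.out.println(conf.getInt("hbase.catalogjanitor.interval", -1));
        }
    }
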

[22/24] git commit: PHOENIX-1391 Remove obsolete hint

Posted by ja...@apache.org.
PHOENIX-1391 Remove obsolete hint


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/668d6ee4
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/668d6ee4
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/668d6ee4

Branch: refs/heads/4.2
Commit: 668d6ee48ccd4c616966a173ad00393694ff1b50
Parents: 346891b
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 14:08:08 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 14:08:08 2014 -0700

----------------------------------------------------------------------
 .../end2end/SkipScanAfterManualSplitIT.java        |  2 +-
 .../org/apache/phoenix/execute/BaseQueryPlan.java  |  6 ++++++
 .../org/apache/phoenix/iterate/ExplainTable.java   | 17 +++++++++++++++--
 .../apache/phoenix/iterate/ParallelIterators.java  |  2 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java      | 15 +++++++--------
 .../java/org/apache/phoenix/parse/HintNode.java    |  9 ++++-----
 .../phoenix/query/BaseConnectionlessQueryTest.java |  2 ++
 .../org/apache/phoenix/query/QueryPlanTest.java    |  8 ++++++++
 8 files changed, 44 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
index 22b33f8..0478816 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanAfterManualSplitIT.java
@@ -128,7 +128,7 @@ public class SkipScanAfterManualSplitIT extends BaseHBaseManagedTimeIT {
             assertEquals(nRegions, nInitialRegions);
             
             int nRows = 2;
-            String query = "SELECT /*+ NO_INTRA_REGION_PARALLELIZATION */ count(*) FROM S WHERE a IN ('tl','jt',' a',' b',' c',' d')";
+            String query = "SELECT count(*) FROM S WHERE a IN ('tl','jt',' a',' b',' c',' d')";
             ResultSet rs1 = conn.createStatement().executeQuery(query);
             assertTrue(rs1.next());
             nRegions = services.getAllTableRegions(TABLE_NAME_BYTES).size();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 03c643d..d91ad51 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -48,6 +48,7 @@ import org.apache.phoenix.iterate.ParallelIterators.ParallelIteratorFactory;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.FilterableStatement;
+import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.schema.KeyValueSchema;
@@ -165,6 +166,11 @@ public abstract class BaseQueryPlan implements QueryPlan {
         if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
             ScanUtil.setReversed(scan);
         }
+        
+        if (statement.getHint().hasHint(Hint.SMALL)) {
+            scan.setSmall(true);
+        }
+        
         // Set producer on scan so HBase server does round robin processing
         //setProducer(scan);
         // Set the time range on the scan so we don't get back rows newer than when the statement was compiled

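For context on the new branch above: Scan.setSmall(true) is the raw HBase
client call behind the /*+ SMALL */ hint. It asks the region server to
serve the scan with pread in a single round trip, which pays off for short
scans whose results fit in one data block. A standalone sketch with
placeholder row keys:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallScanSketch {
        public static Scan smallScan() {
            Scan scan = new Scan(Bytes.toBytes("row1"), Bytes.toBytes("row2"));
            scan.setSmall(true); // same call BaseQueryPlan now makes for /*+ SMALL */
            return scan;
        }
    }
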
http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index 40a0cff..8c04383 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -31,9 +31,12 @@ import org.apache.hadoop.hbase.filter.PageFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
+import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.KeyRange.Bound;
 import org.apache.phoenix.schema.PDataType;
@@ -50,15 +53,19 @@ public abstract class ExplainTable {
     protected final StatementContext context;
     protected final TableRef tableRef;
     protected final GroupBy groupBy;
+    protected final OrderBy orderBy;
+    protected final HintNode hint;
    
     public ExplainTable(StatementContext context, TableRef table) {
-        this(context,table,GroupBy.EMPTY_GROUP_BY);
+        this(context,table,GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, HintNode.EMPTY_HINT_NODE);
     }
 
-    public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy) {
+    public ExplainTable(StatementContext context, TableRef table, GroupBy groupBy, OrderBy orderBy, HintNode hintNode) {
         this.context = context;
         this.tableRef = table;
         this.groupBy = groupBy;
+        this.orderBy = orderBy;
+        this.hint = hintNode;
     }
 
     private boolean explainSkipScan(StringBuilder buf) {
@@ -90,6 +97,12 @@ public abstract class ExplainTable {
         StringBuilder buf = new StringBuilder(prefix);
         ScanRanges scanRanges = context.getScanRanges();
         boolean hasSkipScanFilter = false;
+        if (hint.hasHint(Hint.SMALL)) {
+            buf.append("SMALL ");
+        }
+        if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
+            buf.append("REVERSE ");
+        }
         if (scanRanges.isEverything()) {
             buf.append("FULL SCAN ");
         } else {

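The net effect of the two branches added above is that an explain plan line
can now carry "SMALL " and/or "REVERSE " prefixes ahead of the scan type.
A client-side way to inspect that output, reusing the QueryUtil helper the
tests call (the connection URL and table are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    import org.apache.phoenix.util.QueryUtil;

    public class ExplainPrefixExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.createStatement().executeQuery(
                    "EXPLAIN SELECT /*+ SMALL */ host FROM PTSDB3 WHERE host = 'na1'");
                // expected shape: "CLIENT PARALLEL 1-WAY SMALL RANGE SCAN OVER PTSDB3 ..."
                System.out.println(QueryUtil.getExplainPlan(rs));
            }
        }
    }
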
http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 8c33954..4f8b6a6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -134,7 +134,7 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
     
     public ParallelIterators(QueryPlan plan, Integer perScanLimit, ParallelIteratorFactory iteratorFactory)
             throws SQLException {
-        super(plan.getContext(), plan.getTableRef(), plan.getGroupBy());
+        super(plan.getContext(), plan.getTableRef(), plan.getGroupBy(), plan.getOrderBy(), plan.getStatement().getHint());
         this.plan = plan;
         StatementContext context = plan.getContext();
         TableRef tableRef = plan.getTableRef();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
index 8edc7ae..0a39104 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDatabaseMetaData.java
@@ -51,7 +51,6 @@ import org.apache.phoenix.hbase.index.util.VersionUtil;
 import org.apache.phoenix.iterate.DelegateResultIterator;
 import org.apache.phoenix.iterate.MaterializedResultIterator;
 import org.apache.phoenix.iterate.ResultIterator;
-import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PDatum;
@@ -336,7 +335,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
 
     @Override
     public ResultSet getCatalogs() throws SQLException {
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+        StringBuilder buf = new StringBuilder("select \n" +
                 " DISTINCT " + TENANT_ID + " " + TABLE_CAT +
                 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
                 " where " + COLUMN_NAME + " is null" +
@@ -392,7 +391,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     @Override
     public ResultSet getColumns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
             throws SQLException {
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n " +
+        StringBuilder buf = new StringBuilder("select \n " +
                 TENANT_ID + " " + TABLE_CAT + "," + // use this for tenant id
                 TABLE_SCHEM + "," +
                 TABLE_NAME + " ," +
@@ -650,7 +649,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
         if (unique) { // No unique indexes
             return emptyResultSet;
         }
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+        StringBuilder buf = new StringBuilder("select \n" +
                 TENANT_ID + " " + TABLE_CAT + ",\n" + // use this column for column family name
                 TABLE_SCHEM + ",\n" +
                 DATA_TABLE_NAME + " " + TABLE_NAME + ",\n" +
@@ -803,7 +802,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
         if (table == null || table.length() == 0) {
             return emptyResultSet;
         }
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+        StringBuilder buf = new StringBuilder("select \n" +
                 TENANT_ID + " " + TABLE_CAT + "," + // use catalog for tenant_id
                 TABLE_SCHEM + "," +
                 TABLE_NAME + " ," +
@@ -877,7 +876,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
 
     @Override
     public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n distinct " +
+        StringBuilder buf = new StringBuilder("select distinct \n" +
                 TENANT_ID + " " + TABLE_CATALOG + "," + // no catalog for tables
                 TABLE_SCHEM +
                 " from " + SYSTEM_CATALOG + " " + SYSTEM_CATALOG_ALIAS +
@@ -902,7 +901,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
 
     @Override
     public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) throws SQLException {
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+        StringBuilder buf = new StringBuilder("select \n" +
                 TENANT_ID + " " + TABLE_CAT + "," + // Use tenantId for catalog
                 TABLE_SCHEM + "," +
                 TABLE_NAME + "," +
@@ -989,7 +988,7 @@ public class PhoenixDatabaseMetaData implements DatabaseMetaData, org.apache.pho
     @Override
     public ResultSet getTables(String catalog, String schemaPattern, String tableNamePattern, String[] types)
             throws SQLException {
-        StringBuilder buf = new StringBuilder("select /*+" + Hint.NO_INTRA_REGION_PARALLELIZATION + "*/\n" +
+        StringBuilder buf = new StringBuilder("select \n" +
                 TENANT_ID + " " + TABLE_CAT + "," + // tenant_id is the catalog
                 TABLE_SCHEM + "," +
                 TABLE_NAME + " ," +

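The queries edited above back the standard JDBC DatabaseMetaData calls, so
dropping the obsolete hint changes the generated SQL but not the
client-facing API. Illustrative usage (the URL is a placeholder):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class MetaDataExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                DatabaseMetaData md = conn.getMetaData();
                try (ResultSet rs = md.getTables(null, null, "%", null)) {
                    while (rs.next()) {
                        System.out.println(rs.getString(3)); // column 3 is TABLE_NAME per JDBC
                    }
                }
            }
        }
    }
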
http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
index 0ded0b5..ea20114 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/HintNode.java
@@ -55,11 +55,6 @@ public class HintNode {
          */
         SKIP_SCAN_HASH_JOIN,
         /**
-         * Prevents the spawning of multiple threads during
-         * query processing.
-         */
-        NO_INTRA_REGION_PARALLELIZATION,
-        /**
         * Prevents the usage of indexes, forcing usage
         * of the data table for a query.
         */
@@ -100,6 +95,10 @@ public class HintNode {
         * between 2 selected columns this will be give better performance.
         */
       NO_SEEK_TO_COLUMN,
+      /**
+       * Saves an RPC call on the scan. See Scan.setSmall(true) in HBase documentation.
+       */
+     SMALL,
     };
 
     private final Map<Hint,String> hints;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
index 8ac322f..8f17a7c 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseConnectionlessQueryTest.java
@@ -27,6 +27,7 @@ import static org.apache.phoenix.util.TestUtil.JOIN_ORDER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.JOIN_SUPPLIER_TABLE_FULL_NAME;
 import static org.apache.phoenix.util.TestUtil.MULTI_CF_NAME;
 import static org.apache.phoenix.util.TestUtil.PHOENIX_CONNECTIONLESS_JDBC_URL;
+import static org.apache.phoenix.util.TestUtil.PTSDB2_NAME;
 import static org.apache.phoenix.util.TestUtil.PTSDB3_NAME;
 import static org.apache.phoenix.util.TestUtil.PTSDB_NAME;
 import static org.apache.phoenix.util.TestUtil.TABLE_WITH_ARRAY;
@@ -104,6 +105,7 @@ public class BaseConnectionlessQueryTest extends BaseTest {
         ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME);
         ensureTableCreated(getUrl(), FUNKY_NAME);
         ensureTableCreated(getUrl(), PTSDB_NAME);
+        ensureTableCreated(getUrl(), PTSDB2_NAME);
         ensureTableCreated(getUrl(), PTSDB3_NAME);
         ensureTableCreated(getUrl(), MULTI_CF_NAME);
         ensureTableCreated(getUrl(), JOIN_ORDER_TABLE_FULL_NAME);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/668d6ee4/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
index fd22e47..1e3df0b 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryPlanTest.java
@@ -41,6 +41,14 @@ public class QueryPlanTest extends BaseConnectionlessQueryTest {
                 "CLIENT PARALLEL 1-WAY SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" + 
                 "    SERVER FILTER BY FIRST KEY ONLY",
 
+                "SELECT /*+ SMALL*/ host FROM PTSDB3 WHERE host IN ('na1', 'na2','na3')",
+                "CLIENT PARALLEL 1-WAY SMALL SKIP SCAN ON 3 KEYS OVER PTSDB3 [~'na3'] - [~'na1']\n" + 
+                "    SERVER FILTER BY FIRST KEY ONLY",
+
+                "SELECT inst,date FROM PTSDB2 WHERE inst = 'na1' ORDER BY inst DESC, date DESC",
+                "CLIENT PARALLEL 1-WAY REVERSE RANGE SCAN OVER PTSDB2 ['na1']\n" +
+                "    SERVER FILTER BY FIRST KEY ONLY",
+
                 "SELECT host FROM PTSDB WHERE inst IS NULL AND host IS NOT NULL AND date >= to_date('2013-01-01')",
                 "CLIENT PARALLEL 1-WAY RANGE SCAN OVER PTSDB [null,not null]\n" + 
                 "    SERVER FILTER BY FIRST KEY ONLY AND DATE >= '2013-01-01 00:00:00.000'",


[14/24] PHOENIX-1286 Remove hadoop2 compat modules

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java b/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
deleted file mode 100644
index 3258e8a..0000000
--- a/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TraceMetricsSourceTest.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.metrics2.MetricsCollector;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.metrics2.MetricsTag;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.cloudera.htrace.Span;
-import org.cloudera.htrace.impl.MilliSpan;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test that the @{link TraceMetricSource} correctly handles different kinds of traces
- */
-public class TraceMetricsSourceTest {
-
-  @BeforeClass
-  public static void setup() throws Exception{
-    DefaultMetricsSystem.setMiniClusterMode(true);
-  }
-
-  /**
-   * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers,
-   * but HBase writes some strings as well, so we need to be able to handle that too
-   */
-  @Test
-  public void testNonIntegerAnnotations(){
-    Span span = getSpan();
-    // make sure its less than the length of an integer
-    byte[] value = Bytes.toBytes("a");
-    byte[] someInt = Bytes.toBytes(1);
-    assertTrue(someInt.length >value.length);
-
-    // an annotation that is not an integer
-    span.addKVAnnotation(Bytes.toBytes("key"), value);
-
-    // Create the sink and write the span
-    TraceMetricSource source = new TraceMetricSource();
-    source.receiveSpan(span);
-  }
-
-  @Test
-  public void testIntegerAnnotations(){
-    Span span = getSpan();
-
-    // add annotation through the phoenix interfaces
-    TracingCompat.addAnnotation(span, "message", 10);
-
-    TraceMetricSource source = new TraceMetricSource();
-    source.receiveSpan(span);
-  }
-
-  /**
-   * If the source does not write any metrics when there are no spans, i.e. when initialized,
-   * then the metrics system will discard the source, so it needs to always emit some metrics.
-   */
-  @Test
-  public void testWritesInfoWhenNoSpans(){
-    TraceMetricSource source = new TraceMetricSource();
-    MetricsCollector collector = Mockito.mock(MetricsCollector.class);
-    MetricsRecordBuilder builder = Mockito.mock(MetricsRecordBuilder.class);
-    Mockito.when(collector.addRecord(Mockito.anyString())).thenReturn(builder);
-
-    source.getMetrics(collector, true);
-
-    // verify that we add a record and that the record has some info
-    Mockito.verify(collector).addRecord(Mockito.anyString());
-    Mockito.verify(builder).add(Mockito.any(MetricsTag.class));
-  }
-
-  private Span getSpan(){
-    return new MilliSpan("test span", 0, 1 , 2, "pid");
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TracingTest.java
----------------------------------------------------------------------
diff --git a/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TracingTest.java b/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TracingTest.java
deleted file mode 100644
index ffe6c82..0000000
--- a/phoenix-hadoop2-compat/src/test/java/org/apache/phoenix/trace/TracingTest.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.trace;
-
-import static org.junit.Assert.assertNotNull;
-
-import org.junit.Test;
-
-public class TracingTest {
-
-    /**
-     * Test that we can correctly load a class that will convert the tracing output to metrics
-     * @throws Exception on failure
-     */
-    @Test
-    public void testLoadTracingToMetrics() throws Exception{
-        assertNotNull("Didn't find a trace receiver", TracingCompat.newTraceMetricSource());
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b48ca7b5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 10ef778..3346ccb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -24,12 +24,9 @@
 
   <modules>
     <module>phoenix-core</module>
-    <module>phoenix-hadoop-compat</module>
     <module>phoenix-flume</module>
     <module>phoenix-pig</module>
     <module>phoenix-assembly</module>
-    <!--Temporary inclusion - to be removed in next patch, for ease of review -->
-    <module>phoenix-hadoop2-compat</module>
   </modules>
 
   <repositories>
@@ -411,25 +408,6 @@
       </dependency>
       <dependency>
         <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix-hadoop-compat</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix-hadoop-compat</artifactId>
-        <version>${project.version}</version>
-        <classifier>tests</classifier>
-        <scope>test</scope>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix-hadoop2-compat</artifactId>
-        <version>${project.version}</version>
-        <classifier>tests</classifier>
-        <scope>test</scope>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
         <artifactId>phoenix-flume</artifactId>
         <version>${project.version}</version>
       </dependency>
@@ -438,11 +416,6 @@
         <artifactId>phoenix-pig</artifactId>
         <version>${project.version}</version>
       </dependency>
-      <dependency>
-        <groupId>org.apache.phoenix</groupId>
-        <artifactId>phoenix-hadoop2-compat</artifactId>
-        <version>${project.version}</version>
-      </dependency>
 
       <!-- HBase dependencies -->
       <dependency>


[18/24] git commit: Remove unused imports and local variables

Posted by ja...@apache.org.
Remove unused imports and local variables


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/851f57a6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/851f57a6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/851f57a6

Branch: refs/heads/4.2
Commit: 851f57a6da1c91b02711d8974568206fb88ed49a
Parents: 4c0d00b
Author: James Taylor <jt...@salesforce.com>
Authored: Tue Oct 28 12:43:48 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Tue Oct 28 12:43:48 2014 -0700

----------------------------------------------------------------------
 .../apache/phoenix/trace/BaseTracingTestIT.java | 24 ++++++++++----------
 .../apache/phoenix/trace/PhoenixTagImpl.java    |  9 --------
 .../org/apache/phoenix/trace/TracingUtils.java  |  5 ----
 3 files changed, 12 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/851f57a6/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
index f504d12..99c8bc3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/BaseTracingTestIT.java
@@ -17,8 +17,18 @@
  */
 package org.apache.phoenix.trace;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import static org.apache.phoenix.util.PhoenixRuntime.ANNOTATION_ATTRIB_PREFIX;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecord;
@@ -38,22 +48,12 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Before;
 import org.junit.experimental.categories.Category;
 
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.util.*;
-
-import static org.apache.phoenix.util.PhoenixRuntime.ANNOTATION_ATTRIB_PREFIX;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-
 /**
  * Base test for tracing tests - helps manage getting tracing/non-tracing
  * connections, as well as any supporting utils.
  */
 @Category(HBaseManagedTimeTest.class)
 public class BaseTracingTestIT extends BaseHBaseManagedTimeIT {
-    private static final Log LOG = LogFactory.getLog(BaseTracingTestIT.class);
-
     @Before
     public void resetTracingTableIfExists() throws Exception {
         Connection conn = getConnectionWithoutTracing();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/851f57a6/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
index a911a2c..0d2def3 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTagImpl.java
@@ -17,22 +17,13 @@
  */
 package org.apache.phoenix.trace;
 
-import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsTag;
 
 /**
  * Simple Tag implementation for testing
  */
 public class PhoenixTagImpl extends MetricsTag {
-
-    private final String name;
-    private final String description;
-    private final String value;
-
     public PhoenixTagImpl(String name, String description, String value) {
         super(new MetricsInfoImpl(name, description), value);
-        this.name = name;
-        this.description = description;
-        this.value = value;
     }
 }
\ No newline at end of file

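The fields deleted above duplicated state the superclass already keeps:
MetricsTag derives name() and description() from the MetricsInfo handed to
its constructor and stores the value itself, so the subclass only needs to
forward. Illustrative use of the base-class accessors, via Hadoop's stock
Interns helper rather than the test's MetricsInfoImpl:

    import org.apache.hadoop.metrics2.MetricsTag;
    import org.apache.hadoop.metrics2.lib.Interns;

    public class TagAccessorExample {
        public static void main(String[] args) {
            MetricsTag tag = new MetricsTag(
                Interns.info("hostname", "host the span came from"), "rs1");
            // name/description/value all come from base-class state
            System.out.println(tag.name() + " / " + tag.description() + " = " + tag.value());
        }
    }
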
http://git-wip-us.apache.org/repos/asf/phoenix/blob/851f57a6/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
index 6ae52d8..cee3b95 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TracingUtils.java
@@ -17,8 +17,6 @@
  */
 package org.apache.phoenix.trace;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.cloudera.htrace.Span;
@@ -27,9 +25,6 @@ import org.cloudera.htrace.Span;
  * Utilities for tracing
  */
 public class TracingUtils {
-
-    private static final Log LOG = LogFactory.getLog(TracingUtils.class);
-
     public static final String METRIC_SOURCE_KEY = "phoenix.";
 
     /** Set context to enable filtering */


[05/24] git commit: PHOENIX-1382: Phoenix 4.2 RC Issue

Posted by ja...@apache.org.
PHOENIX-1382: Phoenix 4.2 RC Issue


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/00522cfd
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/00522cfd
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/00522cfd

Branch: refs/heads/4.2
Commit: 00522cfd3fee98336be442ae9c4696a8c657ddc2
Parents: 553fb4b
Author: Jeffrey Zhong <je...@apache.org>
Authored: Sun Oct 26 22:07:13 2014 -0700
Committer: Jeffrey Zhong <je...@apache.org>
Committed: Sun Oct 26 22:07:13 2014 -0700

----------------------------------------------------------------------
 bin/phoenix_utils.py                                               | 2 +-
 .../main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/00522cfd/bin/phoenix_utils.py
----------------------------------------------------------------------
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index 4f7d9c3..2331ae9 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -49,7 +49,7 @@ def findFileInPathWithoutRecursion(pattern, path):
     return ""
 
 def setPath():
- PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client*.jar"
+ PHOENIX_CLIENT_JAR_PATTERN = "phoenix-*-client.jar"
  PHOENIX_TESTS_JAR_PATTERN = "phoenix-*-tests*.jar"
  global current_dir
  current_dir = os.path.dirname(os.path.abspath(__file__))

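The tightened pattern matters because the trailing wildcard also matched
any suffixed client jar, letting the launcher scripts pick up the wrong
artifact. The difference, sketched with Java's glob matcher (the jar names
are hypothetical):

    import java.nio.file.FileSystems;
    import java.nio.file.Path;
    import java.nio.file.PathMatcher;
    import java.nio.file.Paths;

    public class JarPatternSketch {
        public static void main(String[] args) {
            PathMatcher oldPattern = FileSystems.getDefault().getPathMatcher("glob:phoenix-*-client*.jar");
            PathMatcher newPattern = FileSystems.getDefault().getPathMatcher("glob:phoenix-*-client.jar");
            Path plain = Paths.get("phoenix-4.2.0-client.jar");
            Path suffixed = Paths.get("phoenix-4.2.0-client-without-hbase.jar");
            System.out.println(oldPattern.matches(plain));    // true
            System.out.println(oldPattern.matches(suffixed)); // true: too loose
            System.out.println(newPattern.matches(plain));    // true
            System.out.println(newPattern.matches(suffixed)); // false: the fix
        }
    }
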
http://git-wip-us.apache.org/repos/asf/phoenix/blob/00522cfd/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
index ac1287a..662bed3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataProtocol.java
@@ -52,7 +52,7 @@ import com.google.protobuf.HBaseZeroCopyByteString;
  */
 public abstract class MetaDataProtocol extends MetaDataService {
     public static final int PHOENIX_MAJOR_VERSION = 4;
-    public static final int PHOENIX_MINOR_VERSION = 1;
+    public static final int PHOENIX_MINOR_VERSION = 2;
     public static final int PHOENIX_PATCH_NUMBER = 0;
     public static final int PHOENIX_VERSION = 
             VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER);
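These constants feed VersionUtil.encodeVersion, which packs
major/minor/patch into a single int so that versions can be compared
numerically. A sketch of the idea; the 8-bits-per-component layout below is
an assumption for illustration, not a claim about Phoenix's exact encoding:

    final class VersionEncodingSketch {
        // one byte per component: major in bits 16-23, minor in 8-15, patch in 0-7
        static int encode(int major, int minor, int patch) {
            return (major << 16) | (minor << 8) | patch;
        }

        public static void main(String[] args) {
            int v410 = encode(4, 1, 0);
            int v420 = encode(4, 2, 0); // the bump in this commit
            System.out.println(v420 > v410); // true: encoded versions order numerically
        }
    }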