You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ec...@apache.org on 2013/08/15 01:28:22 UTC

svn commit: r1514092 - in /hbase/branches/0.95: ./ hbase-client/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ hbase-it/ ...

Author: eclark
Date: Wed Aug 14 23:28:21 2013
New Revision: 1514092

URL: http://svn.apache.org/r1514092
Log:
HBASE-9121 Update HTrace to 2.00 and add new example usage.

Modified:
    hbase/branches/0.95/hbase-client/pom.xml
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
    hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
    hbase/branches/0.95/hbase-it/pom.xml
    hbase/branches/0.95/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
    hbase/branches/0.95/hbase-server/pom.xml
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
    hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
    hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
    hbase/branches/0.95/pom.xml

Modified: hbase/branches/0.95/hbase-client/pom.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/pom.xml?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/pom.xml (original)
+++ hbase/branches/0.95/hbase-client/pom.xml Wed Aug 14 23:28:21 2013
@@ -120,7 +120,7 @@
     </dependency>
     <dependency>
       <groupId>org.cloudera.htrace</groupId>
-      <artifactId>htrace</artifactId>
+      <artifactId>htrace-core</artifactId>
     </dependency>
     <dependency>
       <groupId>org.codehaus.jackson</groupId>

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java Wed Aug 14 23:28:21 2013
@@ -29,9 +29,12 @@ import org.apache.hadoop.hbase.HRegionLo
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.protobuf.generated.Tracing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
+import org.cloudera.htrace.Span;
+import org.cloudera.htrace.Trace;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -407,7 +410,7 @@ class AsyncProcess<CResult> {
 
       incTaskCounters(regionName);
 
-      Runnable runnable = new Runnable() {
+      Runnable runnable = Trace.wrap("AsyncProcess.sendMultiAction", new Runnable() {
         @Override
         public void run() {
           MultiResponse res;
@@ -427,7 +430,7 @@ class AsyncProcess<CResult> {
             decTaskCounters(regionName);
           }
         }
-      };
+      });
 
       try {
         this.pool.submit(runnable);

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java Wed Aug 14 23:28:21 2013
@@ -993,7 +993,7 @@ public class RpcClient {
         RequestHeader.Builder builder = RequestHeader.newBuilder();
         builder.setCallId(call.id);
         if (Trace.isTracing()) {
-          Span s = Trace.currentTrace();
+          Span s = Trace.currentSpan();
           builder.setTraceInfo(RPCTInfo.newBuilder().
             setParentId(s.getSpanId()).setTraceId(s.getTraceId()));
         }

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java Wed Aug 14 23:28:21 2013
@@ -38,6 +38,8 @@ import org.apache.zookeeper.data.ACL;
 import org.apache.zookeeper.data.Stat;
 import org.apache.zookeeper.proto.CreateRequest;
 import org.apache.zookeeper.proto.SetDataRequest;
+import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -146,36 +148,42 @@ public class RecoverableZooKeeper {
    */
   public void delete(String path, int version)
   throws InterruptedException, KeeperException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    boolean isRetry = false; // False for first attempt, true for all retries.
-    while (true) {
-      try {
-        zk.delete(path, version);
-        return;
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case NONODE:
-            if (isRetry) {
-              LOG.info("Node " + path + " already deleted. Assuming a " +
-                  "previous attempt succeeded.");
-              return;
-            }
-            LOG.warn("Node " + path + " already deleted, retry=" + isRetry);
-            throw e;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.delete");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      boolean isRetry = false; // False for first attempt, true for all retries.
+      while (true) {
+        try {
+          zk.delete(path, version);
+          return;
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case NONODE:
+              if (isRetry) {
+                LOG.info("Node " + path + " already deleted. Assuming a " +
+                    "previous attempt succeeded.");
+                return;
+              }
+              LOG.warn("Node " + path + " already deleted, retry=" + isRetry);
+              throw e;
 
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "delete");
-            break;
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "delete");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
+        isRetry = true;
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
-      isRetry = true;
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -185,24 +193,30 @@ public class RecoverableZooKeeper {
    */
   public Stat exists(String path, Watcher watcher)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    while (true) {
-      try {
-        return zk.exists(path, watcher);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "exists");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      while (true) {
+        try {
+          return zk.exists(path, watcher);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "exists");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -212,24 +226,30 @@ public class RecoverableZooKeeper {
    */
   public Stat exists(String path, boolean watch)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    while (true) {
-      try {
-        return zk.exists(path, watch);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "exists");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      while (true) {
+        try {
+          return zk.exists(path, watch);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "exists");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -249,24 +269,30 @@ public class RecoverableZooKeeper {
    */
   public List<String> getChildren(String path, Watcher watcher)
     throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    while (true) {
-      try {
-        return zk.getChildren(path, watcher);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "getChildren");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      while (true) {
+        try {
+          return zk.getChildren(path, watcher);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "getChildren");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -276,24 +302,30 @@ public class RecoverableZooKeeper {
    */
   public List<String> getChildren(String path, boolean watch)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    while (true) {
-      try {
-        return zk.getChildren(path, watch);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "getChildren");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      while (true) {
+        try {
+          return zk.getChildren(path, watch);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "getChildren");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -303,25 +335,31 @@ public class RecoverableZooKeeper {
    */
   public byte[] getData(String path, Watcher watcher, Stat stat)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    while (true) {
-      try {
-        byte[] revData = zk.getData(path, watcher, stat);
-        return this.removeMetaData(revData);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "getData");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      while (true) {
+        try {
+          byte[] revData = zk.getData(path, watcher, stat);
+          return this.removeMetaData(revData);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "getData");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -331,25 +369,31 @@ public class RecoverableZooKeeper {
    */
   public byte[] getData(String path, boolean watch, Stat stat)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    while (true) {
-      try {
-        byte[] revData = zk.getData(path, watch, stat);
-        return this.removeMetaData(revData);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "getData");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      while (true) {
+        try {
+          byte[] revData = zk.getData(path, watch, stat);
+          return this.removeMetaData(revData);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "getData");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -361,42 +405,48 @@ public class RecoverableZooKeeper {
    */
   public Stat setData(String path, byte[] data, int version)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    byte[] newData = appendMetaData(data);
-    boolean isRetry = false;
-    while (true) {
-      try {
-        return zk.setData(path, newData, version);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "setData");
-            break;
-          case BADVERSION:
-            if (isRetry) {
-              // try to verify whether the previous setData success or not
-              try{
-                Stat stat = new Stat();
-                byte[] revData = zk.getData(path, false, stat);
-                if(Bytes.compareTo(revData, newData) == 0) {
-                  // the bad version is caused by previous successful setData
-                  return stat;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.setData");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      byte[] newData = appendMetaData(data);
+      boolean isRetry = false;
+      while (true) {
+        try {
+          return zk.setData(path, newData, version);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "setData");
+              break;
+            case BADVERSION:
+              if (isRetry) {
+                // try to verify whether the previous setData success or not
+                try{
+                  Stat stat = new Stat();
+                  byte[] revData = zk.getData(path, false, stat);
+                  if(Bytes.compareTo(revData, newData) == 0) {
+                    // the bad version is caused by previous successful setData
+                    return stat;
+                  }
+                } catch(KeeperException keeperException){
+                  // the ZK is not reliable at this moment. just throwing exception
+                  throw keeperException;
                 }
-              } catch(KeeperException keeperException){
-                // the ZK is not reliable at this moment. just throwing exception
-                throw keeperException;
               }
-            }
-          // throw other exceptions and verified bad version exceptions
-          default:
-            throw e;
+            // throw other exceptions and verified bad version exceptions
+            default:
+              throw e;
+          }
         }
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
+        isRetry = true;
       }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
-      isRetry = true;
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -418,19 +468,25 @@ public class RecoverableZooKeeper {
   public String create(String path, byte[] data, List<ACL> acl,
       CreateMode createMode)
   throws KeeperException, InterruptedException {
-    byte[] newData = appendMetaData(data);
-    switch (createMode) {
-      case EPHEMERAL:
-      case PERSISTENT:
-        return createNonSequential(path, newData, acl, createMode);
-
-      case EPHEMERAL_SEQUENTIAL:
-      case PERSISTENT_SEQUENTIAL:
-        return createSequential(path, newData, acl, createMode);
-
-      default:
-        throw new IllegalArgumentException("Unrecognized CreateMode: " +
-            createMode);
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.create");
+      byte[] newData = appendMetaData(data);
+      switch (createMode) {
+        case EPHEMERAL:
+        case PERSISTENT:
+          return createNonSequential(path, newData, acl, createMode);
+
+        case EPHEMERAL_SEQUENTIAL:
+        case PERSISTENT_SEQUENTIAL:
+          return createSequential(path, newData, acl, createMode);
+
+        default:
+          throw new IllegalArgumentException("Unrecognized CreateMode: " +
+              createMode);
+      }
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -545,25 +601,31 @@ public class RecoverableZooKeeper {
    */
   public List<OpResult> multi(Iterable<Op> ops)
   throws KeeperException, InterruptedException {
-    RetryCounter retryCounter = retryCounterFactory.create();
-    Iterable<Op> multiOps = prepareZKMulti(ops);
-    while (true) {
-      try {
-        return zk.multi(multiOps);
-      } catch (KeeperException e) {
-        switch (e.code()) {
-          case CONNECTIONLOSS:
-          case SESSIONEXPIRED:
-          case OPERATIONTIMEOUT:
-            retryOrThrow(retryCounter, e, "multi");
-            break;
+    TraceScope traceScope = null;
+    try {
+      traceScope = Trace.startSpan("RecoverableZookeeper.multi");
+      RetryCounter retryCounter = retryCounterFactory.create();
+      Iterable<Op> multiOps = prepareZKMulti(ops);
+      while (true) {
+        try {
+          return zk.multi(multiOps);
+        } catch (KeeperException e) {
+          switch (e.code()) {
+            case CONNECTIONLOSS:
+            case SESSIONEXPIRED:
+            case OPERATIONTIMEOUT:
+              retryOrThrow(retryCounter, e, "multi");
+              break;
 
-          default:
-            throw e;
+            default:
+              throw e;
+          }
         }
-      }
-      retryCounter.sleepUntilNextRetry();
-      retryCounter.useRetry();
+        retryCounter.sleepUntilNextRetry();
+        retryCounter.useRetry();
+    }
+    } finally {
+      if (traceScope != null) traceScope.close();
     }
   }
 

Modified: hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/branches/0.95/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Wed Aug 14 23:28:21 2013
@@ -1548,6 +1548,7 @@ public class ZKUtil {
       // run sequentially
       processSequentially(zkw, ops);
     }
+
   }
 
   private static void processSequentially(ZooKeeperWatcher zkw, List<ZKUtilOp> ops)

Modified: hbase/branches/0.95/hbase-it/pom.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-it/pom.xml?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-it/pom.xml (original)
+++ hbase/branches/0.95/hbase-it/pom.xml Wed Aug 14 23:28:21 2013
@@ -210,7 +210,11 @@
       </dependency>
       <dependency>
         <groupId>org.cloudera.htrace</groupId>
-        <artifactId>htrace</artifactId>
+        <artifactId>htrace-core</artifactId>
+      </dependency>
+      <dependency>
+        <groupId>org.cloudera.htrace</groupId>
+        <artifactId>htrace-zipkin</artifactId>
       </dependency>
     <!-- General dependencies -->
 

Modified: hbase/branches/0.95/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java (original)
+++ hbase/branches/0.95/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java Wed Aug 14 23:28:21 2013
@@ -42,6 +42,8 @@ import org.apache.hadoop.hbase.util.Load
 import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
+import org.cloudera.htrace.impl.AlwaysSampler;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -361,9 +363,9 @@ public class IntegrationTestMTTR {
       // Keep trying until the rs is back up and we've gotten a put through
       while (numAfterDone < 10) {
         long start = System.nanoTime();
-        Span span = null;
+        TraceScope scope = null;
         try {
-          span = Trace.startSpan(getSpanName(), Sampler.ALWAYS);
+          scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE);
           boolean actionResult = doAction();
           if (actionResult && future.isDone()) {
             numAfterDone ++;
@@ -371,11 +373,11 @@ public class IntegrationTestMTTR {
         } catch (Exception e) {
           numAfterDone = 0;
         } finally {
-          if (span != null) {
-            span.stop();
+          if (scope != null) {
+            scope.close();
           }
         }
-        result.addResult(System.nanoTime() - start, span);
+        result.addResult(System.nanoTime() - start, scope.getSpan());
       }
       return result;
     }

Modified: hbase/branches/0.95/hbase-server/pom.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/pom.xml?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/pom.xml (original)
+++ hbase/branches/0.95/hbase-server/pom.xml Wed Aug 14 23:28:21 2013
@@ -476,8 +476,12 @@
     </dependency>
     <dependency>
       <groupId>org.cloudera.htrace</groupId>
-      <artifactId>htrace</artifactId>
+      <artifactId>htrace-core</artifactId>
    </dependency>
+    <dependency>
+      <groupId>org.cloudera.htrace</groupId>
+      <artifactId>htrace-zipkin</artifactId>
+    </dependency>
   </dependencies>
   <profiles>
     <!-- Skip the tests in this module -->

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java Wed Aug 14 23:28:21 2013
@@ -28,6 +28,8 @@ import org.apache.hadoop.hbase.Server;
 import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
+import org.cloudera.htrace.impl.AlwaysSampler;
 
 
 /**
@@ -99,7 +101,7 @@ public abstract class EventHandler imple
    * Default base class constructor.
    */
   public EventHandler(Server server, EventType eventType) {
-    this.parent = Trace.currentTrace();
+    this.parent = Trace.currentSpan();
     this.server = server;
     this.eventType = eventType;
     seqid = seqids.incrementAndGet();
@@ -123,8 +125,7 @@ public abstract class EventHandler imple
   }
 
   public void run() {
-    Span chunk = Trace.startSpan(Thread.currentThread().getName(), parent,
-          Sampler.ALWAYS);
+    TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
     try {
       if (getListener() != null) getListener().beforeProcess(this);
       process();
@@ -132,7 +133,7 @@ public abstract class EventHandler imple
     } catch(Throwable t) {
       LOG.error("Caught throwable while processing event " + eventType, t);
     } finally {
-      chunk.stop();
+      chunk.close();
     }
   }
 

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java Wed Aug 14 23:28:21 2013
@@ -39,6 +39,8 @@ import org.apache.hadoop.hbase.io.FSData
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.IdLock;
 import org.apache.hadoop.io.WritableUtils;
+import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
 
 /**
  * {@link HFile} reader for version 2.
@@ -292,9 +294,9 @@ public class HFileReaderV2 extends Abstr
 
     boolean useLock = false;
     IdLock.Entry lockEntry = null;
+    TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock");
     try {
       while (true) {
-
         if (useLock) {
           lockEntry = offsetLock.getLockEntry(dataBlockOffset);
         }
@@ -329,7 +331,9 @@ public class HFileReaderV2 extends Abstr
           useLock = true;
           continue;
         }
-
+        if (Trace.isTracing()) {
+          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
+        }
         // Load block from filesystem.
         long startTimeNs = System.nanoTime();
         HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
@@ -352,6 +356,7 @@ public class HFileReaderV2 extends Abstr
         return hfileBlock;
       }
     } finally {
+      traceScope.close();
       if (lockEntry != null) {
         offsetLock.releaseLockEntry(lockEntry);
       }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java Wed Aug 14 23:28:21 2013
@@ -24,6 +24,8 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.hbase.security.User;
 
 import com.google.protobuf.BlockingService;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.cloudera.htrace.Trace;
 
 import java.net.InetAddress;
 
@@ -97,6 +99,16 @@ public class RequestContext {
     ctx.remoteAddress = remoteAddress;
     ctx.service = service;
     ctx.inRequest = true;
+    if (Trace.isTracing()) {
+      if (user != null) {
+        Trace.currentSpan().addKVAnnotation(Bytes.toBytes("user"), Bytes.toBytes(user.getName()));
+      }
+      if (remoteAddress != null) {
+        Trace.currentSpan().addKVAnnotation(
+            Bytes.toBytes("remoteAddress"),
+            Bytes.toBytes(remoteAddress.getHostAddress()));
+      }
+    }
   }
 
   /**

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java Wed Aug 14 23:28:21 2013
@@ -118,6 +118,7 @@ import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
 import org.cloudera.htrace.TraceInfo;
+import org.cloudera.htrace.TraceScope;
 import org.cloudera.htrace.impl.NullSpan;
 import org.codehaus.jackson.map.ObjectMapper;
 
@@ -320,6 +321,14 @@ public class RpcServer implements RpcSer
       return sb.toString();
     }
 
+    String toTraceString() {
+      String serviceName = this.connection.service != null ?
+                           this.connection.service.getDescriptorForType().getName() : "";
+      String methodName = (this.md != null) ? this.md.getName() : "";
+      String result = serviceName + "." + methodName;
+      return result;
+    }
+
     protected synchronized void setSaslTokenResponse(ByteBuffer response) {
       this.response = response;
     }
@@ -1828,14 +1837,13 @@ public class RpcServer implements RpcSer
           String error = null;
           Pair<Message, CellScanner> resultPair = null;
           CurCall.set(call);
-          Span currentRequestSpan = NullSpan.getInstance();
+          TraceScope traceScope = null;
           try {
             if (!started) {
               throw new ServerNotRunningYetException("Server is not running yet");
             }
             if (call.tinfo != null) {
-              currentRequestSpan = Trace.startSpan(
-                  "handling " + call.toShortString(), call.tinfo, Sampler.ALWAYS);
+              traceScope = Trace.startSpan(call.toTraceString(), call.tinfo);
             }
             User user;
             if (call.effectiveUser == null) {
@@ -1860,7 +1868,9 @@ public class RpcServer implements RpcSer
             errorThrowable = e;
             error = StringUtils.stringifyException(e);
           } finally {
-            currentRequestSpan.stop();
+            if (traceScope != null) {
+              traceScope.close();
+            }
             // Must always clear the request context to avoid leaking
             // credentials between requests.
             RequestContext.clear();

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Wed Aug 14 23:28:21 2013
@@ -788,8 +788,7 @@ MasterServices, Server {
       status.setStatus("Initializing master coprocessors");
       this.cpHost = new MasterCoprocessorHost(this, this.conf);
 
-      spanReceiverHost = new SpanReceiverHost(getConfiguration());
-      spanReceiverHost.loadSpanReceivers();
+      spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
 
       // start up all service threads.
       status.setStatus("Initializing master service threads");

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java Wed Aug 14 23:28:21 2013
@@ -210,7 +210,7 @@ public class DisableTableHandler extends
           continue;
         }
         final HRegionInfo hri = region;
-        pool.execute(Trace.wrap(new Runnable() {
+        pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() {
           public void run() {
             assignmentManager.unassign(hri, true);
           }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java Wed Aug 14 23:28:21 2013
@@ -268,7 +268,7 @@ public class EnableTableHandler extends 
             continue;
           }
           final HRegionInfo hri = region;
-          pool.execute(Trace.wrap(new Runnable() {
+          pool.execute(Trace.wrap("BulkEnabler.populatePool", new Runnable() {
             public void run() {
               assignmentManager.assign(hri, true);
             }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Wed Aug 14 23:28:21 2013
@@ -3421,7 +3421,7 @@ public class HRegion implements HeapSize
 
     RegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region)
         throws IOException {
-      // DebugPrint.println("HRegionScanner.<init>");
+
       this.region = region;
       this.maxResultSize = scan.getMaxResultSize();
       if (scan.hasFilter()) {

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Wed Aug 14 23:28:21 2013
@@ -190,6 +190,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -375,6 +376,7 @@ public class HRegionServer implements Cl
   public static final String REGIONSERVER_CONF = "regionserver_conf";
 
   private MetricsRegionServer metricsRegionServer;
+  private SpanReceiverHost spanReceiverHost;
 
   /*
    * Check for compactions requests.
@@ -1180,6 +1182,9 @@ public class HRegionServer implements Cl
       this.hlog = setupWALAndReplication();
       // Init in here rather than in constructor after thread name has been set
       this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
+
+      spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
+
       startServiceThreads();
       LOG.info("Serving as " + this.serverNameFromMasterPOV +
         ", RpcServer on " + this.isa +
@@ -1784,6 +1789,9 @@ public class HRegionServer implements Cl
     if (this.healthCheckChore != null) {
       Threads.shutdown(this.healthCheckChore.getThread());
     }
+    if (this.spanReceiverHost != null) {
+      this.spanReceiverHost.closeReceivers();
+    }
     if (this.hlogRoller != null) {
       Threads.shutdown(this.hlogRoller.getThread());
     }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java Wed Aug 14 23:28:21 2013
@@ -50,6 +50,8 @@ import org.apache.hadoop.util.StringUtil
 import org.cliffc.high_scale_lib.Counter;
 
 import com.google.common.base.Preconditions;
+import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
 
 /**
  * Thread that flushes cache on request
@@ -505,7 +507,11 @@ class MemStoreFlusher implements FlushRe
    * amount of memstore consumption.
    */
   public void reclaimMemStoreMemory() {
+    TraceScope scope = Trace.startSpan("MemStoreFlusher.reclaimMemStoreMemory");
     if (isAboveHighWaterMark()) {
+      if (Trace.isTracing()) {
+        scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
+      }
       long start = System.currentTimeMillis();
       synchronized (this.blockSignal) {
         boolean blocked = false;
@@ -542,6 +548,7 @@ class MemStoreFlusher implements FlushRe
     } else if (isAboveLowWaterMark()) {
       wakeupFlushThread();
     }
+    scope.close();
   }
   @Override
   public String toString() {

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java Wed Aug 14 23:28:21 2013
@@ -63,6 +63,8 @@ import org.apache.hadoop.hbase.util.FSUt
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.StringUtils;
+import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
 
 /**
  * HLog stores all the edits to the HStore.  Its the hbase write-ahead-log
@@ -874,35 +876,40 @@ class FSHLog implements HLog, Syncable {
       if (this.closed) {
         throw new IOException("Cannot append; log is closed");
       }
-      long txid = 0;
-      synchronized (this.updateLock) {
-        long seqNum = obtainSeqNum();
-        // The 'lastSeqWritten' map holds the sequence number of the oldest
-        // write for each region (i.e. the first edit added to the particular
-        // memstore). . When the cache is flushed, the entry for the
-        // region being flushed is removed if the sequence number of the flush
-        // is greater than or equal to the value in lastSeqWritten.
-        // Use encoded name.  Its shorter, guaranteed unique and a subset of
-        // actual  name.
-        byte [] encodedRegionName = info.getEncodedNameAsBytes();
-        if (isInMemstore) this.oldestUnflushedSeqNums.putIfAbsent(encodedRegionName, seqNum);
-        HLogKey logKey = makeKey(encodedRegionName, tableName, seqNum, now, clusterId);
-        doWrite(info, logKey, edits, htd);
-        this.numEntries.incrementAndGet();
-        txid = this.unflushedEntries.incrementAndGet();
-        if (htd.isDeferredLogFlush()) {
-          lastDeferredTxid = txid;
-        }
-      }
-      // Sync if catalog region, and if not then check if that table supports
-      // deferred log flushing
-      if (doSync &&
-          (info.isMetaRegion() ||
-          !htd.isDeferredLogFlush())) {
-        // sync txn to file system
-        this.sync(txid);
+      TraceScope traceScope = Trace.startSpan("FSHLog.append");
+      try {
+        long txid = 0;
+        synchronized (this.updateLock) {
+          long seqNum = obtainSeqNum();
+          // The 'lastSeqWritten' map holds the sequence number of the oldest
+          // write for each region (i.e. the first edit added to the particular
+          // memstore). . When the cache is flushed, the entry for the
+          // region being flushed is removed if the sequence number of the flush
+          // is greater than or equal to the value in lastSeqWritten.
+          // Use encoded name.  Its shorter, guaranteed unique and a subset of
+          // actual  name.
+          byte [] encodedRegionName = info.getEncodedNameAsBytes();
+          if (isInMemstore) this.oldestUnflushedSeqNums.putIfAbsent(encodedRegionName, seqNum);
+          HLogKey logKey = makeKey(encodedRegionName, tableName, seqNum, now, clusterId);
+          doWrite(info, logKey, edits, htd);
+          this.numEntries.incrementAndGet();
+          txid = this.unflushedEntries.incrementAndGet();
+          if (htd.isDeferredLogFlush()) {
+            lastDeferredTxid = txid;
+          }
+        }
+        // Sync if catalog region, and if not then check if that table supports
+        // deferred log flushing
+        if (doSync &&
+            (info.isMetaRegion() ||
+            !htd.isDeferredLogFlush())) {
+          // sync txn to file system
+          this.sync(txid);
+        }
+        return txid;
+      } finally {
+        traceScope.close();
       }
-      return txid;
     }
 
   @Override

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java Wed Aug 14 23:28:21 2013
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.util.ReflectionUtils;
 import org.cloudera.htrace.SpanReceiver;
 import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.impl.ZipkinSpanReceiver;
 
 /**
  * This class provides functions for reading the names of SpanReceivers from
@@ -38,8 +39,32 @@ public class SpanReceiverHost {
   private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
   private Collection<SpanReceiver> receivers;
   private Configuration conf;
+  private boolean closed = false;
 
-  public SpanReceiverHost(Configuration conf) {
+  private static enum SingletonHolder {
+    INSTANCE;
+    Object lock = new Object();
+    SpanReceiverHost host = null;
+  }
+
+  public static SpanReceiverHost getInstance(Configuration conf) {
+    if (SingletonHolder.INSTANCE.host != null) {
+      return SingletonHolder.INSTANCE.host;
+    }
+    synchronized (SingletonHolder.INSTANCE.lock) {
+      if (SingletonHolder.INSTANCE.host != null) {
+        return SingletonHolder.INSTANCE.host;
+      }
+
+      SpanReceiverHost host = new SpanReceiverHost(conf);
+      host.loadSpanReceivers();
+      SingletonHolder.INSTANCE.host = host;
+      return SingletonHolder.INSTANCE.host;
+    }
+
+  }
+
+  SpanReceiverHost(Configuration conf) {
     receivers = new HashSet<SpanReceiver>();
     this.conf = conf;
   }
@@ -48,13 +73,7 @@ public class SpanReceiverHost {
    * Reads the names of classes specified in the
    * "hbase.trace.spanreceiver.classes" property and instantiates and registers
    * them with the Tracer as SpanReceiver's.
-   * 
-   * The nullary constructor is called during construction, but if the classes
-   * specified implement the Configurable interface, setConfiguration() will be
-   * called on them. This allows SpanReceivers to use values from
-   * hbase-site.xml. See
-   * {@link org.apache.hadoop.hbase.trace.HBaseLocalFileSpanReceiver} for an
-   * example.
+   *
    */
   public void loadSpanReceivers() {
     Class<?> implClass = null;
@@ -67,8 +86,12 @@ public class SpanReceiverHost {
 
       try {
         implClass = Class.forName(className);
-        receivers.add(loadInstance(implClass));
-        LOG.info("SpanReceiver " + className + " was loaded successfully.");
+        SpanReceiver receiver = loadInstance(implClass);
+        if (receiver != null) {
+          receivers.add(receiver);
+          LOG.info("SpanReceiver " + className + " was loaded successfully.");
+        }
+
       } catch (ClassNotFoundException e) {
         LOG.warn("Class " + className + " cannot be found. " + e.getMessage());
       } catch (IOException e) {
@@ -83,16 +106,21 @@ public class SpanReceiverHost {
 
   private SpanReceiver loadInstance(Class<?> implClass)
       throws IOException {
-    SpanReceiver impl;
+    SpanReceiver impl = null;
     try {
-      Object o = ReflectionUtils.newInstance(implClass, conf);
+      Object o = implClass.newInstance();
       impl = (SpanReceiver)o;
+      impl.configure(new HBaseHTraceConfiguration(this.conf));
     } catch (SecurityException e) {
       throw new IOException(e);
     } catch (IllegalArgumentException e) {
       throw new IOException(e);
     } catch (RuntimeException e) {
       throw new IOException(e);
+    } catch (InstantiationException e) {
+      e.printStackTrace();
+    } catch (IllegalAccessException e) {
+      e.printStackTrace();
     }
 
     return impl;
@@ -101,7 +129,9 @@ public class SpanReceiverHost {
   /**
    * Calls close() on all SpanReceivers created by this SpanReceiverHost.
    */
-  public void closeReceivers() {
+  public synchronized void closeReceivers() {
+    if (closed) return;
+    closed = true;
     for (SpanReceiver rcvr : receivers) {
       try {
         rcvr.close();

Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java Wed Aug 14 23:28:21 2013
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.client.Pu
 import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
 import org.cloudera.htrace.TraceTree;
 import org.cloudera.htrace.impl.POJOSpanReceiver;
 import org.junit.AfterClass;
@@ -60,13 +61,14 @@ public class TestHTraceHooks {
 
   @Test
   public void testTraceCreateTable() throws Exception {
-    Span tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
+    TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
     HTable table; 
     try {
+
       table = TEST_UTIL.createTable("table".getBytes(),
         FAMILY_BYTES);
     } finally {
-      tableCreationSpan.stop();
+      tableCreationSpan.close();
     }
 
     Collection<Span> spans = rcvr.getSpans();
@@ -80,26 +82,26 @@ public class TestHTraceHooks {
     Multimap<Long, Span> spansByParentIdMap = traceTree
         .getSpansByParentIdMap();
 
-    int startsWithHandlingCount = 0;
+    int createTableCount = 0;
 
     for (Span s : spansByParentIdMap.get(createTableRoot.getSpanId())) {
-      if (s.getDescription().startsWith("handling")) {
-        startsWithHandlingCount++;
+      if (s.getDescription().startsWith("MasterAdminService.CreateTable")) {
+        createTableCount++;
       }
     }
 
-    assertTrue(startsWithHandlingCount > 3);
+    assertTrue(createTableCount >= 1);
     assertTrue(spansByParentIdMap.get(createTableRoot.getSpanId()).size() > 3);
     assertTrue(spans.size() > 5);
     
     Put put = new Put("row".getBytes());
     put.add(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
 
-    Span putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
+    TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
     try {
       table.put(put);
     } finally {
-      putSpan.stop();
+      putSpan.close();
     }
 
     spans = rcvr.getSpans();

Modified: hbase/branches/0.95/pom.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/pom.xml?rev=1514092&r1=1514091&r2=1514092&view=diff
==============================================================================
--- hbase/branches/0.95/pom.xml (original)
+++ hbase/branches/0.95/pom.xml Wed Aug 14 23:28:21 2013
@@ -889,7 +889,7 @@
     <jersey.version>1.8</jersey.version>
     <jruby.version>1.6.8</jruby.version>
     <junit.version>4.11</junit.version>
-    <htrace.version>1.50</htrace.version>
+    <htrace.version>2.00</htrace.version>
     <log4j.version>1.2.17</log4j.version>
     <mockito-all.version>1.9.0</mockito-all.version>
     <protobuf.version>2.4.1</protobuf.version>
@@ -1313,7 +1313,12 @@
       </dependency>
       <dependency>
         <groupId>org.cloudera.htrace</groupId>
-        <artifactId>htrace</artifactId>
+        <artifactId>htrace-core</artifactId>
+        <version>${htrace.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.cloudera.htrace</groupId>
+        <artifactId>htrace-zipkin</artifactId>
         <version>${htrace.version}</version>
       </dependency>
     </dependencies>