You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2017/11/13 18:39:08 UTC

[2/3] hbase git commit: HBASE-18601: Update Htrace to 4.2

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
index 21c7f51..a91aafb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * RpcConnection implementation for netty rpc server.
@@ -119,9 +118,9 @@ class NettyServerRpcConnection extends ServerRpcConnection {
   @Override
   public NettyServerCall createCall(int id, final BlockingService service,
       final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner,
-      long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout,
+      long size, final InetAddress remoteAddress, int timeout,
       CallCleanup reqCleanup) {
-    return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+    return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
         remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
         this.rpcServer.cellBlockBuilder, reqCleanup);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
index 3562d86..51b1684 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Interface of all necessary to carry out a RPC method invocation on the server.
@@ -133,9 +132,4 @@ public interface RpcCall extends RpcCallContext {
    * @return A short string format of this call without possibly lengthy params
    */
   String toShortString();
-
-  /**
-   * @return TraceInfo attached to this call.
-   */
-  TraceInfo getTraceInfo();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
index 60fe30e..2fca7f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeade
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Datastructure that holds all necessary to a method invocation and then afterward, carries
@@ -79,7 +78,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
 
   protected final long size;                          // size of current call
   protected boolean isError;
-  protected final TraceInfo tinfo;
   protected ByteBufferListOutputStream cellBlockStream = null;
   protected CallCleanup reqCleanup = null;
 
@@ -96,7 +94,7 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
       justification="Can't figure why this complaint is happening... see below")
   ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
-      Message param, CellScanner cellScanner, T connection, long size, TraceInfo tinfo,
+      Message param, CellScanner cellScanner, T connection, long size,
       InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
       CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
     this.id = id;
@@ -110,7 +108,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
     this.response = null;
     this.isError = false;
     this.size = size;
-    this.tinfo = tinfo;
     if (connection != null) {
       this.user =  connection.user;
       this.retryImmediatelySupported = connection.retryImmediatelySupported;
@@ -507,11 +504,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
   }
 
   @Override
-  public TraceInfo getTraceInfo() {
-    return tinfo;
-  }
-
-  @Override
   public synchronized BufferChain getResponse() {
     return response;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index e1ac741..4d0239f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -77,7 +77,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.htrace.TraceInfo;
 
 /** Reads calls from a connection and queues them for handling. */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
@@ -632,7 +631,7 @@ abstract class ServerRpcConnection implements Closeable {
     if ((totalRequestSize +
         this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) {
       final ServerCall<?> callTooBig = createCall(id, this.service, null, null, null, null,
-        totalRequestSize, null, null, 0, this.callCleanup);
+        totalRequestSize, null, 0, this.callCleanup);
       this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
       callTooBig.setResponse(null, null,  RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION,
         "Call queue is full on " + this.rpcServer.server.getServerName() +
@@ -694,21 +693,18 @@ abstract class ServerRpcConnection implements Closeable {
       }
 
       ServerCall<?> readParamsFailedCall = createCall(id, this.service, null, null, null, null,
-        totalRequestSize, null, null, 0, this.callCleanup);
+        totalRequestSize, null, 0, this.callCleanup);
       readParamsFailedCall.setResponse(null, null, t, msg + "; " + t.getMessage());
       readParamsFailedCall.sendResponseIfReady();
       return;
     }
 
-    TraceInfo traceInfo = header.hasTraceInfo() ? new TraceInfo(header
-        .getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
-        : null;
     int timeout = 0;
     if (header.hasTimeout() && header.getTimeout() > 0) {
       timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
     }
     ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize,
-      traceInfo, this.addr, timeout, this.callCleanup);
+      this.addr, timeout, this.callCleanup);
 
     if (!this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
       this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize());
@@ -790,7 +786,7 @@ abstract class ServerRpcConnection implements Closeable {
   public abstract boolean isConnectionOpen();
 
   public abstract ServerCall<?> createCall(int id, BlockingService service, MethodDescriptor md,
-      RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+      RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup);
 
   private static class ByteBuffByteInput extends ByteInput {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 69cc48d..36ae74a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -489,7 +489,7 @@ public class SimpleRpcServer extends RpcServer {
       Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
       long startTime, int timeout) throws IOException {
     SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner,
-        null, -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
+        null, -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
     return call(fakeCall, status);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
index 5a26c05..46295fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -43,10 +42,10 @@ class SimpleServerCall extends ServerCall<SimpleServerRpcConnection> {
       justification = "Can't figure why this complaint is happening... see below")
   SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md,
       RequestHeader header, Message param, CellScanner cellScanner,
-      SimpleServerRpcConnection connection, long size, TraceInfo tinfo,
+      SimpleServerRpcConnection connection, long size,
       final InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
       CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) {
-    super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+    super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
         receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
     this.responder = responder;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
index 68545f3..c8dfe4a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.htrace.TraceInfo;
 
 /** Reads calls from a connection and queues them for handling. */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT",
@@ -212,7 +211,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
           // Notify the client about the offending request
           SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
-              null, null, null, this, 0, null, this.addr, System.currentTimeMillis(), 0,
+              null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
               this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, null, responder);
           this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
           // Make sure the client recognizes the underlying exception
@@ -343,9 +342,9 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
   @Override
   public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md,
-      RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+      RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
-    return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+    return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
         remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
         this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 91c5218..cad77e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
@@ -470,6 +471,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   public HMaster(final Configuration conf)
       throws IOException, KeeperException {
     super(conf);
+    TraceUtil.initTracer(conf);
     try {
       this.rsFatals = new MemoryBoundedLogMessageBuffer(
           conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
index f9a441d..093412a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -147,6 +148,8 @@ public class HMasterCommandLine extends ServerCommandLine {
 
   private int startMaster() {
     Configuration conf = getConf();
+    TraceUtil.initTracer(conf);
+
     try {
       // If 'local', defer to LocalHBaseCluster instance.  Starts master
       // and regionserver both in the one JVM.

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d3cd087..197aa3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -91,11 +91,11 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
@@ -149,8 +149,31 @@ import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.shaded.com.google.common.io.Closeables;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -172,33 +195,9 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-import org.apache.hadoop.hbase.shaded.com.google.common.io.Closeables;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
-
 import edu.umd.cs.findbugs.annotations.Nullable;
 
 /**
@@ -3727,6 +3726,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
   }
 
+  @Override
   public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
     return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
   }
@@ -5560,16 +5560,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
     RowLockContext rowLockContext = null;
     RowLockImpl result = null;
-    TraceScope traceScope = null;
-
-    // If we're tracing start a span to show how long this took.
-    if (Trace.isTracing()) {
-      traceScope = Trace.startSpan("HRegion.getRowLock");
-      traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
-    }
 
     boolean success = false;
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
+      TraceUtil.addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
       // Keep trying until we have a lock or error out.
       // TODO: do we need to add a time component here?
       while (result == null) {
@@ -5598,9 +5592,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       }
 
       if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
-        if (traceScope != null) {
-          traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
-        }
+        TraceUtil.addTimelineAnnotation("Failed to get row lock");
         result = null;
         String message = "Timed out waiting for lock for row: " + rowKey + " in region "
             + getRegionInfo().getEncodedName();
@@ -5618,9 +5610,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
       InterruptedIOException iie = new InterruptedIOException();
       iie.initCause(ie);
-      if (traceScope != null) {
-        traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
-      }
+      TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock");
       Thread.currentThread().interrupt();
       throw iie;
     } finally {
@@ -5628,9 +5618,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       if (!success && rowLockContext != null) {
         rowLockContext.cleanUp();
       }
-      if (traceScope != null) {
-        traceScope.close();
-      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 6ad595f..4c34fe0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
@@ -526,6 +527,7 @@ public class HRegionServer extends HasThread implements
   // Defer till after we register with the Master as much as possible. See #startServices.
   public HRegionServer(Configuration conf) throws IOException {
     super("RegionServer");  // thread name
+    TraceUtil.initTracer(conf);
     try {
       this.startcode = System.currentTimeMillis();
       this.conf = conf;

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
index 1212668..c2e1111 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +51,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
 
   private int start() throws Exception {
     Configuration conf = getConf();
+    TraceUtil.initTracer(conf);
     try {
       // If 'local', don't start a region server here. Defer to
       // LocalHBaseCluster. It manages 'local' clusters.

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index ae4c8eb..a314848 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -51,12 +53,9 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-
 /**
  * Thread that flushes cache on request
  *
@@ -447,7 +446,7 @@ class MemStoreFlusher implements FlushRequester {
             "store files; delaying flush up to " + this.blockingWaitTime + "ms");
           if (!this.server.compactSplitThread.requestSplit(region)) {
             try {
-              this.server.compactSplitThread.requestSystemCompaction((HRegion) region,
+              this.server.compactSplitThread.requestSystemCompaction(region,
                 Thread.currentThread().getName());
             } catch (IOException e) {
               e = e instanceof RemoteException ?
@@ -572,12 +571,10 @@ class MemStoreFlusher implements FlushRequester {
    * amount of memstore consumption.
    */
   public void reclaimMemStoreMemory() {
-    TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
+    TraceScope scope = TraceUtil.createTrace("MemStoreFluser.reclaimMemStoreMemory");
     FlushType flushType = isAboveHighWaterMark();
     if (flushType != FlushType.NORMAL) {
-      if (Trace.isTracing()) {
-        scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
-      }
+      TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
       long start = EnvironmentEdgeManager.currentTime();
       synchronized (this.blockSignal) {
         boolean blocked = false;
@@ -640,7 +637,9 @@ class MemStoreFlusher implements FlushRequester {
     } else if (isAboveLowWaterMark() != FlushType.NORMAL) {
       wakeupFlushThread();
     }
-    scope.close();
+    if(scope!= null) {
+      scope.close();
+    }
   }
 
   private void logMsg(String string1, long val, long max) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ad54cab..f7fbd86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -72,14 +74,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
 import com.lmax.disruptor.RingBuffer;
 
 /**
@@ -681,8 +679,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
    * @throws IOException if there is a problem flushing or closing the underlying FS
    */
   Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
-    TraceScope scope = Trace.startSpan("FSHFile.replaceWriter");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("FSHFile.replaceWriter")) {
       long oldFileLen = doReplaceWriter(oldPath, newPath, nextWriter);
       int oldNumEntries = this.numEntries.getAndSet(0);
       final String newPathString = (null == newPath ? null : CommonFSUtils.getPath(newPath));
@@ -696,16 +693,16 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
         LOG.info("New WAL " + newPathString);
       }
       return newPath;
-    } finally {
-      scope.close();
     }
   }
 
   protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
     // Now we have published the ringbuffer, halt the current thread until we get an answer back.
     try {
-      syncFuture.get(walSyncTimeoutNs);
-      return syncFuture.getSpan();
+      if (syncFuture != null) {
+        syncFuture.get(walSyncTimeoutNs);
+      }
+      return (syncFuture == null) ? null : syncFuture.getSpan();
     } catch (TimeoutIOException tioe) {
       // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
       // still refer to it, so if this thread use it next time may get a wrong
@@ -748,8 +745,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
         LOG.debug("WAL closing. Skipping rolling of writer");
         return regionsToFlush;
       }
-      TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
-      try {
+      try (TraceScope scope = TraceUtil.createTrace("FSHLog.rollWriter")) {
         Path oldPath = getOldPath();
         Path newPath = getNewPath();
         // Any exception from here on is catastrophic, non-recoverable so we currently abort.
@@ -774,8 +770,6 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
             "for details.", exception);
       } finally {
         closeBarrier.endOp();
-        assert scope == NullScope.INSTANCE || !scope.isDetached();
-        scope.close();
       }
       return regionsToFlush;
     } finally {
@@ -950,7 +944,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     if (timeInNanos > this.slowSyncNs) {
       String msg = new StringBuilder().append("Slow sync cost: ").append(timeInNanos / 1000000)
           .append(" ms, current pipeline: ").append(Arrays.toString(getPipeline())).toString();
-      Trace.addTimelineAnnotation(msg);
+      TraceUtil.addTimelineAnnotation(msg);
       LOG.info(msg);
     }
     if (!listeners.isEmpty()) {
@@ -966,16 +960,20 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     if (this.closed) {
       throw new IOException("Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
     }
-    TraceScope scope = Trace.startSpan(implClassName + ".append");
     MutableLong txidHolder = new MutableLong();
     MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
       txidHolder.setValue(ringBuffer.next());
     });
     long txid = txidHolder.longValue();
-    try {
+    try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
       FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
       entry.stampRegionSequenceId(we);
-      ringBuffer.get(txid).load(entry, scope.detach());
+      if(scope!=null){
+        ringBuffer.get(txid).load(entry, scope.getSpan());
+      }
+      else{
+        ringBuffer.get(txid).load(entry, null);
+      }
     } finally {
       ringBuffer.publish(txid);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index cff3f70..d4e113a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -44,6 +44,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
@@ -52,18 +54,14 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.yetus.audience.InterfaceAudience;
-
 import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
 import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
 import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.SingleThreadEventExecutor;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 
 import com.lmax.disruptor.RingBuffer;
 import com.lmax.disruptor.Sequence;
@@ -342,9 +340,9 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
   }
 
   private void addTimeAnnotation(SyncFuture future, String annotation) {
-    TraceScope scope = Trace.continueSpan(future.getSpan());
-    Trace.addTimelineAnnotation(annotation);
-    future.setSpan(scope.detach());
+    TraceUtil.addTimelineAnnotation(annotation);
+    //TODO handle htrace API change, see HBASE-18895
+    //future.setSpan(scope.getSpan());
   }
 
   private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) {
@@ -415,14 +413,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
       Span span = entry.detachSpan();
       // the span maybe null if this is a retry after rolling.
       if (span != null) {
-        TraceScope scope = Trace.continueSpan(span);
+        //TODO handle htrace API change, see HBASE-18895
+        //TraceScope scope = Trace.continueSpan(span);
         try {
           appended = append(writer, entry);
         } catch (IOException e) {
           throw new AssertionError("should not happen", e);
         } finally {
-          assert scope == NullScope.INSTANCE || !scope.isDetached();
-          scope.close(); // append scope is complete
+          //TODO handle htrace API change, see HBASE-18895
+          //assert scope == NullScope.INSTANCE || !scope.isDetached();
+          //scope.close(); // append scope is complete
         }
       } else {
         try {
@@ -559,24 +559,26 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
 
   @Override
   public void sync() throws IOException {
-    TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")){
       long txid = waitingConsumePayloads.next();
-      SyncFuture future;
+      SyncFuture future = null;
       try {
-        future = getSyncFuture(txid, scope.detach());
-        RingBufferTruck truck = waitingConsumePayloads.get(txid);
-        truck.load(future);
+        if (scope != null) {
+          future = getSyncFuture(txid, scope.getSpan());
+          RingBufferTruck truck = waitingConsumePayloads.get(txid);
+          truck.load(future);
+        }
       } finally {
         waitingConsumePayloads.publish(txid);
       }
       if (shouldScheduleConsumer()) {
         eventLoop.execute(consumer);
       }
-      scope = Trace.continueSpan(blockOnSync(future));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+      //TODO handle htrace API change, see HBASE-18895
+      //scope = Trace.continueSpan(blockOnSync(future));
+      if (future != null) {
+        blockOnSync(future);
+      }
     }
   }
 
@@ -585,25 +587,27 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     if (highestSyncedTxid.get() >= txid) {
       return;
     }
-    TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) {
       // here we do not use ring buffer sequence as txid
       long sequence = waitingConsumePayloads.next();
-      SyncFuture future;
+      SyncFuture future = null;
       try {
-        future = getSyncFuture(txid, scope.detach());
-        RingBufferTruck truck = waitingConsumePayloads.get(sequence);
-        truck.load(future);
+        if(scope!= null) {
+          future = getSyncFuture(txid, scope.getSpan());
+          RingBufferTruck truck = waitingConsumePayloads.get(sequence);
+          truck.load(future);
+        }
       } finally {
         waitingConsumePayloads.publish(sequence);
       }
       if (shouldScheduleConsumer()) {
         eventLoop.execute(consumer);
       }
-      scope = Trace.continueSpan(blockOnSync(future));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+      //TODO handle htrace API change, see HBASE-18895
+      //scope = Trace.continueSpan(blockOnSync(future));
+      if (future != null) {
+        blockOnSync(future);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index cc9601b..c4e23da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -54,10 +55,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
@@ -345,7 +344,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
           // use assert to make sure no change breaks the logic that
           // sequence and zigzagLatch will be set together
           assert sequence > 0L : "Failed to get sequence from ring buffer";
-          Trace.addTimelineAnnotation("awaiting safepoint");
+          TraceUtil.addTimelineAnnotation("awaiting safepoint");
           syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence));
         }
       } catch (FailedSyncBeforeLogCloseException e) {
@@ -361,9 +360,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       if (this.writer != null) {
         oldFileLen = this.writer.getLength();
         try {
-          Trace.addTimelineAnnotation("closing writer");
+          TraceUtil.addTimelineAnnotation("closing writer");
           this.writer.close();
-          Trace.addTimelineAnnotation("writer closed");
+          TraceUtil.addTimelineAnnotation("writer closed");
           this.closeErrorCount.set(0);
         } catch (IOException ioe) {
           int errors = closeErrorCount.incrementAndGet();
@@ -595,13 +594,14 @@ public class FSHLog extends AbstractFSWAL<Writer> {
           }
           // I got something. Lets run. Save off current sequence number in case it changes
           // while we run.
-          TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
+          //TODO handle htrace API change, see HBASE-18895
+          //TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
           long start = System.nanoTime();
           Throwable lastException = null;
           try {
-            Trace.addTimelineAnnotation("syncing writer");
+            TraceUtil.addTimelineAnnotation("syncing writer");
             writer.sync();
-            Trace.addTimelineAnnotation("writer synced");
+            TraceUtil.addTimelineAnnotation("writer synced");
             currentSequence = updateHighestSyncedSequence(currentSequence);
           } catch (IOException e) {
             LOG.error("Error syncing, request close of WAL", e);
@@ -611,7 +611,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
             lastException = e;
           } finally {
             // reattach the span to the future before releasing.
-            takeSyncFuture.setSpan(scope.detach());
+            //TODO handle htrace API change, see HBASE-18895
+            // takeSyncFuture.setSpan(scope.getSpan());
             // First release what we 'took' from the queue.
             syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException);
             // Can we release other syncs?
@@ -727,8 +728,15 @@ public class FSHLog extends AbstractFSWAL<Writer> {
   }
 
   // Sync all known transactions
-  private Span publishSyncThenBlockOnCompletion(Span span) throws IOException {
-    return blockOnSync(publishSyncOnRingBuffer(span));
+  private void publishSyncThenBlockOnCompletion(TraceScope scope) throws IOException {
+    if (scope != null) {
+      SyncFuture syncFuture = publishSyncOnRingBuffer(scope.getSpan());
+      blockOnSync(syncFuture);
+    }
+    else {
+      SyncFuture syncFuture = publishSyncOnRingBuffer(null);
+      blockOnSync(syncFuture);
+    }
   }
 
   /**
@@ -754,12 +762,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
 
   @Override
   public void sync() throws IOException {
-    TraceScope scope = Trace.startSpan("FSHLog.sync");
-    try {
-      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+      publishSyncThenBlockOnCompletion(scope);
     }
   }
 
@@ -769,12 +773,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       // Already sync'd.
       return;
     }
-    TraceScope scope = Trace.startSpan("FSHLog.sync");
-    try {
-      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+      publishSyncThenBlockOnCompletion(scope);
     }
   }
 
@@ -996,7 +996,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
           }
         } else if (truck.type() == RingBufferTruck.Type.APPEND) {
           FSWALEntry entry = truck.unloadAppend();
-          TraceScope scope = Trace.continueSpan(entry.detachSpan());
+          //TODO handle htrace API change, see HBASE-18895
+          //TraceScope scope = Trace.continueSpan(entry.detachSpan());
           try {
 
             if (this.exception != null) {
@@ -1015,9 +1016,6 @@ public class FSHLog extends AbstractFSWAL<Writer> {
                     : new DamagedWALException("On sync", this.exception));
             // Return to keep processing events coming off the ringbuffer
             return;
-          } finally {
-            assert scope == NullScope.INSTANCE || !scope.isDetached();
-            scope.close(); // append scope is complete
           }
         } else {
           // What is this if not an append or sync. Fail all up to this!!!

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 03ef008..debe9e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
index a63b281..021f6a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
@@ -18,8 +18,8 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
+import org.apache.htrace.core.Span;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.Span;
 
 /**
  * A 'truck' to carry a payload across the ring buffer from Handler to WAL. Has EITHER a

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index 13d103b..0dbd020 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
 
 /**
  * A Future on a filesystem sync call. It given to a client or 'Handler' for it to wait on till the

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 0a1c60f..3b3d568 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
@@ -657,6 +658,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
         setLevel(org.apache.log4j.Level.ERROR);
 
+    TraceUtil.initTracer(conf);
 
     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
       true, null, null, hosts, null);
@@ -1125,6 +1127,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
 
     Configuration c = new Configuration(this.conf);
+    TraceUtil.initTracer(c);
     this.hbaseCluster =
         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
     // Don't leave here till we've done a successful scan of the hbase:meta

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index bb91770..8521e65 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -563,7 +563,7 @@ public class TestSimpleRpcScheduler {
     ServerCall putCall = new ServerCall(1, null, null,
         RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(),
         RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))),
-        null, null, 9, null, null, timestamp, 0, null, null, null) {
+        null, null, 9, null, timestamp, 0, null, null, null) {
 
       @Override
       public void sendResponseIfReady() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
index 63bcbdf..33f77ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
@@ -18,25 +18,21 @@
 package org.apache.hadoop.hbase.trace;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import java.lang.reflect.Method;
-import java.util.Collection;
-
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.TraceTree;
-import org.apache.htrace.impl.POJOSpanReceiver;
+import org.apache.htrace.core.POJOSpanReceiver;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -44,103 +40,84 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+
 @Category({MiscTests.class, MediumTests.class})
 public class TestHTraceHooks {
 
   private static final byte[] FAMILY_BYTES = "family".getBytes();
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static POJOSpanReceiver rcvr;
-  private static long ROOT_SPAN_ID = 0;
+  private static SpanId ROOT_SPAN_ID = new SpanId(0, 0);
 
   @Rule
   public TestName name = new TestName();
 
   @BeforeClass
   public static void before() throws Exception {
-
-    // Find out what the right value to use fo SPAN_ROOT_ID after HTRACE-111. We use HTRACE-32
-    // to find out to detect if we are using HTrace 3.2 or not.
-    try {
-        Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class);
-    } catch (NoSuchMethodException e) {
-      ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2
-    }
-
     TEST_UTIL.startMiniCluster(2, 3);
     rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
-    Trace.addReceiver(rcvr);
+    TraceUtil.addReceiver(rcvr);
+    TraceUtil.addSampler(new Sampler() {
+      @Override
+      public boolean next() {
+        return true;
+      }
+    });
   }
 
   @AfterClass
   public static void after() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
-    Trace.removeReceiver(rcvr);
+    TraceUtil.removeReceiver(rcvr);
     rcvr = null;
   }
 
   @Test
   public void testTraceCreateTable() throws Exception {
-    TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
     Table table;
-    try {
-
+    Span createTableSpan;
+    try (TraceScope scope = TraceUtil.createTrace("creating table")) {
+      createTableSpan = scope.getSpan();
       table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES);
-    } finally {
-      tableCreationSpan.close();
     }
 
     // Some table creation is async.  Need to make sure that everything is fully in before
     // checking to see if the spans are there.
-    TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        return rcvr.getSpans().size() >= 5;
+    TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+      @Override public boolean evaluate() throws Exception {
+        return rcvr == null || rcvr.getSpans().size() >= 5;
       }
     });
 
-    Collection<Span> spans = rcvr.getSpans();
+    Collection<Span> spans = Sets.newHashSet(rcvr.getSpans());
+    List<Span> roots = new LinkedList<>();
     TraceTree traceTree = new TraceTree(spans);
-    Collection<Span> roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
-
-    assertEquals(1, roots.size());
-    Span createTableRoot = roots.iterator().next();
+    roots.addAll(traceTree.getSpansByParent().find(createTableSpan.getSpanId()));
 
-    assertEquals("creating table", createTableRoot.getDescription());
+    assertEquals(3, roots.size());
+    assertEquals("creating table", createTableSpan.getDescription());
 
-    int createTableCount = 0;
-
-    for (Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) {
-      if (s.getDescription().startsWith("MasterService.CreateTable")) {
-        createTableCount++;
-      }
+    if (spans != null) {
+      assertTrue(spans.size() > 5);
     }
 
-    assertTrue(createTableCount >= 1);
-    assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
-    assertTrue(spans.size() > 5);
-    
     Put put = new Put("row".getBytes());
     put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
 
-    TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
-    try {
+    Span putSpan;
+
+    try (TraceScope scope = TraceUtil.createTrace("doing put")) {
+      putSpan = scope.getSpan();
       table.put(put);
-    } finally {
-      putSpan.close();
     }
 
     spans = rcvr.getSpans();
     traceTree = new TraceTree(spans);
-    roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
-
-    assertEquals(2, roots.size());
-    Span putRoot = null;
-    for (Span root : roots) {
-      if (root.getDescription().equals("doing put")) {
-        putRoot = root;
-      }
-    }
-    
-    assertNotNull(putRoot);
+    roots.clear();
+    roots.addAll(traceTree.getSpansByParent().find(putSpan.getSpanId()));
+    assertEquals(1, roots.size());
   }
 }
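
The rewritten test above is the migration pattern this patch applies
throughout: htrace-3's per-call Sampler argument and try/finally bookkeeping
become a tracer-wide sampler plus try-with-resources, since htrace-4's
TraceScope is Closeable. A condensed before/after sketch (hypothetical
helper; the null-span comment reflects htrace-4 behavior when a sampler
declines a trace):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.trace.TraceUtil;
    import org.apache.htrace.core.Sampler;
    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;

    final class TracedPutSketch {
      // htrace-3 equivalent (removed above):
      //   TraceScope scope = Trace.startSpan("doing put", Sampler.ALWAYS);
      //   try { table.put(put); } finally { scope.close(); }
      static void tracedPut(Table table, Put put) throws IOException {
        TraceUtil.addSampler(Sampler.ALWAYS);
        try (TraceScope scope = TraceUtil.createTrace("doing put")) {
          Span span = scope.getSpan(); // may be null if the trace is unsampled
          table.put(put);
        }
      }
    }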

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
new file mode 100644
index 0000000..bba4ee5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Used to create the graph formed by spans.
+ */
+public class TraceTree {
+
+  public static class SpansByParent {
+    private final Set<Span> set;
+
+    private final HashMap<SpanId, LinkedList<Span>> parentToSpans;
+
+    SpansByParent(Collection<Span> spans) {
+      set = new LinkedHashSet<Span>();
+      parentToSpans = new HashMap<SpanId, LinkedList<Span>>();
+      if (spans == null) {
+        return;
+      }
+      for (Span span : spans) {
+        set.add(span);
+        for (SpanId parent : span.getParents()) {
+          LinkedList<Span> list = parentToSpans.get(parent);
+          if (list == null) {
+            list = new LinkedList<Span>();
+            parentToSpans.put(parent, list);
+          }
+          list.add(span);
+        }
+        if (span.getParents().length == 0) {
+          // Root spans (no parents) are keyed under a sentinel SpanId.
+          SpanId rootId = new SpanId(Long.MIN_VALUE, Long.MIN_VALUE);
+          LinkedList<Span> list = parentToSpans.get(rootId);
+          if (list == null) {
+            list = new LinkedList<Span>();
+            parentToSpans.put(rootId, list);
+          }
+          list.add(span);
+        }
+      }
+    }
+
+    public List<Span> find(SpanId parentId) {
+      LinkedList<Span> spans = parentToSpans.get(parentId);
+      if (spans == null) {
+        return new LinkedList<Span>();
+      }
+      return spans;
+    }
+
+    public Iterator<Span> iterator() {
+      return Collections.unmodifiableSet(set).iterator();
+    }
+  }
+
+  public static class SpansByProcessId {
+    private final Set<Span> set;
+
+    SpansByProcessId(Collection<Span> spans) {
+      set = new LinkedHashSet<Span>();
+      if (spans == null) {
+        return;
+      }
+      for (Span span : spans) {
+        set.add(span);
+      }
+    }
+
+    public Iterator<Span> iterator() {
+      return Collections.unmodifiableSet(set).iterator();
+    }
+  }
+
+  private final SpansByParent spansByParent;
+  private final SpansByProcessId spansByProcessId;
+
+  /**
+   * Create a new TraceTree
+   *
+   * @param spans The collection of spans to use to create this TraceTree. Should
+   *              have at least one root span.
+   */
+  public TraceTree(Collection<Span> spans) {
+    this.spansByParent = new SpansByParent(spans);
+    this.spansByProcessId = new SpansByProcessId(spans);
+  }
+
+  public SpansByParent getSpansByParent() {
+    return spansByParent;
+  }
+
+  public SpansByProcessId getSpansByProcessId() {
+    return spansByProcessId;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder bld = new StringBuilder();
+    String prefix = "";
+    for (Iterator<Span> iter = spansByParent.iterator(); iter.hasNext();) {
+      Span span = iter.next();
+      bld.append(prefix).append(span.toString());
+      prefix = "\n";
+    }
+    return bld.toString();
+  }
+}
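
TraceTree is a test-only helper added here (htrace-3 used to ship a similar
class). Typical use, as in TestHTraceHooks above; the sketch assumes the
caller imports from org.apache.hadoop.hbase.trace:

    import java.util.Collection;
    import java.util.List;
    import org.apache.hadoop.hbase.trace.TraceTree;
    import org.apache.htrace.core.POJOSpanReceiver;
    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.SpanId;

    final class TraceTreeUsageSketch {
      // Returns the direct children of the given span; empty list if none.
      static List<Span> childrenOf(POJOSpanReceiver rcvr, SpanId parent) {
        Collection<Span> spans = rcvr.getSpans(); // snapshot of received spans
        TraceTree tree = new TraceTree(spans);
        return tree.getSpansByParent().find(parent);
      }
    }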

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 75aba03..9a5c19d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -58,16 +58,17 @@ import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
+import org.apache.htrace.core.ProbabilitySampler;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import com.codahale.metrics.ConsoleReporter;
@@ -172,15 +173,13 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       Random rand = new Random(Thread.currentThread().getId());
       WAL wal = region.getWAL();
 
-      TraceScope threadScope =
-        Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
-      try {
+      try (TraceScope threadScope = TraceUtil.createTrace("WALPerfEval." + Thread.currentThread().getName())) {
         long startTime = System.currentTimeMillis();
         int lastSync = 0;
+        TraceUtil.addSampler(loopSampler);
         for (int i = 0; i < numIterations; ++i) {
-          assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
-          TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
-          try {
+          assert Tracer.getCurrentSpan() == threadScope.getSpan() : "Span leak detected.";
+          try (TraceScope loopScope = TraceUtil.createTrace("runLoopIter" + i)) {
             long now = System.nanoTime();
             Put put = setupPut(rand, key, value, numFamilies);
             WALEdit walEdit = new WALEdit();
@@ -196,16 +195,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
               }
             }
             latencyHistogram.update(System.nanoTime() - now);
-          } finally {
-            loopScope.close();
           }
         }
         long totalTime = (System.currentTimeMillis() - startTime);
         logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
       } catch (Exception e) {
         LOG.error(getClass().getSimpleName() + " Thread failed", e);
-      } finally {
-        threadScope.close();
       }
     }
   }
@@ -315,8 +310,9 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     LOG.info("FileSystem: " + fs);
 
     SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
-    final Sampler<?> sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
-    TraceScope scope = Trace.startSpan("WALPerfEval", sampler);
+    final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
+    TraceUtil.addSampler(sampler);
+    TraceScope scope = TraceUtil.createTrace("WALPerfEval");
 
     try {
       if (rootRegionDir == null) {
@@ -338,8 +334,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
           // a table per desired region means we can avoid carving up the key space
           final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
           regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
-          benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
-              syncInterval, traceFreq));
+          benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
+              syncInterval, traceFreq), "");
         }
         ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
           outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
@@ -389,9 +385,15 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
       }
     } finally {
       // We may be called inside a test that wants to keep on using the fs.
-      if (!noclosefs) fs.close();
-      scope.close();
-      if (receiverHost != null) receiverHost.closeReceivers();
+      if (!noclosefs) {
+        fs.close();
+      }
+      if (scope != null) {
+        scope.close();
+      }
+      if (receiverHost != null) {
+        receiverHost.closeReceivers();
+      }
     }
 
     return(0);
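
One behavioral note on the sampler changes above: in htrace-4 a
ProbabilitySampler is built from an HTraceConfiguration instead of taking the
probability as a constructor argument, and samplers attach to the Tracer
rather than to individual startSpan() calls. A sketch under those
assumptions (the "sampler.fraction" key name is assumed, not verified against
this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
    import org.apache.hadoop.hbase.trace.TraceUtil;
    import org.apache.htrace.core.ProbabilitySampler;
    import org.apache.htrace.core.Sampler;

    final class SamplerSketch {
      static void installLoopSampler(Configuration conf, double traceFreq) {
        // HBaseHTraceConfiguration exposes "hbase.htrace."-prefixed keys to htrace.
        conf.set("hbase.htrace.sampler.fraction", Double.toString(traceFreq));
        Sampler loopSampler =
            new ProbabilitySampler(new HBaseHTraceConfiguration(conf));
        TraceUtil.addSampler(loopSampler);
      }
    }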

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-shell/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 19ad91b..f42594b 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -305,6 +305,12 @@
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
@@ -353,6 +359,10 @@
           <artifactId>hadoop-hdfs</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
@@ -365,6 +375,10 @@
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
@@ -376,6 +390,10 @@
           <scope>test</scope>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>
@@ -429,12 +447,22 @@
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-common</artifactId>
+          <exclusions>
+            <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+          </exclusions>
         </dependency>
         <dependency>
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-minicluster</artifactId>
           <exclusions>
             <exclusion>
+              <groupId>org.apache.htrace</groupId>
+              <artifactId>htrace-core</artifactId>
+            </exclusion>
+            <exclusion>
               <groupId>com.google.guava</groupId>
               <artifactId>guava</artifactId>
             </exclusion>

http://git-wip-us.apache.org/repos/asf/hbase/blob/7a69ebc7/hbase-shell/src/main/ruby/shell/commands/trace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/trace.rb b/hbase-shell/src/main/ruby/shell/commands/trace.rb
index 5ecd28c..b0350be 100644
--- a/hbase-shell/src/main/ruby/shell/commands/trace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/trace.rb
@@ -16,8 +16,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-HTrace = org.apache.htrace.Trace
-java_import org.apache.htrace.Sampler
+HTrace = org.apache.htrace.core.Tracer
+java_import org.apache.htrace.core.Sampler
 java_import org.apache.hadoop.hbase.trace.SpanReceiverHost
 
 module Shell