You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@servicecomb.apache.org by li...@apache.org on 2020/04/07 00:21:59 UTC

[servicecomb-java-chassis] 02/03: [SCB-1850] add logs to find out the unknown error in unit tests

This is an automated email from the ASF dual-hosted git repository.

liubao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/servicecomb-java-chassis.git

commit 858251104f6838c62ccb51a2a8db75510ea53626
Author: liubao <bi...@qq.com>
AuthorDate: Fri Apr 3 17:37:43 2020 +0800

    [SCB-1850] add logs to find out the unknown error in unit tests
---
 demo/docker-build-config/pom.xml                   |   2 +-
 .../core/publish/TestDefaultLogPublisher.java      | 309 +++++++++++----------
 .../publish/TestThreadPoolPublishModelFactory.java |  22 +-
 3 files changed, 174 insertions(+), 159 deletions(-)

diff --git a/demo/docker-build-config/pom.xml b/demo/docker-build-config/pom.xml
index 6e1c028..0b4acee 100644
--- a/demo/docker-build-config/pom.xml
+++ b/demo/docker-build-config/pom.xml
@@ -52,7 +52,7 @@
                     <descriptor>${root.basedir}/demo/assembly/assembly.xml</descriptor>
                   </assembly>
                   <entryPoint>
-                    <shell>java -Xmx64m $JAVA_OPTS -jar $JAR_PATH</shell>
+                    <shell>java -Xmx128m $JAVA_OPTS -jar $JAR_PATH</shell>
                   </entryPoint>
                 </build>
               </image>
diff --git a/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestDefaultLogPublisher.java b/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestDefaultLogPublisher.java
index 9c1d4df..f0948fc 100644
--- a/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestDefaultLogPublisher.java
+++ b/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestDefaultLogPublisher.java
@@ -45,7 +45,9 @@ import org.apache.servicecomb.metrics.core.publish.model.invocation.PerfInfo;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.FixMethodOrder;
 import org.junit.Test;
+import org.junit.runners.MethodSorters;
 
 import com.google.common.eventbus.EventBus;
 import com.netflix.spectator.api.Measurement;
@@ -56,6 +58,7 @@ import mockit.Mock;
 import mockit.MockUp;
 import mockit.Mocked;
 
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public class TestDefaultLogPublisher {
   GlobalRegistry globalRegistry = new GlobalRegistry();
 
@@ -132,158 +135,162 @@ public class TestDefaultLogPublisher {
   }
 
   @Test
-
   public void onPolledEvent(@Mocked VertxImpl vertxImpl, @Mocked MeasurementTree tree,
       @Mocked GlobalRegistry globalRegistry, @Mocked EventBus eventBus, @Mocked MetricsBootstrapConfig config) {
-    ArchaiusUtils.setProperty("servicecomb.metrics.publisher.defaultLog.enabled", true);
-    ArchaiusUtils.setProperty("servicecomb.metrics.invocation.latencyDistribution", "0,1,100");
-    publisher.init(globalRegistry, eventBus, config);
-    new Expectations(VertxUtils.class) {
-      {
-        VertxUtils.getVertxMap();
-        result = Collections.singletonMap("v", vertxImpl);
-        // TODO will be fixed by next vertx update.
-//        vertxImpl.getEventLoopContextCreatedCount();;
-//        result = 1;
-      }
-    };
-    DefaultPublishModel model = new DefaultPublishModel();
-    PerfInfo perfTotal = new PerfInfo();
-    perfTotal.setTps(10_0000);
-    perfTotal.setMsTotalTime(30000L * 1_0000);
-    perfTotal.setMsMaxLatency(30000);
-    OperationPerf operationPerf = new OperationPerf();
-    operationPerf.setOperation("op");
-    operationPerf.setLatencyDistribution(new Integer[] {12, 120, 1200});
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_TOTAL, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_EXECUTOR_QUEUE, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_EXECUTION, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_PREPARE, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_HANDLERS_REQUEST, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_HANDLERS_RESPONSE, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CLIENT_FILTERS_REQUEST, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CLIENT_FILTERS_RESPONSE, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_SEND_REQUEST, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_PRODUCER_SEND_RESPONSE, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_GET_CONNECTION, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_WRITE_TO_BUF, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_WAIT_RESPONSE, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_WAKE_CONSUMER, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_SERVER_FILTERS_REQUEST, perfTotal);
-    operationPerf.getStages().put(MeterInvocationConst.STAGE_SERVER_FILTERS_RESPONSE, perfTotal);
-
-    OperationPerfGroup operationPerfGroup = new OperationPerfGroup(Const.RESTFUL, Status.OK.name());
-    operationPerfGroup.addOperationPerf(operationPerf);
-
-    OperationPerfGroups operationPerfGroups = new OperationPerfGroups();
-    operationPerfGroups.getGroups().put(operationPerfGroup.getTransport(),
-        Collections.singletonMap(operationPerfGroup.getStatus(), operationPerfGroup));
-    model.getConsumer().setOperationPerfGroups(operationPerfGroups);
-    model.getProducer().setOperationPerfGroups(operationPerfGroups);
-    model.getEdge().setOperationPerfGroups(operationPerfGroups);
-
-    model.getThreadPools().put("test", new ThreadPoolPublishModel());
-    Measurement measurement = new Measurement(null, 0L, 1.0);
-
-    MeasurementNode measurementNodeCpuAll = new MeasurementNode("allProcess", new HashMap<>());
-    MeasurementNode measurementNodeCpuProcess = new MeasurementNode("currentProcess", new HashMap<>());
-    MeasurementNode measurementNodeSend = new MeasurementNode("send", new HashMap<>());
-    MeasurementNode measurementNodeSendPacket = new MeasurementNode("sendPackets", new HashMap<>());
-    MeasurementNode measurementNodeRecv = new MeasurementNode("receive", new HashMap<>());
-    MeasurementNode measurementNodeRecvPacket = new MeasurementNode("receivePackets", new HashMap<>());
-    MeasurementNode measurementNodeEth0 = new MeasurementNode("eth0", new HashMap<>());
-    MeasurementNode measurementNodeNet = new MeasurementNode("net", new HashMap<>());
-    MeasurementNode measurementNodeOs = new MeasurementNode("os", new HashMap<>());
-
-    measurementNodeSend.getMeasurements().add(measurement);
-    measurementNodeRecv.getMeasurements().add(measurement);
-    measurementNodeCpuAll.getMeasurements().add(measurement);
-    measurementNodeCpuProcess.getMeasurements().add(measurement);
-    measurementNodeRecvPacket.getMeasurements().add(measurement);
-    measurementNodeSendPacket.getMeasurements().add(measurement);
-
-    measurementNodeEth0.getChildren().put("send", measurementNodeSend);
-    measurementNodeEth0.getChildren().put("receive", measurementNodeRecv);
-    measurementNodeEth0.getChildren().put("receivePackets", measurementNodeRecvPacket);
-    measurementNodeEth0.getChildren().put("sendPackets", measurementNodeSendPacket);
-
-    measurementNodeNet.getChildren().put("eth0", measurementNodeEth0);
-    measurementNodeOs.getChildren().put("cpu", measurementNodeCpuAll);
-    measurementNodeOs.getChildren().put("processCpu", measurementNodeCpuProcess);
-    measurementNodeOs.getChildren().put("net", measurementNodeNet);
-
-    measurementNodeOs.getMeasurements().add(measurement);
-    measurementNodeNet.getMeasurements().add(measurement);
-    measurementNodeEth0.getMeasurements().add(measurement);
-
-    new MockUp<PublishModelFactory>() {
-      @Mock
-      DefaultPublishModel createDefaultPublishModel() {
-        return model;
-      }
-
-      @Mock
-      MeasurementTree getTree() {
-        return tree;
-      }
-    };
-    new Expectations() {
-      {
-        tree.findChild(OsMeter.OS_NAME);
-        result = measurementNodeOs;
-      }
-    };
-    publisher.onPolledEvent(new PolledEvent(Collections.emptyList(), Collections.emptyList()));
-    List<LoggingEvent> events = collector.getEvents().stream()
-        .filter(e -> DefaultLogPublisher.class.getName().equals(e.getLoggerName())).collect(Collectors.toList());
-    LoggingEvent event = events.get(0);
-    Assert.assertEquals("\n"
-            + "os:\n"
-            + "  cpu:\n"
-            + "    all usage: 100.00%    all idle: 0.00%    process: 100.00%\n"
-            + "  net:\n"
-            + "    send(Bps)    recv(Bps)    send(pps)    recv(pps)    interface\n"
-            + "    1            1            1            1            eth0\n"
-            + "vertx:\n"
-            + "  instances:\n"
-            + "    name       eventLoopContext-created\n"
-            + "    v          0\n"
-            + "threadPool:\n"
-            + "  coreSize maxThreads poolSize currentBusy rejected queueSize taskCount taskFinished name\n"
-            + "  0        0          0        0           NaN      0         0.0       0.0          test\n"
-            + "consumer:\n"
-            + " simple:\n"
-            + "  status      tps      latency            [0,1)  [1,100) [100,) operation\n"
-            + "  rest.OK     100000.0 3000.000/30000.000 12     120     1200   op\n"
-            + "              100000.0 3000.000/30000.000 12     120     1200   (summary)\n"
-            + " details:\n"
-            + "    rest.OK:\n"
-            + "      op:\n"
-            + "        prepare     : 3000.000/30000.000 handlersReq : 3000.000/30000.000 cFiltersReq: 3000.000/30000.000 sendReq     : 3000.000/30000.000\n"
-            + "        getConnect  : 3000.000/30000.000 writeBuf    : 3000.000/30000.000 waitResp   : 3000.000/30000.000 wakeConsumer: 3000.000/30000.000\n"
-            + "        cFiltersResp: 3000.000/30000.000 handlersResp: 3000.000/30000.000\n"
-            + "producer:\n"
-            + " simple:\n"
-            + "  status      tps      latency            [0,1)  [1,100) [100,) operation\n"
-            + "  rest.OK     100000.0 3000.000/30000.000 12     120     1200   op\n"
-            + "              100000.0 3000.000/30000.000 12     120     1200   (summary)\n"
-            + " details:\n"
-            + "    rest.OK:\n"
-            + "      op:\n"
-            + "        prepare: 3000.000/30000.000 queue       : 3000.000/30000.000 filtersReq : 3000.000/30000.000 handlersReq: 3000.000/30000.000\n"
-            + "        execute: 3000.000/30000.000 handlersResp: 3000.000/30000.000 filtersResp: 3000.000/30000.000 sendResp   : 3000.000/30000.000\n"
-            + "edge:\n"
-            + " simple:\n"
-            + "  status      tps      latency            [0,1)  [1,100) [100,) operation\n"
-            + "  rest.OK     100000.0 3000.000/30000.000 12     120     1200   op\n"
-            + "              100000.0 3000.000/30000.000 12     120     1200   (summary)\n"
-            + " details:\n"
-            + "    rest.OK:\n"
-            + "      op:\n"
-            + "        prepare     : 3000.000/30000.000 queue       : 3000.000/30000.000 sFiltersReq : 3000.000/30000.000 handlersReq : 3000.000/30000.000\n"
-            + "        cFiltersReq : 3000.000/30000.000 sendReq     : 3000.000/30000.000 getConnect  : 3000.000/30000.000 writeBuf    : 3000.000/30000.000\n"
-            + "        waitResp    : 3000.000/30000.000 wakeConsumer: 3000.000/30000.000 cFiltersResp: 3000.000/30000.000 handlersResp: 3000.000/30000.000\n"
-            + "        sFiltersResp: 3000.000/30000.000 sendResp    : 3000.000/30000.000\n",
-        event.getMessage());
+    try {
+      ArchaiusUtils.setProperty("servicecomb.metrics.publisher.defaultLog.enabled", true);
+      ArchaiusUtils.setProperty("servicecomb.metrics.invocation.latencyDistribution", "0,1,100");
+      publisher.init(globalRegistry, eventBus, config);
+      new Expectations(VertxUtils.class) {
+        {
+          VertxUtils.getVertxMap();
+          result = Collections.singletonMap("v", vertxImpl);
+          // TODO will be fixed by next vertx update.
+          //        vertxImpl.getEventLoopContextCreatedCount();;
+          //        result = 1;
+        }
+      };
+      DefaultPublishModel model = new DefaultPublishModel();
+      PerfInfo perfTotal = new PerfInfo();
+      perfTotal.setTps(10_0000);
+      perfTotal.setMsTotalTime(30000L * 1_0000);
+      perfTotal.setMsMaxLatency(30000);
+      OperationPerf operationPerf = new OperationPerf();
+      operationPerf.setOperation("op");
+      operationPerf.setLatencyDistribution(new Integer[] {12, 120, 1200});
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_TOTAL, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_EXECUTOR_QUEUE, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_EXECUTION, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_PREPARE, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_HANDLERS_REQUEST, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_HANDLERS_RESPONSE, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CLIENT_FILTERS_REQUEST, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CLIENT_FILTERS_RESPONSE, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_SEND_REQUEST, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_PRODUCER_SEND_RESPONSE, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_GET_CONNECTION, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_WRITE_TO_BUF, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_WAIT_RESPONSE, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_CONSUMER_WAKE_CONSUMER, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_SERVER_FILTERS_REQUEST, perfTotal);
+      operationPerf.getStages().put(MeterInvocationConst.STAGE_SERVER_FILTERS_RESPONSE, perfTotal);
+
+      OperationPerfGroup operationPerfGroup = new OperationPerfGroup(Const.RESTFUL, Status.OK.name());
+      operationPerfGroup.addOperationPerf(operationPerf);
+
+      OperationPerfGroups operationPerfGroups = new OperationPerfGroups();
+      operationPerfGroups.getGroups().put(operationPerfGroup.getTransport(),
+          Collections.singletonMap(operationPerfGroup.getStatus(), operationPerfGroup));
+      model.getConsumer().setOperationPerfGroups(operationPerfGroups);
+      model.getProducer().setOperationPerfGroups(operationPerfGroups);
+      model.getEdge().setOperationPerfGroups(operationPerfGroups);
+
+      model.getThreadPools().put("test", new ThreadPoolPublishModel());
+      Measurement measurement = new Measurement(null, 0L, 1.0);
+
+      MeasurementNode measurementNodeCpuAll = new MeasurementNode("allProcess", new HashMap<>());
+      MeasurementNode measurementNodeCpuProcess = new MeasurementNode("currentProcess", new HashMap<>());
+      MeasurementNode measurementNodeSend = new MeasurementNode("send", new HashMap<>());
+      MeasurementNode measurementNodeSendPacket = new MeasurementNode("sendPackets", new HashMap<>());
+      MeasurementNode measurementNodeRecv = new MeasurementNode("receive", new HashMap<>());
+      MeasurementNode measurementNodeRecvPacket = new MeasurementNode("receivePackets", new HashMap<>());
+      MeasurementNode measurementNodeEth0 = new MeasurementNode("eth0", new HashMap<>());
+      MeasurementNode measurementNodeNet = new MeasurementNode("net", new HashMap<>());
+      MeasurementNode measurementNodeOs = new MeasurementNode("os", new HashMap<>());
+
+      measurementNodeSend.getMeasurements().add(measurement);
+      measurementNodeRecv.getMeasurements().add(measurement);
+      measurementNodeCpuAll.getMeasurements().add(measurement);
+      measurementNodeCpuProcess.getMeasurements().add(measurement);
+      measurementNodeRecvPacket.getMeasurements().add(measurement);
+      measurementNodeSendPacket.getMeasurements().add(measurement);
+
+      measurementNodeEth0.getChildren().put("send", measurementNodeSend);
+      measurementNodeEth0.getChildren().put("receive", measurementNodeRecv);
+      measurementNodeEth0.getChildren().put("receivePackets", measurementNodeRecvPacket);
+      measurementNodeEth0.getChildren().put("sendPackets", measurementNodeSendPacket);
+
+      measurementNodeNet.getChildren().put("eth0", measurementNodeEth0);
+      measurementNodeOs.getChildren().put("cpu", measurementNodeCpuAll);
+      measurementNodeOs.getChildren().put("processCpu", measurementNodeCpuProcess);
+      measurementNodeOs.getChildren().put("net", measurementNodeNet);
+
+      measurementNodeOs.getMeasurements().add(measurement);
+      measurementNodeNet.getMeasurements().add(measurement);
+      measurementNodeEth0.getMeasurements().add(measurement);
+
+      new MockUp<PublishModelFactory>() {
+        @Mock
+        DefaultPublishModel createDefaultPublishModel() {
+          return model;
+        }
+
+        @Mock
+        MeasurementTree getTree() {
+          return tree;
+        }
+      };
+      new Expectations() {
+        {
+          tree.findChild(OsMeter.OS_NAME);
+          result = measurementNodeOs;
+        }
+      };
+      publisher.onPolledEvent(new PolledEvent(Collections.emptyList(), Collections.emptyList()));
+      List<LoggingEvent> events = collector.getEvents().stream()
+          .filter(e -> DefaultLogPublisher.class.getName().equals(e.getLoggerName())).collect(Collectors.toList());
+      LoggingEvent event = events.get(0);
+      Assert.assertEquals("\n"
+              + "os:\n"
+              + "  cpu:\n"
+              + "    all usage: 100.00%    all idle: 0.00%    process: 100.00%\n"
+              + "  net:\n"
+              + "    send(Bps)    recv(Bps)    send(pps)    recv(pps)    interface\n"
+              + "    1            1            1            1            eth0\n"
+              + "vertx:\n"
+              + "  instances:\n"
+              + "    name       eventLoopContext-created\n"
+              + "    v          0\n"
+              + "threadPool:\n"
+              + "  coreSize maxThreads poolSize currentBusy rejected queueSize taskCount taskFinished name\n"
+              + "  0        0          0        0           NaN      0         0.0       0.0          test\n"
+              + "consumer:\n"
+              + " simple:\n"
+              + "  status      tps      latency            [0,1)  [1,100) [100,) operation\n"
+              + "  rest.OK     100000.0 3000.000/30000.000 12     120     1200   op\n"
+              + "              100000.0 3000.000/30000.000 12     120     1200   (summary)\n"
+              + " details:\n"
+              + "    rest.OK:\n"
+              + "      op:\n"
+              + "        prepare     : 3000.000/30000.000 handlersReq : 3000.000/30000.000 cFiltersReq: 3000.000/30000.000 sendReq     : 3000.000/30000.000\n"
+              + "        getConnect  : 3000.000/30000.000 writeBuf    : 3000.000/30000.000 waitResp   : 3000.000/30000.000 wakeConsumer: 3000.000/30000.000\n"
+              + "        cFiltersResp: 3000.000/30000.000 handlersResp: 3000.000/30000.000\n"
+              + "producer:\n"
+              + " simple:\n"
+              + "  status      tps      latency            [0,1)  [1,100) [100,) operation\n"
+              + "  rest.OK     100000.0 3000.000/30000.000 12     120     1200   op\n"
+              + "              100000.0 3000.000/30000.000 12     120     1200   (summary)\n"
+              + " details:\n"
+              + "    rest.OK:\n"
+              + "      op:\n"
+              + "        prepare: 3000.000/30000.000 queue       : 3000.000/30000.000 filtersReq : 3000.000/30000.000 handlersReq: 3000.000/30000.000\n"
+              + "        execute: 3000.000/30000.000 handlersResp: 3000.000/30000.000 filtersResp: 3000.000/30000.000 sendResp   : 3000.000/30000.000\n"
+              + "edge:\n"
+              + " simple:\n"
+              + "  status      tps      latency            [0,1)  [1,100) [100,) operation\n"
+              + "  rest.OK     100000.0 3000.000/30000.000 12     120     1200   op\n"
+              + "              100000.0 3000.000/30000.000 12     120     1200   (summary)\n"
+              + " details:\n"
+              + "    rest.OK:\n"
+              + "      op:\n"
+              + "        prepare     : 3000.000/30000.000 queue       : 3000.000/30000.000 sFiltersReq : 3000.000/30000.000 handlersReq : 3000.000/30000.000\n"
+              + "        cFiltersReq : 3000.000/30000.000 sendReq     : 3000.000/30000.000 getConnect  : 3000.000/30000.000 writeBuf    : 3000.000/30000.000\n"
+              + "        waitResp    : 3000.000/30000.000 wakeConsumer: 3000.000/30000.000 cFiltersResp: 3000.000/30000.000 handlersResp: 3000.000/30000.000\n"
+              + "        sFiltersResp: 3000.000/30000.000 sendResp    : 3000.000/30000.000\n",
+          event.getMessage());
+    } catch (Exception e) {
+      e.printStackTrace();
+      Assert.fail("unexpected error happen. " + e.getMessage());
+    }
   }
 }
diff --git a/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestThreadPoolPublishModelFactory.java b/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestThreadPoolPublishModelFactory.java
index 6468fd1..c48acae 100644
--- a/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestThreadPoolPublishModelFactory.java
+++ b/metrics/metrics-core/src/test/java/org/apache/servicecomb/metrics/core/publish/TestThreadPoolPublishModelFactory.java
@@ -24,7 +24,9 @@ import java.util.concurrent.ThreadPoolExecutor;
 import org.apache.servicecomb.foundation.common.utils.JsonUtils;
 import org.apache.servicecomb.metrics.core.publish.model.DefaultPublishModel;
 import org.junit.Assert;
+import org.junit.FixMethodOrder;
 import org.junit.Test;
+import org.junit.runners.MethodSorters;
 
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.google.common.collect.Lists;
@@ -40,6 +42,7 @@ import mockit.Mock;
 import mockit.MockUp;
 import mockit.Mocked;
 
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
 public class TestThreadPoolPublishModelFactory {
   protected EventBus eventBus = new EventBus();
 
@@ -67,14 +70,19 @@ public class TestThreadPoolPublishModelFactory {
 
       }
     };
-    ThreadPoolMonitor.attach(registry, threadPoolExecutor, "test");
+    try {
+      ThreadPoolMonitor.attach(registry, threadPoolExecutor, "test");
 
-    PolledMeter.update(registry);
-    PublishModelFactory factory = new PublishModelFactory(Lists.newArrayList(registry.iterator()));
-    DefaultPublishModel model = factory.createDefaultPublishModel();
+      PolledMeter.update(registry);
+      PublishModelFactory factory = new PublishModelFactory(Lists.newArrayList(registry.iterator()));
+      DefaultPublishModel model = factory.createDefaultPublishModel();
 
-    Assert.assertEquals(
-        "{\"test\":{\"avgTaskCount\":0.0,\"avgCompletedTaskCount\":0.0,\"currentThreadsBusy\":0,\"maxThreads\":0,\"poolSize\":0,\"corePoolSize\":0,\"queueSize\":10,\"rejected\":\"NaN\"}}",
-        JsonUtils.writeValueAsString(model.getThreadPools()));
+      Assert.assertEquals(
+          "{\"test\":{\"avgTaskCount\":0.0,\"avgCompletedTaskCount\":0.0,\"currentThreadsBusy\":0,\"maxThreads\":0,\"poolSize\":0,\"corePoolSize\":0,\"queueSize\":10,\"rejected\":\"NaN\"}}",
+          JsonUtils.writeValueAsString(model.getThreadPools()));
+    } catch (Throwable e) {
+      e.printStackTrace();
+      Assert.fail("unexpected error happen. " + e.getMessage());
+    }
   }
 }