Posted to commits@lucene.apache.org by an...@apache.org on 2015/08/08 15:40:00 UTC

svn commit: r1694798 [3/3] - in /lucene/dev/trunk: lucene/tools/forbiddenApis/ solr/ solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/ solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/ solr/con...

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java Sat Aug  8 13:39:58 2015
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEqu
 
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -101,7 +102,7 @@ public class SoftAutoCommitTest extends
     hardTracker.setDocsUpperBound(-1);
     
     // Add a single document
-    long add529 = System.currentTimeMillis();
+    long add529 = System.nanoTime();
     assertU(adoc("id", "529", "subject", "the doc we care about in this test"));
 
     monitor.assertSaneOffers();
@@ -123,22 +124,25 @@ public class SoftAutoCommitTest extends
     Long hard529 = monitor.hard.poll(hardCommitWaitMillis * 5, MILLISECONDS);
     assertNotNull("hard529 wasn't fast enough", hard529);
     monitor.assertSaneOffers();
-    
-    assertTrue("soft529 occured too fast: " + 
-               add529 + " + " + softCommitWaitMillis + " !<= " + soft529,
-               add529 + softCommitWaitMillis <= soft529);
-    assertTrue("hard529 occured too fast: " + 
-               add529 + " + " + hardCommitWaitMillis + " !<= " + hard529,
-               add529 + hardCommitWaitMillis <= hard529);
+
+    final long soft529Ms = TimeUnit.MILLISECONDS.convert(soft529 - add529, TimeUnit.NANOSECONDS);
+    assertTrue("soft529 occured too fast, in " +
+            soft529Ms + "ms, less than soft commit interval " + softCommitWaitMillis,
+        soft529Ms >= softCommitWaitMillis);
+    final long hard529Ms = TimeUnit.MILLISECONDS.convert(hard529 - add529, TimeUnit.NANOSECONDS);
+    assertTrue("hard529 occured too fast, in " +
+            hard529Ms + "ms, less than hard commit interval " + hardCommitWaitMillis,
+        hard529Ms >= hardCommitWaitMillis);
 
     // however slow the machine was to do the soft commit compared to expected,
     // assume newSearcher had some magnitude of that much overhead as well 
-    long slowTestFudge = Math.max(300, 12 * (soft529 - add529 - softCommitWaitMillis));
-    assertTrue("searcher529 wasn't soon enough after soft529: " +
-               searcher529 + " !< " + soft529 + " + " + slowTestFudge + " (fudge)",
-               searcher529 < soft529 + slowTestFudge );
+    long slowTestFudge = Math.max(300, 12 * (soft529Ms - softCommitWaitMillis));
+    final long softCommitToSearcherOpenMs = TimeUnit.MILLISECONDS.convert(searcher529 - soft529, TimeUnit.NANOSECONDS);
+    assertTrue("searcher529 wasn't soon enough after soft529: Took " +
+            softCommitToSearcherOpenMs + "ms, >= acceptable " + slowTestFudge + "ms (fudge)",
+        softCommitToSearcherOpenMs < slowTestFudge);
 
-    assertTrue("hard529 was before searcher529: " + 
+    assertTrue("hard529 was before searcher529: " +
                searcher529 + " !<= " + hard529,
                searcher529 <= hard529);
 
@@ -147,7 +151,7 @@ public class SoftAutoCommitTest extends
     // there may have been (or will be) a second hard commit for 530
     Long hard530 = monitor.hard.poll(hardCommitWaitMillis, MILLISECONDS);
     assertEquals("Tracker reports too many hard commits",
-                 (null == hard530 ? 1 : 2), 
+                 (null == hard530 ? 1 : 2),
                  hardTracker.getCommitCount());
 
     // there may have been a second soft commit for 530, 
@@ -229,7 +233,7 @@ public class SoftAutoCommitTest extends
     monitor.clear();
 
     // Delete the document
-    long del529 = System.currentTimeMillis();
+    long del529 = System.nanoTime();
     assertU( delI("529") );
 
     monitor.assertSaneOffers();
@@ -251,22 +255,25 @@ public class SoftAutoCommitTest extends
     hard529 = monitor.hard.poll(hardCommitWaitMillis * 3, MILLISECONDS);
     assertNotNull("hard529 wasn't fast enough", hard529);
     monitor.assertSaneOffers();
-    
-    assertTrue("soft529 occured too fast: " + 
-               del529 + " + " + softCommitWaitMillis + " !<= " + soft529,
-               del529 + softCommitWaitMillis <= soft529);
-    assertTrue("hard529 occured too fast: " + 
-               del529 + " + " + hardCommitWaitMillis + " !<= " + hard529,
-               del529 + hardCommitWaitMillis <= hard529);
+
+    final long soft529Ms = TimeUnit.MILLISECONDS.convert(soft529 - del529, TimeUnit.NANOSECONDS);
+    assertTrue("soft529 occured too fast, in " + soft529Ms +
+            "ms, less than soft commit interval " + softCommitWaitMillis,
+        soft529Ms >= softCommitWaitMillis);
+    final long hard529Ms = TimeUnit.MILLISECONDS.convert(hard529 - del529, TimeUnit.NANOSECONDS);
+    assertTrue("hard529 occured too fast, in " +
+            hard529Ms + "ms, less than hard commit interval " + hardCommitWaitMillis,
+        hard529Ms >= hardCommitWaitMillis);
 
     // however slow the machine was to do the soft commit compared to expected,
-    // assume newSearcher had some magnitude of that much overhead as well 
-    long slowTestFudge = Math.max(150, 3 * (soft529 - del529 - softCommitWaitMillis));
-    assertTrue("searcher529 wasn't soon enough after soft529: " +
-               searcher529 + " !< " + soft529 + " + " + slowTestFudge + " (fudge)",
-               searcher529 < soft529 + slowTestFudge );
+    // assume newSearcher had some magnitude of that much overhead as well
+    long slowTestFudge = Math.max(300, 12 * (soft529Ms - softCommitWaitMillis));
+    final long softCommitToSearcherOpenMs = TimeUnit.MILLISECONDS.convert(searcher529 - soft529, TimeUnit.NANOSECONDS);
+    assertTrue("searcher529 wasn't soon enough after soft529: Took " +
+            softCommitToSearcherOpenMs + "ms, >= acceptable " + slowTestFudge + "ms (fudge)",
+        softCommitToSearcherOpenMs < slowTestFudge);
 
-    assertTrue("hard529 was before searcher529: " + 
+    assertTrue("hard529 was before searcher529: " +
                searcher529 + " !<= " + hard529,
                searcher529 <= hard529);
 
@@ -298,17 +305,17 @@ public class SoftAutoCommitTest extends
     hardTracker.setDocsUpperBound(-1);
     
     // try to add 5 docs really fast
-    long fast5start = System.currentTimeMillis();
+    long fast5start = System.nanoTime();
     for( int i=0;i<5; i++ ) {
       assertU(adoc("id", ""+500 + i, "subject", "five fast docs"));
     }
-    long fast5end = System.currentTimeMillis() - 200; // minus a tad of slop
-    long fast5time = 1 + fast5end - fast5start;
+    long fast5end = System.nanoTime() - TimeUnit.NANOSECONDS.convert(200, TimeUnit.MILLISECONDS); // minus a tad of slop
+    long fast5time = 1 + TimeUnit.MILLISECONDS.convert(fast5end - fast5start, TimeUnit.NANOSECONDS);
 
     // total time for all 5 adds determines the number of soft to expect
-    long expectedSoft = (long)Math.ceil(fast5time / softCommitWaitMillis);
-    long expectedHard = (long)Math.ceil(fast5time / hardCommitWaitMillis);
-    
+    long expectedSoft = (long)Math.ceil((double) fast5time / softCommitWaitMillis);
+    long expectedHard = (long)Math.ceil((double) fast5time / hardCommitWaitMillis);
+
     // note: counting from 1 for multiplication
     for (int i = 1; i <= expectedSoft; i++) {
       // Wait for the soft commit with some fudge
@@ -318,10 +325,10 @@ public class SoftAutoCommitTest extends
 
       // have to assume none of the docs were added until
       // very end of the add window
-      assertTrue(i + ": soft occured too fast: " + 
-                 fast5end + " + (" + softCommitWaitMillis + " * " + i +
-                 ") !<= " + soft,
-                 fast5end + (softCommitWaitMillis * i) <= soft);
+      long softMs = TimeUnit.MILLISECONDS.convert(soft - fast5end, TimeUnit.NANOSECONDS);
+      assertTrue(i + ": soft occured too fast: " +
+              softMs + " < (" + softCommitWaitMillis + " * " + i + ")",
+          softMs >= (softCommitWaitMillis * i));
     }
 
     // note: counting from 1 for multiplication
@@ -334,10 +341,10 @@ public class SoftAutoCommitTest extends
       
       // have to assume none of the docs were added until
       // very end of the add window
-      assertTrue(i + ": soft occured too fast: " + 
-                 fast5end + " + (" + hardCommitWaitMillis + " * " + i +
-                 ") !<= " + hard,
-                 fast5end + (hardCommitWaitMillis * i) <= hard);
+      long hardMs = TimeUnit.MILLISECONDS.convert(hard - fast5end, TimeUnit.NANOSECONDS);
+      assertTrue(i + ": hard occured too fast: " +
+              hardMs + " < (" + hardCommitWaitMillis + " * " + i + ")",
+          hardMs >= (hardCommitWaitMillis * i));
     }
  
   }
@@ -361,19 +368,19 @@ class MockEventListener implements SolrE
   @Override
   public void newSearcher(SolrIndexSearcher newSearcher,
                           SolrIndexSearcher currentSearcher) {
-    Long now = System.currentTimeMillis();
+    Long now = System.nanoTime();
     if (!searcher.offer(now)) fail.append(", newSearcher @ " + now);
   }
   
   @Override
   public void postCommit() {
-    Long now = System.currentTimeMillis();
+    Long now = System.nanoTime();
     if (!hard.offer(now)) fail.append(", hardCommit @ " + now);
   }
   
   @Override
   public void postSoftCommit() {
-    Long now = System.currentTimeMillis();
+    Long now = System.nanoTime();
     if (!soft.offer(now)) fail.append(", softCommit @ " + now);
   }
   

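[Editor's note] The SoftAutoCommitTest changes above show the general recipe this commit applies everywhere: record System.nanoTime() at the event of interest, then convert the delta to milliseconds with TimeUnit instead of subtracting wall-clock timestamps. A minimal standalone sketch of that pattern (the names ElapsedExample and elapsedMs are illustrative, not part of the patch):

    import java.util.concurrent.TimeUnit;

    public class ElapsedExample {
      public static void main(String[] args) throws InterruptedException {
        final long start = System.nanoTime();   // monotonic clock, immune to wall-clock adjustments
        Thread.sleep(250);                      // stand-in for the work being timed
        final long elapsedMs =
            TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS);
        System.out.println("took ~" + elapsedMs + "ms");
      }
    }
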
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/TestIndexingPerformance.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/TestIndexingPerformance.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/TestIndexingPerformance.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/update/TestIndexingPerformance.java Sat Aug  8 13:39:58 2015
@@ -21,6 +21,7 @@ import org.apache.solr.common.SolrInputD
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.util.AbstractSolrTestCase;
 import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.util.RTimer;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
@@ -100,7 +101,7 @@ public class TestIndexingPerformance ext
     };
    ***/
 
-    long start = System.currentTimeMillis();
+    final RTimer timer = new RTimer();
 
     AddUpdateCommand add = new AddUpdateCommand(req);
     add.overwrite = overwrite;
@@ -116,9 +117,9 @@ public class TestIndexingPerformance ext
       }
       updateHandler.addDoc(add);
     }
-    long end = System.currentTimeMillis();
     log.info("doc="+ Arrays.toString(fields));
-    log.info("iter="+iter +" time=" + (end-start) + " throughput=" + ((long)iter*1000)/(end-start));
+    double elapsed = timer.getTime();
+    log.info("iter="+iter +" time=" + elapsed + " throughput=" + ((long)iter*1000)/elapsed);
 
     //discard all the changes
     updateHandler.rollback(new RollbackUpdateCommand(req));

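[Editor's note] TestIndexingPerformance (and several files below) replace raw currentTimeMillis arithmetic with Solr's RTimer utility. Judging only from its use in this patch, RTimer starts timing on construction and getTime() returns elapsed wall-clock time in milliseconds as a double; a hedged sketch under that assumption (RTimerSketch and timeSomething are illustrative names):

    import org.apache.solr.util.RTimer;

    public class RTimerSketch {
      static double timeSomething(Runnable work) {
        final RTimer timer = new RTimer();  // starts immediately, per its use in this patch
        work.run();
        return timer.getTime();             // elapsed ms as a double (assumption drawn from the diff)
      }

      public static void main(String[] args) {
        double ms = timeSomething(() -> { /* indexing, bit-set ops, etc. */ });
        System.out.println("elapsed=" + ms + "ms");
      }
    }
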
Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/BitSetPerf.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/BitSetPerf.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/BitSetPerf.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/BitSetPerf.java Sat Aug  8 13:39:58 2015
@@ -77,7 +77,7 @@ public class BitSetPerf {
 
     int ret=0;
 
-    long start = System.currentTimeMillis();
+    final RTimer timer = new RTimer();
 
     if ("union".equals(test)) {
       for (int it=0; it<iter; it++) {
@@ -187,9 +187,8 @@ public class BitSetPerf {
       }
     }
 
-    long end = System.currentTimeMillis();
     System.out.println("ret="+ret);
-    System.out.println("TIME="+(end-start));
+    System.out.println("TIME="+timer.getTime());
 
   }
 

Modified: lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/TestUtils.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/TestUtils.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/TestUtils.java (original)
+++ lucene/dev/trunk/solr/core/src/test/org/apache/solr/util/TestUtils.java Sat Aug  8 13:39:58 2015
@@ -149,7 +149,7 @@ public class TestUtils extends SolrTestC
     String sortable = NumberUtils.double2sortableStr( number );
     assertEquals( number, NumberUtils.SortableStr2double(sortable), 0.001);
     
-    long num = System.currentTimeMillis();
+    long num = System.nanoTime();
     sortable = NumberUtils.long2sortableStr( num );
     assertEquals( num, NumberUtils.SortableStr2long(sortable, 0, sortable.length() ) );
     assertEquals( Long.toString(num), NumberUtils.SortableStr2long(sortable) );

Modified: lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java (original)
+++ lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java Sat Aug  8 13:39:58 2015
@@ -155,11 +155,12 @@ public class CloudSolrClient extends Sol
 
     ExpiringCachedDocCollection(DocCollection cached) {
       this.cached = cached;
-      this.cachedAt = System.currentTimeMillis();
+      this.cachedAt = System.nanoTime();
     }
 
-    boolean isExpired(long timeToLive) {
-      return (System.currentTimeMillis() - cachedAt) > timeToLive;
+    boolean isExpired(long timeToLiveMs) {
+      return (System.nanoTime() - cachedAt)
+          > TimeUnit.NANOSECONDS.convert(timeToLiveMs, TimeUnit.MILLISECONDS);
     }
   }
 

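[Editor's note] The ExpiringCachedDocCollection change keeps the caller-supplied TTL in milliseconds but performs the comparison entirely against the monotonic clock. The same idea in isolation (ExpiringEntry is a hypothetical class name, not part of the patch):

    import java.util.concurrent.TimeUnit;

    class ExpiringEntry<T> {
      final T value;
      private final long cachedAtNanos = System.nanoTime();

      ExpiringEntry(T value) { this.value = value; }

      // TTL is accepted in ms for convenience but compared in nanoTime space.
      boolean isExpired(long timeToLiveMs) {
        return (System.nanoTime() - cachedAtNanos)
            > TimeUnit.NANOSECONDS.convert(timeToLiveMs, TimeUnit.MILLISECONDS);
      }
    }
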
Modified: lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java (original)
+++ lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrClient.java Sat Aug  8 13:39:58 2015
@@ -128,9 +128,6 @@ public class LBHttpSolrClient extends So
 
     final HttpSolrClient client;
 
-    long lastUsed;     // last time used for a real request
-    long lastChecked;  // last time checked for liveness
-
     // "standard" servers are used by default.  They normally live in the alive list
     // and move to the zombie list when unavailable.  When they become available again,
     // they move back to the alive list.
@@ -362,7 +359,6 @@ public class LBHttpSolrClient extends So
     ServerWrapper wrapper;
 
     wrapper = new ServerWrapper(server);
-    wrapper.lastUsed = System.currentTimeMillis();
     wrapper.standard = false;
     zombieServers.put(wrapper.getKey(), wrapper);
     startAliveCheckExecutor();
@@ -514,7 +510,6 @@ public class LBHttpSolrClient extends So
       
       int count = counter.incrementAndGet() & Integer.MAX_VALUE;
       ServerWrapper wrapper = serverList[count % serverList.length];
-      wrapper.lastUsed = System.currentTimeMillis();
 
       try {
         return wrapper.client.request(request, collection);
@@ -591,9 +586,7 @@ public class LBHttpSolrClient extends So
    * @param zombieServer a server in the dead pool
    */
   private void checkAZombieServer(ServerWrapper zombieServer) {
-    long currTime = System.currentTimeMillis();
     try {
-      zombieServer.lastChecked = currTime;
       QueryResponse resp = zombieServer.client.query(solrQuery);
       if (resp.getStatus() == 0) {
         // server has come back up.

Modified: lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java (original)
+++ lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/CloudSolrStream.java Sat Aug  8 13:39:58 2015
@@ -294,7 +294,6 @@ public class CloudSolrStream extends Tup
         throw new Exception("Collection not found:"+this.collection);
       }
 
-      long time = System.currentTimeMillis();
       params.put("distrib","false"); // We are the aggregator.
 
       for(Slice slice : slices) {
@@ -304,7 +303,7 @@ public class CloudSolrStream extends Tup
           shuffler.add(replica);
         }
 
-        Collections.shuffle(shuffler, new Random(time));
+        Collections.shuffle(shuffler, new Random());
         Replica rep = shuffler.get(0);
         ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
         String url = zkProps.getCoreUrl();

Modified: lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java (original)
+++ lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/client/solrj/io/stream/ParallelStream.java Sat Aug  8 13:39:58 2015
@@ -252,7 +252,6 @@ public class ParallelStream extends Clou
       ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
       ClusterState clusterState = zkStateReader.getClusterState();
       Collection<Slice> slices = clusterState.getActiveSlices(this.collection);
-      long time = System.currentTimeMillis();
       List<Replica> shuffler = new ArrayList();
       for(Slice slice : slices) {
         Collection<Replica> replicas = slice.getReplicas();
@@ -265,7 +264,7 @@ public class ParallelStream extends Clou
         throw new IOException("Number of workers exceeds nodes in the worker collection");
       }
 
-      Collections.shuffle(shuffler, new Random(time));
+      Collections.shuffle(shuffler, new Random());
 
       for(int w=0; w<workers; w++) {
         HashMap params = new HashMap();

Modified: lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java (original)
+++ lucene/dev/trunk/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java Sat Aug  8 13:39:58 2015
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.solr.common.util.SuppressForbidden;
 import org.noggit.JSONUtil;
 
 /**
@@ -56,8 +57,14 @@ public class RoutingRule extends ZkNodeP
     return targetCollectionName;
   }
 
-  public Long getExpireAt() {
-    return expireAt;
+  @SuppressForbidden(reason = "For currentTimeMillis, expiry time depends on external data (should it?)")
+  public static String makeExpiryAt(long timeMsFromNow) {
+    return String.valueOf(System.currentTimeMillis() + timeMsFromNow);
+  }
+
+  @SuppressForbidden(reason = "For currentTimeMillis, expiry time depends on external data (should it?)")
+  public boolean isExpired() {
+    return (expireAt < System.currentTimeMillis());
   }
 
   public String getRouteRangesStr() {

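[Editor's note] RoutingRule is one of the few places that keeps System.currentTimeMillis(), because the expiry timestamp is exchanged with external data as epoch millis; the forbidden-API check is silenced per call site with @SuppressForbidden, as the hunk above shows. A sketch of that pattern (WallClockExpiry and isPastDeadline are illustrative names):

    import org.apache.solr.common.util.SuppressForbidden;

    class WallClockExpiry {
      @SuppressForbidden(reason = "expiry timestamps are exchanged with external systems as epoch millis")
      static boolean isPastDeadline(long deadlineEpochMillis) {
        // Wall-clock time is appropriate here; elsewhere this commit switches
        // to a monotonic clock (System.nanoTime / RTimer / TimeOut).
        return System.currentTimeMillis() > deadlineEpochMillis;
      }
    }
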
Modified: lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java (original)
+++ lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java Sat Aug  8 13:39:58 2015
@@ -26,6 +26,7 @@ import org.apache.solr.client.solrj.util
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.CommonParams;
+import org.apache.solr.util.TimeOut;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -33,6 +34,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 abstract public class SolrExampleTestsBase extends SolrJettyTestBase {
@@ -149,7 +151,7 @@ abstract public class SolrExampleTestsBa
     Assert.assertEquals(1, rsp.getResults().getNumFound());
     
     // check if the doc has been deleted every 250 ms for 30 seconds
-    long timeout = System.currentTimeMillis() + 30000;
+    TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
     do {
       Thread.sleep(250); // wait 250 ms
       
@@ -157,7 +159,7 @@ abstract public class SolrExampleTestsBa
       if (rsp.getResults().getNumFound() == 0) {
         return;
       }
-    } while (System.currentTimeMillis() < timeout);
+    } while (! timeout.hasTimedOut());
     
     Assert.fail("commitWithin failed to commit");
   }

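[Editor's note] SolrExampleTestsBase above (and the test-framework files further down) replace "deadline = currentTimeMillis() + N" loops with the TimeOut helper. From its use in this patch, TimeOut takes a duration plus a TimeUnit and exposes hasTimedOut(); a hedged polling sketch under that assumption (PollUntil and pollCondition are hypothetical names):

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;
    import org.apache.solr.util.TimeOut;

    class PollUntil {
      // Poll a condition every 250 ms for up to 30 seconds, as the test above does.
      static boolean pollCondition(BooleanSupplier condition) throws InterruptedException {
        final TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
        while (!timeout.hasTimedOut()) {
          if (condition.getAsBoolean()) return true;
          Thread.sleep(250);
        }
        return false;
      }
    }
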
Modified: lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java (original)
+++ lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrClient.java Sat Aug  8 13:39:58 2015
@@ -35,6 +35,7 @@ import org.apache.solr.client.solrj.resp
 import org.apache.solr.client.solrj.response.SolrResponseBase;
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.util.TimeOut;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.slf4j.Logger;
@@ -48,6 +49,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 /**
  * Test for LBHttpSolrClient
@@ -221,16 +223,16 @@ public class TestLBHttpSolrClient extend
       // Start the killed server once again
       solr[1].startJetty();
       // Wait for the alive check to complete
-      waitForServer(30000, client, 3, "solr1");
+      waitForServer(30, client, 3, "solr1");
     } finally {
       myHttpClient.close();
     }
   }
   
   // wait maximum ms for serverName to come back up
-  private void waitForServer(int maximum, LBHttpSolrClient client, int nServers, String serverName) throws Exception {
-    long endTime = System.currentTimeMillis() + maximum;
-    while (System.currentTimeMillis() < endTime) {
+  private void waitForServer(int maxSeconds, LBHttpSolrClient client, int nServers, String serverName) throws Exception {
+    final TimeOut timeout = new TimeOut(maxSeconds, TimeUnit.SECONDS);
+    while (! timeout.hasTimedOut()) {
       QueryResponse resp;
       try {
         resp = client.query(new SolrQuery("*:*"));

Modified: lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java (original)
+++ lucene/dev/trunk/solr/solrj/src/test/org/apache/solr/common/util/TestJavaBinCodec.java Sat Aug  8 13:39:58 2015
@@ -41,6 +41,7 @@ import org.apache.solr.common.SolrDocume
 import org.apache.solr.common.SolrInputDocument;
 import org.apache.solr.common.SolrInputField;
 import org.apache.solr.util.ConcurrentLRUCache;
+import org.apache.solr.util.RTimer;
 import org.junit.Test;
 import org.noggit.CharArr;
 
@@ -439,7 +440,7 @@ public class TestJavaBinCodec extends So
     }
     printMem("after cache init");
 
-    long ms = System.currentTimeMillis();
+    RTimer timer = new RTimer();
     final int ITERS = 1000000;
     int THREADS = 10;
 
@@ -459,8 +460,8 @@ public class TestJavaBinCodec extends So
 
 
     printMem("after cache test");
-    System.out.println("time taken by LRUCACHE "+ (System.currentTimeMillis()-ms));
-    ms = System.currentTimeMillis();
+    System.out.println("time taken by LRUCACHE " + timer.getTime());
+    timer = new RTimer();
 
     runInThreads(THREADS, new Runnable() {
       @Override
@@ -477,7 +478,7 @@ public class TestJavaBinCodec extends So
     });
 
     printMem("after new string test");
-    System.out.println("time taken by string creation "+ (System.currentTimeMillis()-ms));
+    System.out.println("time taken by string creation "+ timer.getTime());
 
 
 
@@ -569,7 +570,7 @@ public class TestJavaBinCodec extends So
     }
 
     int ret = 0;
-    long start = System.currentTimeMillis();
+    final RTimer timer = new RTimer();
     ConcurrentLRUCache underlyingCache = cacheSz > 0 ? new ConcurrentLRUCache<>(cacheSz,cacheSz-cacheSz/10,cacheSz,cacheSz/10,false,true,null) : null;  // the cache in the first version of the patch was 10000,9000,10000,1000,false,true,null
     final JavaBinCodec.StringCache stringCache = underlyingCache==null ? null : new JavaBinCodec.StringCache(underlyingCache);
     if (nThreads <= 0) {
@@ -586,10 +587,9 @@ public class TestJavaBinCodec extends So
         }
       });
     }
-    long end = System.currentTimeMillis();
 
     long n = iter * Math.max(1,nThreads);
-    System.out.println("ret=" + ret + " THROUGHPUT=" + (n*1000 / (end-start)));
+    System.out.println("ret=" + ret + " THROUGHPUT=" + (n*1000 / timer.getTime()));
     if (underlyingCache != null) System.out.println("cache: hits=" + underlyingCache.getStats().getCumulativeHits() + " lookups=" + underlyingCache.getStats().getCumulativeLookups() + " size=" + underlyingCache.getStats().getCurrentSize());
   }
 

Modified: lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java (original)
+++ lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java Sat Aug  8 13:39:58 2015
@@ -73,6 +73,8 @@ import org.apache.solr.core.CoreContaine
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.util.RTimer;
+import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.CreateMode;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -1680,10 +1682,10 @@ public abstract class AbstractFullDistri
       List<Integer> numShardsNumReplicaList,
       List<String> nodesAllowedToRunShards) throws Exception {
     // check for an expectedSlices new collection - we poll the state
-    long timeoutAt = System.currentTimeMillis() + 120000;
+    final TimeOut timeout = new TimeOut(120, TimeUnit.SECONDS);
     boolean success = false;
     String checkResult = "Didnt get to perform a single check";
-    while (System.currentTimeMillis() < timeoutAt) {
+    while (! timeout.hasTimedOut()) {
       checkResult = checkCollectionExpectations(collectionName,
           numShardsNumReplicaList, nodesAllowedToRunShards);
       if (checkResult == null) {
@@ -1743,9 +1745,9 @@ public abstract class AbstractFullDistri
  public static void waitForNon403or404or503(HttpSolrClient collectionClient)
       throws Exception {
     SolrException exp = null;
-    long timeoutAt = System.currentTimeMillis() + 30000;
+    final TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
 
-    while (System.currentTimeMillis() < timeoutAt) {
+    while (! timeout.hasTimedOut()) {
       boolean missing = false;
 
       try {
@@ -1787,7 +1789,7 @@ public abstract class AbstractFullDistri
   }
 
   protected List<Replica> ensureAllReplicasAreActive(String testCollectionName, String shardId, int shards, int rf, int maxWaitSecs) throws Exception {
-    long startMs = System.currentTimeMillis();
+    final RTimer timer = new RTimer();
 
     Map<String,Replica> notLeaders = new HashMap<>();
 
@@ -1845,8 +1847,7 @@ public abstract class AbstractFullDistri
     if (notLeaders.isEmpty())
       fail("Didn't isolate any replicas that are not the leader! ClusterState: " + printClusterStateInfo());
 
-    long diffMs = (System.currentTimeMillis() - startMs);
-    log.info("Took " + diffMs + " ms to see all replicas become active.");
+    log.info("Took {} ms to see all replicas become active.", timer.getTime());
 
     List<Replica> replicas = new ArrayList<>();
     replicas.addAll(notLeaders.values());
@@ -1878,9 +1879,9 @@ public abstract class AbstractFullDistri
   static String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
       throws IOException, SolrServerException {
     String state = null;
-    long maxWait = System.nanoTime() + TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS);
+    final TimeOut timeout = new TimeOut(waitForSeconds, TimeUnit.SECONDS);
 
-    while (System.nanoTime() < maxWait)  {
+    while (! timeout.hasTimedOut())  {
       state = getRequestState(requestId, client);
       if(state.equals("completed") || state.equals("failed"))
         return state;

Modified: lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java (original)
+++ lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java Sat Aug  8 13:39:58 2015
@@ -30,6 +30,7 @@ import org.apache.solr.core.CoreContaine
 import org.apache.solr.core.SolrCore;
 import org.apache.solr.servlet.SolrDispatchFilter;
 import org.apache.solr.update.DirectUpdateHandler2;
+import org.apache.solr.util.RTimer;
 import org.apache.zookeeper.KeeperException;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.slf4j.Logger;
@@ -79,7 +80,7 @@ public class ChaosMonkey {
   private boolean causeConnectionLoss;
   private boolean aggressivelyKillLeaders;
   private Map<String,CloudJettyRunner> shardToLeaderJetty;
-  private volatile long startTime;
+  private volatile RTimer runTimer;
   
   private List<CloudJettyRunner> deadPool = new ArrayList<>();
 
@@ -451,9 +452,9 @@ public class ChaosMonkey {
       monkeyLog("Jetty will not commit on close");
       DirectUpdateHandler2.commitOnClose = false;
     }
-    
+
     this.aggressivelyKillLeaders = killLeaders;
-    startTime = System.currentTimeMillis();
+    runTimer = new RTimer();
     // TODO: when kill leaders is on, lets kill a higher percentage of leaders
     
     stop = false;
@@ -510,7 +511,7 @@ public class ChaosMonkey {
           }
         }
         monkeyLog("finished");
-        monkeyLog("I ran for " + (System.currentTimeMillis() - startTime)/1000.0f + "sec. I stopped " + stops + " and I started " + starts
+        monkeyLog("I ran for " + runTimer.getTime() / 1000 + "s. I stopped " + stops + " and I started " + starts
             + ". I also expired " + expires.get() + " and caused " + connloss
             + " connection losses");
       }
@@ -529,10 +530,11 @@ public class ChaosMonkey {
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
     }
-    
+    runTimer.stop();
+
     DirectUpdateHandler2.commitOnClose = true;
-    
-    float runtime = (System.currentTimeMillis() - startTime)/1000.0f;
+
+    double runtime = runTimer.getTime()/1000.0f;
     if (runtime > 30 && stops.get() == 0) {
       LuceneTestCase.fail("The Monkey ran for over 30 seconds and no jetties were stopped - this is worth investigating!");
     }

Modified: lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java
URL: http://svn.apache.org/viewvc/lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java?rev=1694798&r1=1694797&r2=1694798&view=diff
==============================================================================
--- lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java (original)
+++ lucene/dev/trunk/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java Sat Aug  8 13:39:58 2015
@@ -20,6 +20,7 @@ package org.apache.solr.cloud;
 import com.google.common.collect.Ordering;
 import com.google.common.util.concurrent.AtomicLongMap;
 import org.apache.solr.common.cloud.SolrZkClient;
+import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -56,6 +57,7 @@ import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
 
 public class ZkTestServer {
   public static final int TICK_TIME = 1000;
@@ -521,8 +523,8 @@ public class ZkTestServer {
     }
   }
   
-  public static boolean waitForServerDown(String hp, long timeout) {
-    long start = System.currentTimeMillis();
+  public static boolean waitForServerDown(String hp, long timeoutMs) {
+    final TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS);
     while (true) {
       try {
         HostPort hpobj = parseHostPortList(hp).get(0);
@@ -531,7 +533,7 @@ public class ZkTestServer {
         return true;
       }
       
-      if (System.currentTimeMillis() > start + timeout) {
+      if (timeout.hasTimedOut()) {
         break;
       }
       try {