You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@accumulo.apache.org by ec...@apache.org on 2013/03/15 19:37:25 UTC

svn commit: r1457055 - /accumulo/branches/1.5/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java

Author: ecn
Date: Fri Mar 15 18:37:25 2013
New Revision: 1457055

URL: http://svn.apache.org/r1457055
Log:
ACCUMULO-727 add exponential back-off when bulk loading files

Modified:
    accumulo/branches/1.5/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java

Modified: accumulo/branches/1.5/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
URL: http://svn.apache.org/viewvc/accumulo/branches/1.5/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java?rev=1457055&r1=1457054&r2=1457055&view=diff
==============================================================================
--- accumulo/branches/1.5/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java (original)
+++ accumulo/branches/1.5/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java Fri Mar 15 18:37:25 2013
@@ -180,7 +180,9 @@ public class BulkImporter {
       for (Entry<Path,List<KeyExtent>> entry : assignmentFailures.entrySet())
         failureCount.put(entry.getKey(), 1);
       
+      long sleepTime = 2*1000;
       while (assignmentFailures.size() > 0) {
+        sleepTime = Math.min(sleepTime*2, 60*1000);
         locator.invalidateCache();
        // assumption about assignment failures is that it is caused by a split
        // happening or a missing location
@@ -189,7 +191,7 @@ public class BulkImporter {
         // same key range and are contiguous (no holes, no overlap)
         
         timer.start(Timers.SLEEP);
-        UtilWaitThread.sleep(4000);
+        UtilWaitThread.sleep(sleepTime);
         timer.stop(Timers.SLEEP);
         
         log.debug("Trying to assign " + assignmentFailures.size() + " map files that previously failed on some key extents");
@@ -245,8 +247,9 @@ public class BulkImporter {
         
         Set<Entry<Path,Integer>> failureIter = failureCount.entrySet();
         for (Entry<Path,Integer> entry : failureIter) {
-          if (entry.getValue() > acuConf.getCount(Property.TSERV_BULK_RETRY) && assignmentFailures.get(entry.getKey()) != null) {
-            log.error("Map file " + entry.getKey() + " failed more than three times, giving up.");
+          int retries = acuConf.getCount(Property.TSERV_BULK_RETRY);
+          if (entry.getValue() > retries && assignmentFailures.get(entry.getKey()) != null) {
+            log.error("Map file " + entry.getKey() + " failed more than " + retries + " times, giving up.");
             completeFailures.put(entry.getKey(), assignmentFailures.get(entry.getKey()));
             assignmentFailures.remove(entry.getKey());
           }