You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@druid.apache.org by jo...@apache.org on 2019/05/16 20:59:18 UTC

[incubator-druid] branch master updated: Add option to use YARN RM as fallback for JobHistory failure (#7673)

This is an automated email from the ASF dual-hosted git repository.

jonwei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-druid.git


The following commit(s) were added to refs/heads/master by this push:
     new d99f77a  Add option to use YARN RM as fallback for JobHistory failure (#7673)
d99f77a is described below

commit d99f77a01b5f4e0abde0ec85c1a1039de09bbb78
Author: Jonathan Wei <jo...@users.noreply.github.com>
AuthorDate: Thu May 16 13:59:10 2019 -0700

    Add option to use YARN RM as fallback for JobHistory failure (#7673)
    
    * Add option to use YARN RM as fallback for job status
    
    * PR comments
---
 docs/content/ingestion/hadoop.md                   |  1 +
 .../MaterializedViewSupervisorSpec.java            |  3 +-
 .../indexer/DetermineHashedPartitionsJob.java      | 15 +++-
 .../druid/indexer/DeterminePartitionsJob.java      | 30 +++++--
 .../druid/indexer/HadoopDruidIndexerConfig.java    |  6 ++
 .../apache/druid/indexer/HadoopTuningConfig.java   | 22 ++++-
 .../apache/druid/indexer/IndexGeneratorJob.java    | 30 ++++---
 .../main/java/org/apache/druid/indexer/Utils.java  | 98 ++++++++++++++++++++++
 .../druid/indexer/BatchDeltaIngestionTest.java     |  1 +
 .../indexer/DetermineHashedPartitionsJobTest.java  |  1 +
 .../druid/indexer/DeterminePartitionsJobTest.java  |  1 +
 .../indexer/HadoopDruidIndexerConfigTest.java      |  2 +
 .../druid/indexer/HadoopTuningConfigTest.java      |  1 +
 .../druid/indexer/IndexGeneratorJobTest.java       |  1 +
 .../org/apache/druid/indexer/JobHelperTest.java    |  1 +
 .../indexer/path/GranularityPathSpecTest.java      |  1 +
 16 files changed, 187 insertions(+), 27 deletions(-)

diff --git a/docs/content/ingestion/hadoop.md b/docs/content/ingestion/hadoop.md
index ab1963b..b9a6d72 100644
--- a/docs/content/ingestion/hadoop.md
+++ b/docs/content/ingestion/hadoop.md
@@ -198,6 +198,7 @@ The tuningConfig is optional and default parameters will be used if no tuningCon
 |useExplicitVersion|Boolean|Forces HadoopIndexTask to use version.|no (default = false)|
 |logParseExceptions|Boolean|If true, log an error message when a parsing exception occurs, containing information about the row where the error occurred.|false|no|
 |maxParseExceptions|Integer|The maximum number of parse exceptions that can occur before the task halts ingestion and fails. Overrides `ignoreInvalidRows` if `maxParseExceptions` is defined.|unlimited|no|
+|useYarnRMJobStatusFallback|Boolean|If the Hadoop jobs created by the indexing task are unable to retrieve their completion status from the JobHistory server, and this parameter is true, the indexing task will try to fetch the application status from `http://<yarn-rm-address>/ws/v1/cluster/apps/<application-id>`, where `<yarn-rm-address>` is the value of `yarn.resourcemanager.webapp.address` in your Hadoop configuration. This flag is intended as a fallback for cases where an indexing task's jobs succeed, but the JobHistory server is unavailable, causing the task to fail because it cannot determine the job statuses.|true|no|
 
 ### jobProperties field of TuningConfig
 
diff --git a/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java b/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java
index 3a9a113..02f07a5 100644
--- a/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java
+++ b/extensions-contrib/materialized-view-maintenance/src/main/java/org/apache/druid/indexing/materializedview/MaterializedViewSupervisorSpec.java
@@ -188,7 +188,8 @@ public class MaterializedViewSupervisorSpec implements SupervisorSpec
         true,
         tuningConfig.getUserAllowedHadoopPrefix(),
         tuningConfig.isLogParseExceptions(),
-        tuningConfig.getMaxParseExceptions()
+        tuningConfig.getMaxParseExceptions(),
+        tuningConfig.isUseYarnRMJobStatusFallback()
     );
     
     // generate granularity
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java
index d30899c..c83bc08 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DetermineHashedPartitionsJob.java
@@ -129,10 +129,17 @@ public class DetermineHashedPartitionsJob implements Jobby
         JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), groupByJob.getJobID().toString());
       }
 
-      if (!groupByJob.waitForCompletion(true)) {
-        log.error("Job failed: %s", groupByJob.getJobID());
-        failureCause = Utils.getFailureMessage(groupByJob, config.JSON_MAPPER);
-        return false;
+      try {
+        if (!groupByJob.waitForCompletion(true)) {
+          log.error("Job failed: %s", groupByJob.getJobID());
+          failureCause = Utils.getFailureMessage(groupByJob, config.JSON_MAPPER);
+          return false;
+        }
+      }
+      catch (IOException ioe) {
+        if (!Utils.checkAppSuccessForJobIOException(ioe, groupByJob, config.isUseYarnRMJobStatusFallback())) {
+          throw ioe;
+        }
       }
 
       /*
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
index dc0cf4e..115a926 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/DeterminePartitionsJob.java
@@ -165,10 +165,17 @@ public class DeterminePartitionsJob implements Jobby
         }
 
 
-        if (!groupByJob.waitForCompletion(true)) {
-          log.error("Job failed: %s", groupByJob.getJobID());
-          failureCause = Utils.getFailureMessage(groupByJob, config.JSON_MAPPER);
-          return false;
+        try {
+          if (!groupByJob.waitForCompletion(true)) {
+            log.error("Job failed: %s", groupByJob.getJobID());
+            failureCause = Utils.getFailureMessage(groupByJob, config.JSON_MAPPER);
+            return false;
+          }
+        }
+        catch (IOException ioe) {
+          if (!Utils.checkAppSuccessForJobIOException(ioe, groupByJob, config.isUseYarnRMJobStatusFallback())) {
+            throw ioe;
+          }
         }
       } else {
         log.info("Skipping group-by job.");
@@ -228,10 +235,17 @@ public class DeterminePartitionsJob implements Jobby
       }
 
 
-      if (!dimSelectionJob.waitForCompletion(true)) {
-        log.error("Job failed: %s", dimSelectionJob.getJobID().toString());
-        failureCause = Utils.getFailureMessage(dimSelectionJob, config.JSON_MAPPER);
-        return false;
+      try {
+        if (!dimSelectionJob.waitForCompletion(true)) {
+          log.error("Job failed: %s", dimSelectionJob.getJobID().toString());
+          failureCause = Utils.getFailureMessage(dimSelectionJob, config.JSON_MAPPER);
+          return false;
+        }
+      }
+      catch (IOException ioe) {
+        if (!Utils.checkAppSuccessForJobIOException(ioe, dimSelectionJob, config.isUseYarnRMJobStatusFallback())) {
+          throw ioe;
+        }
       }
 
       /*
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerConfig.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerConfig.java
index 7e02f82..fc9a5d9 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerConfig.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopDruidIndexerConfig.java
@@ -375,6 +375,12 @@ public class HadoopDruidIndexerConfig
     return schema.getTuningConfig().getMaxParseExceptions();
   }
 
+  public boolean isUseYarnRMJobStatusFallback()
+  {
+    return schema.getTuningConfig().isUseYarnRMJobStatusFallback();
+  }
+
+
   public void setHadoopJobIdFileName(String hadoopJobIdFileName)
   {
     this.hadoopJobIdFileName = hadoopJobIdFileName;
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java
index cd11029..5fd9b3d 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/HadoopTuningConfig.java
@@ -71,6 +71,7 @@ public class HadoopTuningConfig implements TuningConfig
         false,
         null,
         null,
+        null,
         null
     );
   }
@@ -95,6 +96,7 @@ public class HadoopTuningConfig implements TuningConfig
   private final List<String> allowedHadoopPrefix;
   private final boolean logParseExceptions;
   private final int maxParseExceptions;
+  private final boolean useYarnRMJobStatusFallback;
 
   @JsonCreator
   public HadoopTuningConfig(
@@ -121,7 +123,8 @@ public class HadoopTuningConfig implements TuningConfig
       final @JsonProperty("useExplicitVersion") boolean useExplicitVersion,
       final @JsonProperty("allowedHadoopPrefix") List<String> allowedHadoopPrefix,
       final @JsonProperty("logParseExceptions") @Nullable Boolean logParseExceptions,
-      final @JsonProperty("maxParseExceptions") @Nullable Integer maxParseExceptions
+      final @JsonProperty("maxParseExceptions") @Nullable Integer maxParseExceptions,
+      final @JsonProperty("useYarnRMJobStatusFallback") @Nullable Boolean useYarnRMJobStatusFallback
   )
   {
     this.workingPath = workingPath;
@@ -162,6 +165,8 @@ public class HadoopTuningConfig implements TuningConfig
       }
     }
     this.logParseExceptions = logParseExceptions == null ? TuningConfig.DEFAULT_LOG_PARSE_EXCEPTIONS : logParseExceptions;
+
+    this.useYarnRMJobStatusFallback = useYarnRMJobStatusFallback == null ? true : useYarnRMJobStatusFallback;
   }
 
   @JsonProperty
@@ -295,6 +300,12 @@ public class HadoopTuningConfig implements TuningConfig
     return maxParseExceptions;
   }
 
+  @JsonProperty
+  public boolean isUseYarnRMJobStatusFallback()
+  {
+    return useYarnRMJobStatusFallback;
+  }
+
   public HadoopTuningConfig withWorkingPath(String path)
   {
     return new HadoopTuningConfig(
@@ -319,7 +330,8 @@ public class HadoopTuningConfig implements TuningConfig
         useExplicitVersion,
         allowedHadoopPrefix,
         logParseExceptions,
-        maxParseExceptions
+        maxParseExceptions,
+        useYarnRMJobStatusFallback
     );
   }
 
@@ -347,7 +359,8 @@ public class HadoopTuningConfig implements TuningConfig
         useExplicitVersion,
         allowedHadoopPrefix,
         logParseExceptions,
-        maxParseExceptions
+        maxParseExceptions,
+        useYarnRMJobStatusFallback
     );
   }
 
@@ -375,7 +388,8 @@ public class HadoopTuningConfig implements TuningConfig
         useExplicitVersion,
         allowedHadoopPrefix,
         logParseExceptions,
-        maxParseExceptions
+        maxParseExceptions,
+        useYarnRMJobStatusFallback
     );
   }
 }
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java
index 0fb017e..6c032b0 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/IndexGeneratorJob.java
@@ -212,21 +212,31 @@ public class IndexGeneratorJob implements Jobby
         JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), job.getJobID().toString());
       }
 
-      boolean success = job.waitForCompletion(true);
+      try {
+        boolean success = job.waitForCompletion(true);
 
-      Counters counters = job.getCounters();
-      if (counters == null) {
-        log.info("No counters found for job [%s]", job.getJobName());
-      } else {
-        Counter invalidRowCount = counters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER);
-        if (invalidRowCount != null) {
-          jobStats.setInvalidRowCount(invalidRowCount.getValue());
+        Counters counters = job.getCounters();
+        if (counters == null) {
+          log.info("No counters found for job [%s]", job.getJobName());
+        } else {
+          Counter invalidRowCount = counters.findCounter(HadoopDruidIndexerConfig.IndexJobCounters.INVALID_ROW_COUNTER);
+          if (invalidRowCount != null) {
+            jobStats.setInvalidRowCount(invalidRowCount.getValue());
+          } else {
+            log.info("No invalid row counter found for job [%s]", job.getJobName());
+          }
+        }
+
+        return success;
+      }
+      catch (IOException ioe) {
+        if (!Utils.checkAppSuccessForJobIOException(ioe, job, config.isUseYarnRMJobStatusFallback())) {
+          throw ioe;
         } else {
-          log.info("No invalid row counter found for job [%s]", job.getJobName());
+          return true;
         }
       }
 
-      return success;
     }
     catch (Exception e) {
       throw new RuntimeException(e);
diff --git a/indexing-hadoop/src/main/java/org/apache/druid/indexer/Utils.java b/indexing-hadoop/src/main/java/org/apache/druid/indexer/Utils.java
index c45b2b6..490c314 100644
--- a/indexing-hadoop/src/main/java/org/apache/druid/indexer/Utils.java
+++ b/indexing-hadoop/src/main/java/org/apache/druid/indexer/Utils.java
@@ -19,9 +19,12 @@
 
 package org.apache.druid.indexer;
 
+import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.druid.jackson.DefaultObjectMapper;
 import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.java.util.common.RetryUtils;
+import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.java.util.common.jackson.JacksonUtils;
 import org.apache.druid.java.util.common.logger.Logger;
 import org.apache.hadoop.fs.FileSystem;
@@ -33,12 +36,17 @@ import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.eclipse.jetty.client.HttpClient;
+import org.eclipse.jetty.client.api.ContentResponse;
 
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  */
@@ -143,4 +151,94 @@ public class Utils
       return null;
     }
   }
+
+  /**
+   * It is possible for a Hadoop Job to succeed, but for `job.waitForCompletion()` to fail because of
+   * issues with the JobHistory server.
+   *
+   * When the JobHistory server is unavailable, it's possible to fetch the application's status
+   * from the YARN ResourceManager instead.
+   *
+   * Returns true if both `useYarnRMJobStatusFallback` is enabled and YARN ResourceManager reported success for the
+   * target job.
+   */
+  public static boolean checkAppSuccessForJobIOException(
+      IOException ioe,
+      Job job,
+      boolean useYarnRMJobStatusFallback
+  )
+  {
+    if (!useYarnRMJobStatusFallback) {
+      log.info("useYarnRMJobStatusFallback is false, not checking YARN ResourceManager.");
+      return false;
+    }
+    log.error(ioe, "Encountered IOException with job, checking application success from YARN ResourceManager.");
+
+    boolean success = checkAppSuccessFromYarnRM(job);
+    if (!success) {
+      log.error("YARN RM did not report job success either.");
+    }
+    return success;
+  }
+
+  public static boolean checkAppSuccessFromYarnRM(Job job)
+  {
+    final HttpClient httpClient = new HttpClient();
+    final AtomicBoolean succeeded = new AtomicBoolean(false);
+    try {
+      httpClient.start();
+      RetryUtils.retry(
+          () -> {
+            checkAppSuccessFromYarnRMOnce(httpClient, job, succeeded);
+            return null;
+          },
+          ex -> {
+            return !succeeded.get();
+          },
+          5
+      );
+      return succeeded.get();
+    }
+    catch (Exception e) {
+      log.error(e, "Got exception while trying to contact YARN RM.");
+      // we're already in a best-effort fallback failure handling case, just stop if we have issues with the http client
+      return false;
+    }
+    finally {
+      try {
+        httpClient.stop();
+      }
+      catch (Exception e) {
+        log.error(e, "Got exception with httpClient.stop() while trying to contact YARN RM.");
+      }
+    }
+  }
+
+  private static void checkAppSuccessFromYarnRMOnce(
+      HttpClient httpClient,
+      Job job,
+      AtomicBoolean succeeded
+  ) throws IOException, InterruptedException, ExecutionException, TimeoutException
+  {
+    String appId = StringUtils.replace(job.getJobID().toString(), "job", "application");
+    String yarnRM = job.getConfiguration().get("yarn.resourcemanager.webapp.address");
+    String yarnEndpoint = StringUtils.format("http://%s/ws/v1/cluster/apps/%s", yarnRM, appId);
+    log.info("Attempting to retrieve app status from YARN ResourceManager at [%s].", yarnEndpoint);
+
+    ContentResponse res = httpClient.GET(yarnEndpoint);
+    log.info("App status response from YARN RM: " + res.getContentAsString());
+    Map<String, Object> respMap = HadoopDruidIndexerConfig.JSON_MAPPER.readValue(
+        res.getContentAsString(),
+        new TypeReference<Map<String, Object>>()
+        {
+        }
+    );
+
+    Map<String, Object> appMap = (Map<String, Object>) respMap.get("app");
+    String state = (String) appMap.get("state");
+    String finalStatus = (String) appMap.get("finalStatus");
+    if ("FINISHED".equals(state) && "SUCCEEDED".equals(finalStatus)) {
+      succeeded.set(true);
+    }
+  }
 }
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java
index a885c5f..3fcf50a 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/BatchDeltaIngestionTest.java
@@ -476,6 +476,7 @@ public class BatchDeltaIngestionTest
                 false,
                 null,
                 null,
+                null,
                 null
             )
         )
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java
index bbec676..59453fc 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DetermineHashedPartitionsJobTest.java
@@ -213,6 +213,7 @@ public class DetermineHashedPartitionsJobTest
             false,
             null,
             null,
+            null,
             null
         )
     );
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java
index 51f4120..e9265bc 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/DeterminePartitionsJobTest.java
@@ -276,6 +276,7 @@ public class DeterminePartitionsJobTest
                 false,
                 null,
                 null,
+                null,
                 null
             )
         )
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java
index 9161a1c..7573aea 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopDruidIndexerConfigTest.java
@@ -100,6 +100,7 @@ public class HadoopDruidIndexerConfigTest
             false,
             null,
             null,
+            null,
             null
         )
     );
@@ -177,6 +178,7 @@ public class HadoopDruidIndexerConfigTest
             false,
             null,
             null,
+            null,
             null
         )
     );
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java
index 5d863a3..2553583 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/HadoopTuningConfigTest.java
@@ -59,6 +59,7 @@ public class HadoopTuningConfigTest
         true,
         null,
         null,
+        null,
         null
     );
 
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java
index 743c03c..3925bcd 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/IndexGeneratorJobTest.java
@@ -537,6 +537,7 @@ public class IndexGeneratorJobTest
                 false,
                 null,
                 null,
+                null,
                 null
             )
         )
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java
index 815c1b6..3969e12 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/JobHelperTest.java
@@ -134,6 +134,7 @@ public class JobHelperTest
                 false,
                 null,
                 null,
+                null,
                 null
             )
         )
diff --git a/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java b/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java
index c672142..7f2ce22 100644
--- a/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java
+++ b/indexing-hadoop/src/test/java/org/apache/druid/indexer/path/GranularityPathSpecTest.java
@@ -76,6 +76,7 @@ public class GranularityPathSpecTest
       false,
       null,
       null,
+      null,
       null
   );
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@druid.apache.org
For additional commands, e-mail: commits-help@druid.apache.org