You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@druid.apache.org by fj...@apache.org on 2019/03/30 18:36:32 UTC
[incubator-druid] branch master updated: Update scan benchmark for time ordering (#7385)
This is an automated email from the ASF dual-hosted git repository.
fjy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-druid.git
The following commit(s) were added to refs/heads/master by this push:
new 2bf6fc3 Update scan benchmark for time ordering (#7385)
2bf6fc3 is described below
commit 2bf6fc353a48bf601f312d61fd2753ae6a7e236a
Author: Jonathan Wei <jo...@users.noreply.github.com>
AuthorDate: Sat Mar 30 11:36:25 2019 -0700
Update scan benchmark for time ordering (#7385)
---
.../druid/benchmark/query/ScanBenchmark.java | 66 ++++++++++++++++++++--
.../java/org/apache/druid/timeline/SegmentId.java | 9 +++
2 files changed, 70 insertions(+), 5 deletions(-)
diff --git a/benchmarks/src/main/java/org/apache/druid/benchmark/query/ScanBenchmark.java b/benchmarks/src/main/java/org/apache/druid/benchmark/query/ScanBenchmark.java
index 876aa95..997ae59 100644
--- a/benchmarks/src/main/java/org/apache/druid/benchmark/query/ScanBenchmark.java
+++ b/benchmarks/src/main/java/org/apache/druid/benchmark/query/ScanBenchmark.java
@@ -21,6 +21,7 @@ package org.apache.druid.benchmark.query;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.druid.benchmark.datagen.BenchmarkDataGenerator;
@@ -30,6 +31,7 @@ import org.apache.druid.data.input.InputRow;
import org.apache.druid.data.input.Row;
import org.apache.druid.hll.HyperLogLogHash;
import org.apache.druid.jackson.DefaultObjectMapper;
+import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.concurrent.Execs;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.logger.Logger;
@@ -42,6 +44,8 @@ import org.apache.druid.query.QueryRunner;
import org.apache.druid.query.QueryRunnerFactory;
import org.apache.druid.query.QueryToolChest;
import org.apache.druid.query.Result;
+import org.apache.druid.query.SegmentDescriptor;
+import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.aggregation.hyperloglog.HyperUniquesSerde;
import org.apache.druid.query.extraction.StrlenExtractionFn;
import org.apache.druid.query.filter.BoundDimFilter;
@@ -55,6 +59,7 @@ import org.apache.druid.query.scan.ScanQueryQueryToolChest;
import org.apache.druid.query.scan.ScanQueryRunnerFactory;
import org.apache.druid.query.scan.ScanResultValue;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;
+import org.apache.druid.query.spec.MultipleSpecificSegmentSpec;
import org.apache.druid.query.spec.QuerySegmentSpec;
import org.apache.druid.segment.IncrementalIndexSegment;
import org.apache.druid.segment.IndexIO;
@@ -342,7 +347,24 @@ public class ScanBenchmark
new IncrementalIndexSegment(incIndexes.get(0), SegmentId.dummy("incIndex"))
);
- List<ScanResultValue> results = ScanBenchmark.runQuery(factory, runner, query);
+ Query effectiveQuery = query
+ .withDataSource(new TableDataSource("incIndex"))
+ .withQuerySegmentSpec(
+ new MultipleSpecificSegmentSpec(
+ ImmutableList.of(
+ new SegmentDescriptor(
+ Intervals.ETERNITY,
+ "dummy_version",
+ 0
+ )
+ )
+ )
+ )
+ .withOverriddenContext(
+ ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false)
+ );
+
+ List<ScanResultValue> results = ScanBenchmark.runQuery(factory, runner, effectiveQuery);
blackhole.consume(results);
}
@@ -357,7 +379,24 @@ public class ScanBenchmark
new QueryableIndexSegment(qIndexes.get(0), SegmentId.dummy("qIndex"))
);
- List<ScanResultValue> results = ScanBenchmark.runQuery(factory, runner, query);
+ Query effectiveQuery = query
+ .withDataSource(new TableDataSource("qIndex"))
+ .withQuerySegmentSpec(
+ new MultipleSpecificSegmentSpec(
+ ImmutableList.of(
+ new SegmentDescriptor(
+ Intervals.ETERNITY,
+ "dummy_version",
+ 0
+ )
+ )
+ )
+ )
+ .withOverriddenContext(
+ ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false)
+ );
+
+ List<ScanResultValue> results = ScanBenchmark.runQuery(factory, runner, effectiveQuery);
blackhole.consume(results);
}
@@ -366,14 +405,22 @@ public class ScanBenchmark
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void queryMultiQueryableIndex(Blackhole blackhole)
{
+ List<SegmentDescriptor> segmentDescriptors = new ArrayList<>();
List<QueryRunner<Row>> runners = new ArrayList<>();
QueryToolChest toolChest = factory.getToolchest();
for (int i = 0; i < numSegments; i++) {
- String segmentName = "qIndex" + i;
+ String segmentName = "qIndex";
final QueryRunner<Result<ScanResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
factory,
SegmentId.dummy(segmentName),
- new QueryableIndexSegment(qIndexes.get(i), SegmentId.dummy(segmentName))
+ new QueryableIndexSegment(qIndexes.get(i), SegmentId.dummy(segmentName, i))
+ );
+ segmentDescriptors.add(
+ new SegmentDescriptor(
+ Intervals.ETERNITY,
+ "dummy_version",
+ i
+ )
);
runners.add(toolChest.preMergeQueryDecoration(runner));
}
@@ -385,8 +432,17 @@ public class ScanBenchmark
)
);
+ Query effectiveQuery = query
+ .withDataSource(new TableDataSource("qIndex"))
+ .withQuerySegmentSpec(
+ new MultipleSpecificSegmentSpec(segmentDescriptors)
+ )
+ .withOverriddenContext(
+ ImmutableMap.of(ScanQuery.CTX_KEY_OUTERMOST, false)
+ );
+
Sequence<Result<ScanResultValue>> queryResult = theRunner.run(
- QueryPlus.wrap(query),
+ QueryPlus.wrap(effectiveQuery),
new HashMap<>()
);
List<Result<ScanResultValue>> results = queryResult.toList();
diff --git a/core/src/main/java/org/apache/druid/timeline/SegmentId.java b/core/src/main/java/org/apache/druid/timeline/SegmentId.java
index 99ee897..2c21e21 100644
--- a/core/src/main/java/org/apache/druid/timeline/SegmentId.java
+++ b/core/src/main/java/org/apache/druid/timeline/SegmentId.java
@@ -247,6 +247,15 @@ public final class SegmentId implements Comparable<SegmentId>
return of(dataSource, Intervals.ETERNITY, "dummy_version", 0);
}
+ /**
+ * Creates a dummy SegmentId with the given data source and partition number.
+ * This method is useful in benchmark and test code.
+ */
+ public static SegmentId dummy(String dataSource, int partitionNum)
+ {
+ return of(dataSource, Intervals.ETERNITY, "dummy_version", partitionNum);
+ }
+
private final String dataSource;
/**
* {@code intervalStartMillis}, {@link #intervalEndMillis} and {@link #intervalChronology} are the three fields of
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@druid.apache.org
For additional commands, e-mail: commits-help@druid.apache.org