Posted to reviews@spark.apache.org by kunalkhamar <gi...@git.apache.org> on 2017/07/06 22:14:16 UTC

[GitHub] spark pull request #17216: [SPARK-19873][SS] Record num shuffle partitions i...

Github user kunalkhamar commented on a diff in the pull request:

    https://github.com/apache/spark/pull/17216#discussion_r126029620
  
    --- Diff: sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala ---
    @@ -389,6 +392,102 @@ class StreamSuite extends StreamTest {
         query.stop()
         assert(query.exception.isEmpty)
       }
    +
    +  test("SPARK-19873: streaming aggregation with change in number of partitions") {
    +    val inputData = MemoryStream[(Int, Int)]
    +    val agg = inputData.toDS().groupBy("_1").count()
    +
    +    testStream(agg, OutputMode.Complete())(
    +      AddData(inputData, (1, 0), (2, 0)),
    +      StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "2")),
    +      CheckAnswer((1, 1), (2, 1)),
    +      StopStream,
    +      AddData(inputData, (3, 0), (2, 0)),
    +      StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "5")),
    +      CheckAnswer((1, 1), (2, 2), (3, 1)),
    +      StopStream,
    +      AddData(inputData, (3, 0), (1, 0)),
    +      StartStream(additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")),
    +      CheckAnswer((1, 2), (2, 2), (3, 2)))
    +  }
    +
    +  test("recover from a Spark v2.1 checkpoint") {
    +    var inputData: MemoryStream[Int] = null
    +    var query: DataStreamWriter[Row] = null
    +
    +    def prepareMemoryStream(): Unit = {
    +      inputData = MemoryStream[Int]
    +      inputData.addData(1, 2, 3, 4)
    +      inputData.addData(3, 4, 5, 6)
    +      inputData.addData(5, 6, 7, 8)
    +
    +      query = inputData
    +        .toDF()
    +        .groupBy($"value")
    +        .agg(count("*"))
    +        .writeStream
    +        .outputMode("complete")
    +        .format("memory")
    +    }
    +
    +    // Get an existing checkpoint generated by Spark v2.1.
    +    // v2.1 does not record the number of shuffle partitions in the offset metadata.
    +    val resourceUri =
    +      this.getClass.getResource("/structured-streaming/checkpoint-version-2.1.0").toURI
    +    val checkpointDir = new File(resourceUri)
    +
    +    // 1 - Test if recovery from the checkpoint is successful.
    +    prepareMemoryStream()
    +    withTempDir { dir =>
    +      // Copy the checkpoint to a temp dir to prevent changes to the original.
    +      // Otherwise the test would pass on the first run but fail on subsequent runs.
    +      FileUtils.copyDirectory(checkpointDir, dir)
    +
    +      // Checkpoint data was generated by a query with 10 shuffle partitions.
    +      // To test reading from the checkpoint, it must contain two or more batches,
    +      // since the last batch may be rerun.
    --- End diff --
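    
    As a side note, here is a minimal standalone sketch (not part of this PR) of
    the scenario the first test exercises: a streaming aggregation restarted from
    an existing checkpoint under a different spark.sql.shuffle.partitions value.
    The app name, query name, checkpoint path, and use of the built-in rate
    source are illustrative assumptions, not code from the PR.
    
        import org.apache.spark.sql.SparkSession
        import org.apache.spark.sql.functions.count
        
        val spark = SparkSession.builder()
          .appName("shuffle-partitions-recovery-sketch")  // hypothetical name
          .config("spark.sql.shuffle.partitions", "5")    // differs from the run that wrote the checkpoint
          .getOrCreate()
        import spark.implicits._
        
        // Any streaming aggregation works here; the rate source is just convenient.
        val counts = spark.readStream
          .format("rate")
          .load()
          .groupBy($"value" % 10)
          .agg(count("*"))
        
        // Restarting with the same checkpointLocation recovers prior state. State
        // store files are organized by shuffle partition, so changing the partition
        // count across restarts previously broke recovery; recording the setting in
        // the offset metadata lets the recovered query keep the original value.
        val query = counts.writeStream
          .queryName("counts_sketch")  // the memory sink uses the query name as the table name
          .outputMode("complete")
          .format("memory")
          .option("checkpointLocation", "/tmp/sketch-checkpoint")  // hypothetical path
          .start()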
    
    https://github.com/apache/spark/pull/18503#discussion_r126028838
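    
    For reference, with this change each offsets file under
    <checkpoint-dir>/offsets should carry the setting in its metadata line. The
    layout below is a rough illustration (timestamp and offset values are made
    up; the final line is the per-source offset, whose JSON depends on the
    source):
    
        v1
        {"batchWatermarkMs":0,"batchTimestampMs":1499379920000,"conf":{"spark.sql.shuffle.partitions":"5"}}
        2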

