You are viewing a plain text version of this content. The canonical link for it is here.
Posted to user@spark.apache.org by Ravi Chandran <ra...@gmail.com> on 2022/10/06 23:13:19 UTC

ERROR MicroBatchExecution

Hi,

I am getting a "temporary file does not exist" error. Any idea what could
be wrong? I am trying to join two large datasets. I am using Spark version
3.1.3.

Thanks,
Ravi

WARN TaskSetManager: Lost task 197.0 in stage 16.0 (TID 2240) (10.53.44.164
executor 3): java.lang.IllegalStateException: Error reading delta file
file:/tmp/temporary-ce0c71e9-2bd0-4eac-bc17-7c33540bddaf/state/1/197/left-keyToNumValues/1.delta
of HDFSStateStoreProvider[id = (op=1,part=197),dir =
file:/tmp/temporary-ce0c71e9-2bd0-4eac-bc17-7c33540bddaf/state/1/197/left-keyToNumValues]:
file:/tmp/temporary-ce0c71e9-2bd0-4eac-bc17-7c33540bddaf/state/1/197/left-keyToNumValues/1.delta
does not exist
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.updateFromDeltaFile(HDFSBackedStateStoreProvider.scala:462)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.$anonfun$loadMap$4(HDFSBackedStateStoreProvider.scala:418)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider$$Lambda$1702/0000000034AEA430.apply$mcVJ$sp(Unknown
Source)
        at
scala.runtime.java8.JFunction1$mcVJ$sp.apply(JFunction1$mcVJ$sp.java:23)
        at
scala.collection.immutable.NumericRange.foreach(NumericRange.scala:74)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.$anonfun$loadMap$2(HDFSBackedStateStoreProvider.scala:417)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider$$Lambda$1701/0000000034AEFB10.apply(Unknown
Source)
        at org.apache.spark.util.Utils$.timeTakenMs(Utils.scala:597)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.loadMap(HDFSBackedStateStoreProvider.scala:390)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.getLoadedMapForStore(HDFSBackedStateStoreProvider.scala:237)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.getStore(HDFSBackedStateStoreProvider.scala:221)
        at
org.apache.spark.sql.execution.streaming.state.StateStore$.get(StateStore.scala:469)
        at
org.apache.spark.sql.execution.streaming.state.SymmetricHashJoinStateManager$StateStoreHandler.getStateStore(SymmetricHashJoinStateManager.scala:376)
        at
org.apache.spark.sql.execution.streaming.state.SymmetricHashJoinStateManager$KeyToNumValuesStore.<init>(SymmetricHashJoinStateManager.scala:400)
        at
org.apache.spark.sql.execution.streaming.state.SymmetricHashJoinStateManager.<init>(SymmetricHashJoinStateManager.scala:344)
        at
org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinExec$OneSideHashJoiner.<init>(StreamingSymmetricHashJoinExec.scala:471)
        at
org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinExec.processPartitions(StreamingSymmetricHashJoinExec.scala:253)
        at
org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinExec.$anonfun$doExecute$1(StreamingSymmetricHashJoinExec.scala:225)
        at
org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinExec.$anonfun$doExecute$1$adapted(StreamingSymmetricHashJoinExec.scala:225)
        at
org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinExec$$Lambda$1002/00000000D17CF220.apply(Unknown
Source)
        at
org.apache.spark.sql.execution.streaming.StreamingSymmetricHashJoinHelper$StateStoreAwareZipPartitionsRDD.compute(StreamingSymmetricHashJoinHelper.scala:235)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
        at
org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
at
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
        at
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at
org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:498)
        at
org.apache.spark.executor.Executor$TaskRunner$$Lambda$481/00000000D0093F50.apply(Unknown
Source)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1439)
        at
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:501)
        at
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1160)
        at
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.lang.Thread.run(Thread.java:820)
Caused by: java.io.FileNotFoundException: File
file:/tmp/temporary-ce0c71e9-2bd0-4eac-bc17-7c33540bddaf/state/1/197/left-keyToNumValues/1.delta
does not exist
        at
org.apache.hadoop.fs.RawLocalFileSystem.deprecatedGetFileStatus(RawLocalFileSystem.java:666)
        at
org.apache.hadoop.fs.RawLocalFileSystem.getFileLinkStatusInternal(RawLocalFileSystem.java:987)
        at
org.apache.hadoop.fs.RawLocalFileSystem.getFileStatus(RawLocalFileSystem.java:656)
        at
org.apache.hadoop.fs.RawLocalFileSystem.open(RawLocalFileSystem.java:212)
        at
org.apache.hadoop.fs.DelegateToFileSystem.open(DelegateToFileSystem.java:190)
        at
org.apache.hadoop.fs.AbstractFileSystem.open(AbstractFileSystem.java:649)
        at org.apache.hadoop.fs.FilterFs.open(FilterFs.java:220)
        at org.apache.hadoop.fs.FileContext$6.next(FileContext.java:869)
        at org.apache.hadoop.fs.FileContext$6.next(FileContext.java:865)
        at
org.apache.hadoop.fs.FSLinkResolver.resolve(FSLinkResolver.java:90)
        at org.apache.hadoop.fs.FileContext.open(FileContext.java:871)
        at
org.apache.spark.sql.execution.streaming.FileContextBasedCheckpointFileManager.open(CheckpointFileManager.scala:326)
        at
org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider.updateFromDeltaFile(HDFSBackedStateStoreProvider.scala:458)
        ... 45 more


22/10/06 13:10:55 ERROR TaskSetManager: Task 187 in stage 16.0 failed 4
times; aborting job
22/10/06 13:10:55 ERROR WriteToDataSourceV2Exec: Data source write support
org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@8c0a6c1a
is aborting.
22/10/06 13:10:55 ERROR WriteToDataSourceV2Exec: Data source write support
org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@8c0a6c1a
aborted.
22/10/06 13:10:55 ERROR MicroBatchExecution: Query [id =
e8e3ab90-a107-48f2-8deb-721f4c739c19, runId =
29913719-e63c-4949-9286-f0426e104d31] terminated with error
org.apache.spark.SparkException: Writing job aborted.

>