Aborted
Console Output

Skipping 3,232 KB of earlier output.
p$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.base/java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1426)
	at java.base/java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:290)
	at java.base/java.util.concurrent.ForkJoinPool$WorkQueue.topLevelExec(ForkJoinPool.java:1020)
	at java.base/java.util.concurrent.ForkJoinPool.scan(ForkJoinPool.java:1656)
	at java.base/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1594)
	at java.base/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:177)
- read parquet footers in parallel
SaveLoadSuite:
- save with path and load
- save with string mode and path, and load
- save with path and datasource, and load
- save with data source and options, and load
- save and save again
- SPARK-23459: Improve error message when specified unknown column in partition columns
- skip empty files in non bucketed read
OrcQuerySuite:
- Read/write All Types
- Read/write binary data
- Read/write all types with non-primitive type
- Read/write UserDefinedType
- Creating case class RDD table
- Simple selection form ORC table
- save and load case class RDD with `None`s as orc
- SPARK-16610: Respect orc.compress (i.e., OrcConf.COMPRESS) when compression is unset
- Compression options for writing to an ORC file (SNAPPY, ZLIB and NONE)
- simple select queries
- appending
- overwriting
- self-join
- nested data - struct with array field
- nested data - array of struct
- columns only referenced by pushed down filters should remain
- SPARK-5309 strings stored using dictionary compression in orc
- SPARK-9170: Don't implicitly lowercase of user-provided columns
- SPARK-10623 Enable ORC PPD
- SPARK-14962 Produce correct results on array type with isnotnull
- SPARK-15198 Support for pushing down filters for boolean types
- Support for pushing down filters for decimal types
- Support for pushing down filters for timestamp types
- column nullability and comment - write and then read
- Empty schema does not read data from ORC file
- read from multiple orc input paths
09:47:18.416 WARN org.apache.spark.sql.execution.datasources.v2.FilePartitionReader: Skipped the rest of the content in the corrupted file.
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-99710bb9-c22b-4c28-8e74-9c81985b09f1/third/part-00001-4e6557af-f646-436d-89c9-37763b53ad43-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1804)
	at org.apache.spark.rdd.RDD.$anonfun$count$1(RDD.scala:1227)
	at org.apache.spark.rdd.RDD.$anonfun$count$1$adapted(RDD.scala:1227)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:18.444 WARN org.apache.spark.sql.execution.datasources.v2.FilePartitionReader: Skipped the rest of the content in the corrupted file.
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-99710bb9-c22b-4c28-8e74-9c81985b09f1/third/part-00001-4e6557af-f646-436d-89c9-37763b53ad43-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:19.121 WARN org.apache.spark.sql.execution.datasources.v2.FilePartitionReader: Skipped the rest of the content in the corrupted file.
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-7f4ed7fa-271e-4e7c-a1e4-5f08e6e829dd/third/part-00001-c0ee5b74-e60e-4df0-b027-609b595984be-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1804)
	at org.apache.spark.rdd.RDD.$anonfun$count$1(RDD.scala:1227)
	at org.apache.spark.rdd.RDD.$anonfun$count$1$adapted(RDD.scala:1227)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:19.157 WARN org.apache.spark.sql.execution.datasources.v2.FilePartitionReader: Skipped the rest of the content in the corrupted file.
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-7f4ed7fa-271e-4e7c-a1e4-5f08e6e829dd/third/part-00001-c0ee5b74-e60e-4df0-b027-609b595984be-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:19.472 WARN org.apache.spark.sql.execution.datasources.orc.OrcUtils: Skipped the footer in the corrupted file: file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-33d52c3b-b5cf-47c8-af1a-b75d02d0fad5/first/part-00001-f540280e-44ac-4bf0-992b-ffecab23c7c4-c000.json
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-33d52c3b-b5cf-47c8-af1a-b75d02d0fad5/first/part-00001-f540280e-44ac-4bf0-992b-ffecab23c7c4-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$readSchema$1(OrcUtils.scala:65)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.readSchema(OrcUtils.scala:65)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$readSchema$4(OrcUtils.scala:88)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.readSchema(OrcUtils.scala:88)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.inferSchema(OrcUtils.scala:114)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcTable.inferSchema(OrcTable.scala:44)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.$anonfun$dataSchema$4(FileTable.scala:69)
	at scala.Option.orElse(Option.scala:447)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.dataSchema$lzycompute(FileTable.scala:69)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.dataSchema(FileTable.scala:63)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.schema$lzycompute(FileTable.scala:82)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.schema(FileTable.scala:80)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation$.create(DataSourceV2Relation.scala:141)
	at org.apache.spark.sql.DataFrameReader.$anonfun$load$1(DataFrameReader.scala:229)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:197)
	at org.apache.spark.sql.DataFrameReader.orc(DataFrameReader.scala:727)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$153(OrcQuerySuite.scala:570)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$153$adapted(OrcQuerySuite.scala:564)
	at org.apache.spark.sql.test.SQLTestUtils.$anonfun$withTempDir$1(SQLTestUtils.scala:76)
	at org.apache.spark.sql.test.SQLTestUtils.$anonfun$withTempDir$1$adapted(SQLTestUtils.scala:75)
	at org.apache.spark.SparkFunSuite.withTempDir(SparkFunSuite.scala:161)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.org$apache$spark$sql$test$SQLTestUtils$$super$withTempDir(OrcTest.scala:50)
	at org.apache.spark.sql.test.SQLTestUtils.withTempDir(SQLTestUtils.scala:75)
	at org.apache.spark.sql.test.SQLTestUtils.withTempDir$(SQLTestUtils.scala:74)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.withTempDir(OrcTest.scala:50)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.testAllCorruptFiles$1(OrcQuerySuite.scala:564)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$156(OrcQuerySuite.scala:591)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.Assertions.intercept(Assertions.scala:807)
	at org.scalatest.Assertions.intercept$(Assertions.scala:804)
	at org.scalatest.FunSuite.intercept(FunSuite.scala:1560)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$155(OrcQuerySuite.scala:590)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf(SQLHelper.scala:52)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf$(SQLHelper.scala:36)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(OrcTest.scala:50)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:231)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:229)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.withSQLConf(OrcTest.scala:50)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$148(OrcQuerySuite.scala:587)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
	at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
	at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:286)
	at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
	at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:393)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:381)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:376)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:458)
	at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite.run(Suite.scala:1124)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:518)
	at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
	at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1187)
	at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1234)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at org.scalatest.Suite.runNestedSuites(Suite.scala:1232)
	at org.scalatest.Suite.runNestedSuites$(Suite.scala:1166)
	at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
	at org.scalatest.Suite.run(Suite.scala:1121)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
	at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
	at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1349)
	at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1343)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1343)
	at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1033)
	at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1011)
	at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1509)
	at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1011)
	at org.scalatest.tools.Runner$.main(Runner.scala:827)
	at org.scalatest.tools.Runner.main(Runner.scala)
09:47:19.474 WARN org.apache.spark.sql.execution.datasources.orc.OrcUtils: Skipped the footer in the corrupted file: file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-33d52c3b-b5cf-47c8-af1a-b75d02d0fad5/second/part-00001-7f849003-f5b5-4de2-b331-e94c89c18a51-c000.json
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-33d52c3b-b5cf-47c8-af1a-b75d02d0fad5/second/part-00001-7f849003-f5b5-4de2-b331-e94c89c18a51-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$readSchema$1(OrcUtils.scala:65)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.readSchema(OrcUtils.scala:65)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$readSchema$4(OrcUtils.scala:88)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.readSchema(OrcUtils.scala:88)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.inferSchema(OrcUtils.scala:114)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcTable.inferSchema(OrcTable.scala:44)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.$anonfun$dataSchema$4(FileTable.scala:69)
	at scala.Option.orElse(Option.scala:447)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.dataSchema$lzycompute(FileTable.scala:69)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.dataSchema(FileTable.scala:63)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.schema$lzycompute(FileTable.scala:82)
	at org.apache.spark.sql.execution.datasources.v2.FileTable.schema(FileTable.scala:80)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation$.create(DataSourceV2Relation.scala:141)
	at org.apache.spark.sql.DataFrameReader.$anonfun$load$1(DataFrameReader.scala:229)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:197)
	at org.apache.spark.sql.DataFrameReader.orc(DataFrameReader.scala:727)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$153(OrcQuerySuite.scala:570)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$153$adapted(OrcQuerySuite.scala:564)
	at org.apache.spark.sql.test.SQLTestUtils.$anonfun$withTempDir$1(SQLTestUtils.scala:76)
	at org.apache.spark.sql.test.SQLTestUtils.$anonfun$withTempDir$1$adapted(SQLTestUtils.scala:75)
	at org.apache.spark.SparkFunSuite.withTempDir(SparkFunSuite.scala:161)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.org$apache$spark$sql$test$SQLTestUtils$$super$withTempDir(OrcTest.scala:50)
	at org.apache.spark.sql.test.SQLTestUtils.withTempDir(SQLTestUtils.scala:75)
	at org.apache.spark.sql.test.SQLTestUtils.withTempDir$(SQLTestUtils.scala:74)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.withTempDir(OrcTest.scala:50)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.testAllCorruptFiles$1(OrcQuerySuite.scala:564)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$156(OrcQuerySuite.scala:591)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.Assertions.intercept(Assertions.scala:807)
	at org.scalatest.Assertions.intercept$(Assertions.scala:804)
	at org.scalatest.FunSuite.intercept(FunSuite.scala:1560)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$155(OrcQuerySuite.scala:590)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf(SQLHelper.scala:52)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf$(SQLHelper.scala:36)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(OrcTest.scala:50)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:231)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:229)
	at org.apache.spark.sql.execution.datasources.orc.OrcTest.withSQLConf(OrcTest.scala:50)
	at org.apache.spark.sql.execution.datasources.orc.OrcQueryTest.$anonfun$new$148(OrcQuerySuite.scala:587)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:149)
	at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
	at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:286)
	at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:56)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:56)
	at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:393)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:381)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:376)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:458)
	at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite.run(Suite.scala:1124)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:518)
	at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:56)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:56)
	at org.scalatest.Suite.callExecuteOnSuite$1(Suite.scala:1187)
	at org.scalatest.Suite.$anonfun$runNestedSuites$1(Suite.scala:1234)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at org.scalatest.Suite.runNestedSuites(Suite.scala:1232)
	at org.scalatest.Suite.runNestedSuites$(Suite.scala:1166)
	at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:30)
	at org.scalatest.Suite.run(Suite.scala:1121)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:30)
	at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:45)
	at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13(Runner.scala:1349)
	at org.scalatest.tools.Runner$.$anonfun$doRunRunRunDaDoRunRun$13$adapted(Runner.scala:1343)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:1343)
	at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24(Runner.scala:1033)
	at org.scalatest.tools.Runner$.$anonfun$runOptionallyWithPassFailReporter$24$adapted(Runner.scala:1011)
	at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:1509)
	at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1011)
	at org.scalatest.tools.Runner$.main(Runner.scala:827)
	at org.scalatest.tools.Runner.main(Runner.scala)
09:47:19.907 WARN org.apache.spark.sql.execution.datasources.v2.FilePartitionReader: Skipped the rest of the content in the corrupted file.
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-03dbdcef-1649-4cc4-8faa-549d064ca9b6/first/part-00001-dc2cc765-316c-4618-9fe5-fd0a414c7511-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:19.907 WARN org.apache.spark.sql.execution.datasources.v2.FilePartitionReader: Skipped the rest of the content in the corrupted file.
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-03dbdcef-1649-4cc4-8faa-549d064ca9b6/second/part-00001-cec5d78b-c764-47d6-bdbb-a84843972ab2-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:20.541 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 193.0 (TID 408)
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-62451955-1633-41c2-984c-635c074bdb43/third/part-00001-3edf0e6b-50a4-4484-a11e-11098f166ff7-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1804)
	at org.apache.spark.rdd.RDD.$anonfun$count$1(RDD.scala:1227)
	at org.apache.spark.rdd.RDD.$anonfun$count$1$adapted(RDD.scala:1227)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:20.546 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 193.0 (TID 408, 192.168.10.32, executor driver): org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-62451955-1633-41c2-984c-635c074bdb43/third/part-00001-3edf0e6b-50a4-4484-a11e-11098f166ff7-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1804)
	at org.apache.spark.rdd.RDD.$anonfun$count$1(RDD.scala:1227)
	at org.apache.spark.rdd.RDD.$anonfun$count$1$adapted(RDD.scala:1227)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:20.546 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 193.0 failed 1 times; aborting job
09:47:21.151 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 197.0 (TID 416)
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-dfc157a3-8b65-40d4-9bb6-e34ceeb17065/third/part-00001-f94c225a-0e97-4547-a727-39baf69620e2-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1804)
	at org.apache.spark.rdd.RDD.$anonfun$count$1(RDD.scala:1227)
	at org.apache.spark.rdd.RDD.$anonfun$count$1$adapted(RDD.scala:1227)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:21.153 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 197.0 (TID 416, 192.168.10.32, executor driver): org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-dfc157a3-8b65-40d4-9bb6-e34ceeb17065/third/part-00001-f94c225a-0e97-4547-a727-39baf69620e2-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:92)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1804)
	at org.apache.spark.rdd.RDD.$anonfun$count$1(RDD.scala:1227)
	at org.apache.spark.rdd.RDD.$anonfun$count$1$adapted(RDD.scala:1227)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2156)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:21.154 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 197.0 failed 1 times; aborting job
09:47:21.897 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 202.0 (TID 426)
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-d2495e45-8900-4324-9088-abbecb8d9f64/second/part-00001-fb6dc1dc-3ff4-4fe3-8352-4427718f0c26-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:21.897 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 202.0 (TID 425)
org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-d2495e45-8900-4324-9088-abbecb8d9f64/first/part-00001-e9bdf31e-691b-42d4-a84a-950f4ccee928-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:21.898 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 202.0 (TID 426, 192.168.10.32, executor driver): org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-d2495e45-8900-4324-9088-abbecb8d9f64/second/part-00001-fb6dc1dc-3ff4-4fe3-8352-4427718f0c26-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:21.899 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 202.0 failed 1 times; aborting job
09:47:21.899 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 202.0 (TID 425, 192.168.10.32, executor driver): org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-d2495e45-8900-4324-9088-abbecb8d9f64/first/part-00001-e9bdf31e-691b-42d4-a84a-950f4ccee928-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:274)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:581)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:369)
	at org.apache.orc.OrcFile.createReader(OrcFile.java:343)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.$anonfun$buildColumnarReader$1(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2538)
	at org.apache.spark.sql.execution.datasources.v2.orc.OrcPartitionReaderFactory.buildColumnarReader(OrcPartitionReaderFactory.scala:124)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReaderFactory.$anonfun$createColumnarReader$1(FilePartitionReaderFactory.scala:38)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.getNextReader(FilePartitionReader.scala:106)
	at org.apache.spark.sql.execution.datasources.v2.FilePartitionReader.next(FilePartitionReader.scala:42)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:62)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

- Enabling/disabling ignoreCorruptFiles
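
The corrupt-file stack traces above belong to this test: JSON part files are deliberately fed to the ORC reader, and the outcome is governed by spark.sql.files.ignoreCorruptFiles. A minimal sketch of that behavior, assuming a local SparkSession; every path below is purely illustrative and nothing is taken from this run:

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("ignore-corrupt-files-sketch")
      .getOrCreate()

    val base = "/tmp/ignore-corrupt-demo"                      // hypothetical directory
    spark.range(0, 1).toDF("a").write.orc(s"$base/first")      // valid ORC data
    spark.range(1, 2).toDF("a").write.orc(s"$base/second")     // valid ORC data
    spark.range(2, 3).toDF("a").write.json(s"$base/third")     // JSON bytes, so not a valid ORC file

    spark.conf.set("spark.sql.files.ignoreCorruptFiles", "true")
    // the malformed file is skipped with a WARN like the ones logged above; only rows 0 and 1 come back
    spark.read.orc(s"$base/first", s"$base/second", s"$base/third").show()

    spark.conf.set("spark.sql.files.ignoreCorruptFiles", "false")
    // the same read now fails with a "Malformed ORC file ... Invalid postscript" error at execution time
    spark.read.orc(s"$base/first", s"$base/second", s"$base/third").show()
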
- SPARK-27160 Predicate pushdown correctness on DecimalType for ORC
- LZO compression options for writing to an ORC file
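
A related sketch for the compression tests above and the SPARK-16610 behavior: the DataFrameWriter option "compression" takes precedence, and orc.compress (OrcConf.COMPRESS) is honoured only when "compression" is not set. Paths and data are illustrative, reusing the hypothetical session from the sketch above:

    val df = spark.range(0, 10).toDF("a")

    df.write.mode("overwrite").option("compression", "zlib").orc("/tmp/orc-zlib-demo")
    df.write.mode("overwrite").option("orc.compress", "SNAPPY").orc("/tmp/orc-snappy-demo")

    // if both options are given, "compression" wins over "orc.compress"
    df.write.mode("overwrite")
      .option("orc.compress", "SNAPPY")
      .option("compression", "none")
      .orc("/tmp/orc-none-demo")
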
09:47:23.190 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-69a46bff-445e-4efe-a873-2871ed92a7cc was not found. Was it deleted very recently?
09:47:23.221 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/sql/core/target/tmp/spark-69a46bff-445e-4efe-a873-2871ed92a7cc was not found. Was it deleted very recently?
- Schema discovery on empty ORC files
- SPARK-21791 ORC should support column names with dot
- SPARK-25579 ORC PPD should support column names with dot
- SPARK-20728 Make ORCFileFormat configurable between sql/hive and sql/core
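
For the configurability test above (SPARK-20728), the implementation behind the orc data source is selected with spark.sql.orc.impl. A brief sketch, again using the hypothetical session and paths from the earlier sketches; note the "hive" value assumes the spark-hive module is on the classpath:

    // "native" = the sql/core reader (org.apache.spark.sql.execution.datasources.orc),
    // "hive"   = the sql/hive reader; "native" is the default in recent Spark releases
    spark.conf.set("spark.sql.orc.impl", "native")
    spark.read.orc(s"$base/first").count()

    spark.conf.set("spark.sql.orc.impl", "hive")
    spark.read.orc(s"$base/first").count()
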
09:47:24.985 WARN org.apache.spark.sql.execution.datasources.orc.OrcQuerySuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.datasources.orc.OrcQuerySuite, thread names: block-manager-slave-async-thread-pool-15, block-manager-ask-thread-pool-67, block-manager-slave-async-thread-pool-26, block-manager-slave-async-thread-pool-74, block-manager-slave-async-thread-pool-37, block-manager-ask-thread-pool-40, block-manager-ask-thread-pool-83, block-manager-ask-thread-pool-9, block-manager-ask-thread-pool-78, block-manager-slave-async-thread-pool-89, block-manager-slave-async-thread-pool-67, block-manager-slave-async-thread-pool-96, block-manager-slave-async-thread-pool-56, block-manager-ask-thread-pool-62, block-manager-ask-thread-pool-73, block-manager-ask-thread-pool-94, block-manager-ask-thread-pool-25, block-manager-ask-thread-pool-51, block-manager-ask-thread-pool-23, block-manager-slave-async-thread-pool-90, block-manager-slave-async-thread-pool-78, block-manager-slave-async-thread-pool-92, block-manager-slave-async-thread-pool-63, block-manager-slave-async-thread-pool-5, block-manager-ask-thread-pool-34, block-manager-slave-async-thread-pool-9, block-manager-ask-thread-pool-90, block-manager-slave-async-thread-pool-52, block-manager-ask-thread-pool-39, block-manager-ask-thread-pool-69, block-manager-slave-async-thread-pool-41, block-manager-slave-async-thread-pool-75, block-manager-ask-thread-pool-58, block-manager-ask-thread-pool-56, block-manager-slave-async-thread-pool-36, block-manager-ask-thread-pool-15, block-manager-slave-async-thread-pool-86, block-manager-slave-async-thread-pool-6, block-manager-slave-async-thread-pool-21, block-manager-slave-async-thread-pool-1, block-manager-ask-thread-pool-36, block-manager-ask-thread-pool-45, block-manager-slave-async-thread-pool-10, block-manager-slave-async-thread-pool-32, block-manager-ask-thread-pool-5, block-manager-slave-async-thread-pool-45, block-manager-slave-async-thread-pool-25, block-manager-slave-async-thread-pool-14, block-manager-slave-async-thread-pool-95, block-manager-ask-thread-pool-47, block-manager-ask-thread-pool-0, block-manager-ask-thread-pool-72, block-manager-ask-thread-pool-95, block-manager-ask-thread-pool-80, block-manager-slave-async-thread-pool-93, block-manager-slave-async-thread-pool-64, dispatcher-BlockManagerMaster, block-manager-slave-async-thread-pool-49, block-manager-slave-async-thread-pool-29, block-manager-ask-thread-pool-61, block-manager-slave-async-thread-pool-53, block-manager-slave-async-thread-pool-18, block-manager-ask-thread-pool-8, block-manager-ask-thread-pool-91, block-manager-slave-async-thread-pool-79, block-manager-slave-async-thread-pool-68, block-manager-ask-thread-pool-26, block-manager-ask-thread-pool-79, block-manager-ask-thread-pool-16, block-manager-ask-thread-pool-10, block-manager-ask-thread-pool-50, block-manager-slave-async-thread-pool-57, block-manager-slave-async-thread-pool-4, block-manager-ask-thread-pool-4, block-manager-slave-async-thread-pool-61, block-manager-slave-async-thread-pool-7, block-manager-ask-thread-pool-1, block-manager-ask-thread-pool-29, block-manager-ask-thread-pool-37, block-manager-ask-thread-pool-12, block-manager-slave-async-thread-pool-46, block-manager-ask-thread-pool-48, block-manager-slave-async-thread-pool-76, block-manager-ask-thread-pool-59, block-manager-slave-async-thread-pool-87, block-manager-slave-async-thread-pool-99, block-manager-slave-async-thread-pool-0, block-manager-ask-thread-pool-84, block-manager-slave-async-thread-pool-65, block-manager-slave-async-thread-pool-33, block-manager-ask-thread-pool-19, 
block-manager-slave-async-thread-pool-50, block-manager-ask-thread-pool-89, block-manager-slave-async-thread-pool-72, block-manager-slave-async-thread-pool-42, block-manager-slave-async-thread-pool-83, block-manager-slave-async-thread-pool-55, block-manager-slave-async-thread-pool-94, block-manager-ask-thread-pool-32, block-manager-slave-async-thread-pool-69, block-manager-ask-thread-pool-43, block-manager-ask-thread-pool-31, block-manager-ask-thread-pool-71, block-manager-slave-async-thread-pool-54, block-manager-ask-thread-pool-27, block-manager-ask-thread-pool-21, block-manager-ask-thread-pool-11, block-manager-ask-thread-pool-76, block-manager-slave-async-thread-pool-28, block-manager-ask-thread-pool-54, block-manager-slave-async-thread-pool-66, block-manager-slave-async-thread-pool-88, block-manager-slave-async-thread-pool-98, block-manager-ask-thread-pool-2, block-manager-slave-async-thread-pool-48, block-manager-slave-async-thread-pool-58, block-manager-slave-async-thread-pool-22, block-manager-slave-async-thread-pool-39, block-manager-ask-thread-pool-92, block-manager-slave-async-thread-pool-17, block-manager-ask-thread-pool-65, block-manager-ask-thread-pool-7, block-manager-slave-async-thread-pool-77, block-manager-ask-thread-pool-17, block-manager-slave-async-thread-pool-11, block-manager-ask-thread-pool-81, block-manager-slave-async-thread-pool-3, block-manager-slave-async-thread-pool-23, block-manager-ask-thread-pool-18, block-manager-ask-thread-pool-64, block-manager-ask-thread-pool-96, block-manager-ask-thread-pool-3, block-manager-ask-thread-pool-85, block-manager-slave-async-thread-pool-47, block-manager-slave-async-thread-pool-30, block-manager-ask-thread-pool-13, block-manager-slave-async-thread-pool-8, block-manager-slave-async-thread-pool-59, block-manager-ask-thread-pool-38, block-manager-ask-thread-pool-99, block-manager-ask-thread-pool-60, block-manager-ask-thread-pool-49, block-manager-slave-async-thread-pool-60, block-manager-slave-async-thread-pool-71, block-manager-ask-thread-pool-75, block-manager-slave-async-thread-pool-82, block-manager-ask-thread-pool-42, block-manager-ask-thread-pool-53, block-manager-ask-thread-pool-88, block-manager-slave-async-thread-pool-34, block-manager-slave-async-thread-pool-43, block-manager-slave-async-thread-pool-44, block-manager-ask-thread-pool-46, block-manager-slave-async-thread-pool-84, block-manager-ask-thread-pool-55, block-manager-slave-async-thread-pool-16, block-manager-ask-thread-pool-86, block-manager-ask-thread-pool-97, block-manager-ask-thread-pool-22, block-manager-ask-thread-pool-35, block-manager-slave-async-thread-pool-27, block-manager-slave-async-thread-pool-73, block-manager-slave-async-thread-pool-38, block-manager-ask-thread-pool-33, block-manager-ask-thread-pool-63, block-manager-slave-async-thread-pool-51, block-manager-slave-async-thread-pool-12, block-manager-ask-thread-pool-57, block-manager-ask-thread-pool-66, block-manager-ask-thread-pool-14, block-manager-slave-async-thread-pool-97, block-manager-ask-thread-pool-52, block-manager-slave-async-thread-pool-2, block-manager-ask-thread-pool-77, block-manager-ask-thread-pool-74, block-manager-ask-thread-pool-28, block-manager-slave-async-thread-pool-62, block-manager-ask-thread-pool-68, block-manager-ask-thread-pool-41, block-manager-slave-async-thread-pool-81, block-manager-ask-thread-pool-30, dispatcher-BlockManagerEndpoint180, block-manager-slave-async-thread-pool-35, block-manager-ask-thread-pool-70, block-manager-ask-thread-pool-24, 
block-manager-slave-async-thread-pool-91, block-manager-slave-async-thread-pool-20, block-manager-slave-async-thread-pool-40, block-manager-ask-thread-pool-44, block-manager-slave-async-thread-pool-19, block-manager-ask-thread-pool-98, block-manager-ask-thread-pool-20, block-manager-ask-thread-pool-87, block-manager-slave-async-thread-pool-70, block-manager-slave-async-thread-pool-85, block-manager-slave-async-thread-pool-13, block-manager-ask-thread-pool-6, block-manager-ask-thread-pool-93, block-manager-ask-thread-pool-82, block-manager-slave-async-thread-pool-24, block-manager-slave-async-thread-pool-31, block-manager-slave-async-thread-pool-80 =====

MicroBatchExecutionSuite:
09:47:27.224 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 3 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:27.225 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 3 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:27.273 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 3 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:27.275 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 3 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:27.316 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 3 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:28.337 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 5 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:28.338 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 5 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:28.378 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 5 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:28.381 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 5 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
09:47:28.421 WARN org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider: The state for version 5 doesn't exist in loadedMaps. Reading snapshot file and delta files if needed...Note that this is normal for the first batch of starting query.
- SPARK-24156: do not plan a no-data batch again after it has already been planned
09:47:29.257 WARN org.apache.spark.sql.execution.streaming.MicroBatchExecutionSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.streaming.MicroBatchExecutionSuite, thread names: state-store-maintenance-task =====

FileBasedDataSourceSuite:
- Writing empty datasets should not fail - orc
- Writing empty datasets should not fail - parquet
- Writing empty datasets should not fail - csv
- Writing empty datasets should not fail - json
- Writing empty datasets should not fail - text
- SPARK-23072 Write and read back unicode column names - orc
- SPARK-23072 Write and read back unicode column names - parquet
- SPARK-23072 Write and read back unicode column names - csv
- SPARK-23072 Write and read back unicode column names - json
- SPARK-15474 Write and read back non-empty schema with empty dataframe - orc
- SPARK-15474 Write and read back non-empty schema with empty dataframe - parquet
- SPARK-23271 empty RDD when saved should write a metadata only file - orc
- SPARK-23271 empty RDD when saved should write a metadata only file - parquet
- SPARK-23372 error while writing empty schema files using orc
- SPARK-23372 error while writing empty schema files using parquet
- SPARK-23372 error while writing empty schema files using csv
- SPARK-23372 error while writing empty schema files using json
- SPARK-23372 error while writing empty schema files using text
- SPARK-22146 read files containing special characters using orc
- SPARK-22146 read files containing special characters using parquet
- SPARK-22146 read files containing special characters using csv
- SPARK-22146 read files containing special characters using json
- SPARK-22146 read files containing special characters using text
- SPARK-23148 read files containing special characters using json with multiline enabled
- SPARK-23148 read files containing special characters using csv with multiline enabled
- Enabling/disabling ignoreMissingFiles using orc
- Enabling/disabling ignoreMissingFiles using parquet
- Enabling/disabling ignoreMissingFiles using csv
- Enabling/disabling ignoreMissingFiles using json
- Enabling/disabling ignoreMissingFiles using text
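
The ignoreMissingFiles tests above cover spark.sql.files.ignoreMissingFiles: files that were listed when the DataFrame was built but have disappeared by execution time are either skipped or make the job fail. A rough, self-contained sketch; the deletion step only simulates a file vanishing, and every path is made up:

    import java.nio.file.{Files, Paths}
    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().master("local[2]").getOrCreate()

    val base = "/tmp/ignore-missing-demo"                                        // hypothetical directory
    spark.range(0, 10, 1, 2).toDF("a").write.mode("overwrite").parquet(base)     // two part files
    val df = spark.read.parquet(base)                                            // file listing is captured here

    // simulate one part file disappearing after the DataFrame was created
    Files.list(Paths.get(base))
      .filter(p => p.toString.endsWith(".parquet"))
      .findFirst()
      .ifPresent(p => Files.delete(p))

    spark.conf.set("spark.sql.files.ignoreMissingFiles", "true")
    df.count()    // succeeds; the missing part file is skipped with a warning

    spark.conf.set("spark.sql.files.ignoreMissingFiles", "false")
    df.count()    // fails, with a FileNotFoundException for the deleted part file as the cause
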
- SPARK-24691 error handling for unsupported types - text
- SPARK-24204 error handling for unsupported Array/Map/Struct types - csv
- SPARK-24204 error handling for unsupported Interval data types - csv, json, parquet, orc
09:47:56.422 WARN org.apache.spark.sql.catalyst.analysis.SimpleFunctionRegistry: The function testtype replaced a previously registered function.
09:47:56.735 WARN org.apache.spark.sql.catalyst.analysis.SimpleFunctionRegistry: The function testtype replaced a previously registered function.
- SPARK-24204 error handling for unsupported Null data types - csv, parquet, orc
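
The SPARK-24204 tests above verify that the built-in file formats reject column types they cannot store. A hedged sketch of the null-type case (paths illustrative; the exact exception message is not quoted here):

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.lit

    val spark = SparkSession.builder().master("local[2]").getOrCreate()

    val df = spark.range(0, 2).toDF("i").withColumn("n", lit(null))   // column "n" is NullType

    // each write is expected to fail with an AnalysisException stating that the
    // data source does not support the null data type
    df.write.mode("overwrite").parquet("/tmp/null-demo-parquet")
    df.write.mode("overwrite").orc("/tmp/null-demo-orc")
    df.write.mode("overwrite").csv("/tmp/null-demo-csv")
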
09:47:57.773 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 208.0 (TID 287)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$7(ParquetReadSupport.scala:335)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$6(ParquetReadSupport.scala:330)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.Iterator.foreach(Iterator.scala:941)
	at scala.collection.Iterator.foreach$(Iterator.scala:941)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
	at scala.collection.IterableLike.foreach(IterableLike.scala:74)
	at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
	at org.apache.spark.sql.types.StructType.foreach(StructType.scala:99)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at org.apache.spark.sql.types.StructType.map(StructType.scala:99)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetGroupFields(ParquetReadSupport.scala:327)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetSchema(ParquetReadSupport.scala:147)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport.init(ParquetReadSupport.scala:82)
	at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.initialize(SpecificParquetRecordReaderBase.java:141)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initialize(VectorizedParquetRecordReader.java:131)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.$anonfun$buildReaderWithPartitionValues$2(ParquetFileFormat.scala:319)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:57.773 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 208.0 (TID 286)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$7(ParquetReadSupport.scala:335)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$6(ParquetReadSupport.scala:330)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.Iterator.foreach(Iterator.scala:941)
	at scala.collection.Iterator.foreach$(Iterator.scala:941)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
	at scala.collection.IterableLike.foreach(IterableLike.scala:74)
	at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
	at org.apache.spark.sql.types.StructType.foreach(StructType.scala:99)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at org.apache.spark.sql.types.StructType.map(StructType.scala:99)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetGroupFields(ParquetReadSupport.scala:327)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetSchema(ParquetReadSupport.scala:147)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport.init(ParquetReadSupport.scala:82)
	at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.initialize(SpecificParquetRecordReaderBase.java:141)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initialize(VectorizedParquetRecordReader.java:131)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.$anonfun$buildReaderWithPartitionValues$2(ParquetFileFormat.scala:319)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:57.777 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 208.0 (TID 287, 192.168.10.32, executor driver): java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$7(ParquetReadSupport.scala:335)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$6(ParquetReadSupport.scala:330)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.Iterator.foreach(Iterator.scala:941)
	at scala.collection.Iterator.foreach$(Iterator.scala:941)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
	at scala.collection.IterableLike.foreach(IterableLike.scala:74)
	at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
	at org.apache.spark.sql.types.StructType.foreach(StructType.scala:99)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at org.apache.spark.sql.types.StructType.map(StructType.scala:99)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetGroupFields(ParquetReadSupport.scala:327)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetSchema(ParquetReadSupport.scala:147)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport.init(ParquetReadSupport.scala:82)
	at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.initialize(SpecificParquetRecordReaderBase.java:141)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initialize(VectorizedParquetRecordReader.java:131)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.$anonfun$buildReaderWithPartitionValues$2(ParquetFileFormat.scala:319)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:57.777 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 208.0 failed 1 times; aborting job
09:47:57.833 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 209.0 (TID 289)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$7(ParquetReadSupport.scala:335)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$6(ParquetReadSupport.scala:330)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.Iterator.foreach(Iterator.scala:941)
	at scala.collection.Iterator.foreach$(Iterator.scala:941)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
	at scala.collection.IterableLike.foreach(IterableLike.scala:74)
	at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
	at org.apache.spark.sql.types.StructType.foreach(StructType.scala:99)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at org.apache.spark.sql.types.StructType.map(StructType.scala:99)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetGroupFields(ParquetReadSupport.scala:327)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetSchema(ParquetReadSupport.scala:147)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport.init(ParquetReadSupport.scala:82)
	at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.initialize(SpecificParquetRecordReaderBase.java:141)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initialize(VectorizedParquetRecordReader.java:131)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.$anonfun$buildReaderWithPartitionValues$2(ParquetFileFormat.scala:319)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:57.834 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 209.0 (TID 288)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$7(ParquetReadSupport.scala:335)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$6(ParquetReadSupport.scala:330)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.Iterator.foreach(Iterator.scala:941)
	at scala.collection.Iterator.foreach$(Iterator.scala:941)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
	at scala.collection.IterableLike.foreach(IterableLike.scala:74)
	at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
	at org.apache.spark.sql.types.StructType.foreach(StructType.scala:99)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at org.apache.spark.sql.types.StructType.map(StructType.scala:99)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetGroupFields(ParquetReadSupport.scala:327)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetSchema(ParquetReadSupport.scala:147)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport.init(ParquetReadSupport.scala:82)
	at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.initialize(SpecificParquetRecordReaderBase.java:141)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initialize(VectorizedParquetRecordReader.java:131)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.$anonfun$buildReaderWithPartitionValues$2(ParquetFileFormat.scala:319)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:57.836 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 209.0 (TID 289, 192.168.10.32, executor driver): java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$7(ParquetReadSupport.scala:335)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.$anonfun$clipParquetGroupFields$6(ParquetReadSupport.scala:330)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.Iterator.foreach(Iterator.scala:941)
	at scala.collection.Iterator.foreach$(Iterator.scala:941)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1429)
	at scala.collection.IterableLike.foreach(IterableLike.scala:74)
	at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
	at org.apache.spark.sql.types.StructType.foreach(StructType.scala:99)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at org.apache.spark.sql.types.StructType.map(StructType.scala:99)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetGroupFields(ParquetReadSupport.scala:327)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport$.clipParquetSchema(ParquetReadSupport.scala:147)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetReadSupport.init(ParquetReadSupport.scala:82)
	at org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase.initialize(SpecificParquetRecordReaderBase.java:141)
	at org.apache.spark.sql.execution.datasources.parquet.VectorizedParquetRecordReader.initialize(VectorizedParquetRecordReader.java:131)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat.$anonfun$buildReaderWithPartitionValues$2(ParquetFileFormat.scala:319)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:57.836 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 209.0 failed 1 times; aborting job
- Spark native readers should respect spark.sql.caseSensitive - parquet
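
The 'Found duplicate field(s) "b": [b, B]' failures logged around this test (Parquet above, ORC below) happen when a file's physical schema contains columns differing only in case and the requested column is resolved case-insensitively. A minimal sketch, with the path and data purely illustrative:

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder().master("local[2]").getOrCreate()
    import spark.implicits._

    val path = "/tmp/case-sensitive-demo"                                 // hypothetical path

    spark.conf.set("spark.sql.caseSensitive", "true")
    Seq((1L, 2L)).toDF("b", "B").write.mode("overwrite").parquet(path)    // the file keeps both b and B

    // case-sensitive read: the requested "b" matches exactly one physical column
    spark.read.schema("b LONG").parquet(path).show()

    spark.conf.set("spark.sql.caseSensitive", "false")
    // case-insensitive read: "b" now matches both physical columns and the scan fails with
    // java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
    spark.read.schema("b LONG").parquet(path).show()
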
09:47:58.847 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 222.0 (TID 315)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8(OrcUtils.scala:168)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8$adapted(OrcUtils.scala:162)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7(OrcUtils.scala:162)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7$adapted(OrcUtils.scala:159)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.requestedColumnIds(OrcUtils.scala:159)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$4(OrcFileFormat.scala:185)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2539)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:183)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:58.847 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 222.0 (TID 314)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8(OrcUtils.scala:168)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8$adapted(OrcUtils.scala:162)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7(OrcUtils.scala:162)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7$adapted(OrcUtils.scala:159)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.requestedColumnIds(OrcUtils.scala:159)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$4(OrcFileFormat.scala:185)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2539)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:183)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:58.850 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 222.0 (TID 315, 192.168.10.32, executor driver): java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8(OrcUtils.scala:168)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8$adapted(OrcUtils.scala:162)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7(OrcUtils.scala:162)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7$adapted(OrcUtils.scala:159)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.requestedColumnIds(OrcUtils.scala:159)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$4(OrcFileFormat.scala:185)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2539)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:183)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:58.850 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 222.0 failed 1 times; aborting job
09:47:58.920 ERROR org.apache.spark.executor.Executor: Exception in task 1.0 in stage 223.0 (TID 317)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8(OrcUtils.scala:168)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8$adapted(OrcUtils.scala:162)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7(OrcUtils.scala:162)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7$adapted(OrcUtils.scala:159)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.requestedColumnIds(OrcUtils.scala:159)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$4(OrcFileFormat.scala:185)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2539)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:183)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:58.920 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 223.0 (TID 316)
java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8(OrcUtils.scala:168)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8$adapted(OrcUtils.scala:162)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7(OrcUtils.scala:162)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7$adapted(OrcUtils.scala:159)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.requestedColumnIds(OrcUtils.scala:159)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$4(OrcFileFormat.scala:185)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2539)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:183)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)
09:47:58.922 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 1.0 in stage 223.0 (TID 317, 192.168.10.32, executor driver): java.lang.RuntimeException: Found duplicate field(s) "b": [b, B] in case-insensitive mode
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8(OrcUtils.scala:168)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$8$adapted(OrcUtils.scala:162)
	at scala.Option.map(Option.scala:230)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7(OrcUtils.scala:162)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.$anonfun$requestedColumnIds$7$adapted(OrcUtils.scala:159)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)
	at org.apache.spark.sql.execution.datasources.orc.OrcUtils$.requestedColumnIds(OrcUtils.scala:159)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$4(OrcFileFormat.scala:185)
	at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2539)
	at org.apache.spark.sql.execution.datasources.orc.OrcFileFormat.$anonfun$buildReaderWithPartitionValues$2(OrcFileFormat.scala:183)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:116)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:486)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:726)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:339)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:872)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:872)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:441)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:444)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
	at java.base/java.lang.Thread.run(Thread.java:834)

09:47:58.922 ERROR org.apache.spark.scheduler.TaskSetManager: Task 1 in stage 223.0 failed 1 times; aborting job
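
The duplicate-field failures logged above belong to the case-sensitivity test reported on the next line; that test carries no failure marker, so the exceptions appear to be raised deliberately and asserted on rather than indicating a broken build. Below is a minimal sketch of the condition they exercise, assuming a local SparkSession and a hypothetical /tmp path (neither taken from this log):

    import org.apache.spark.sql.SparkSession

    object DuplicateOrcFieldSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[2]").getOrCreate()
        import spark.implicits._
        val path = "/tmp/orc-mixed-case"   // hypothetical path, not from the log

        // Write an ORC file whose physical schema contains both `b` and `B`;
        // this is only possible while analysis is case-sensitive.
        spark.conf.set("spark.sql.caseSensitive", "true")
        Seq((1, 2)).toDF("b", "B").write.mode("overwrite").orc(path)

        // Read it back case-insensitively with a lower-case user schema: the
        // requested column `b` now matches two physical fields, and the ORC
        // reader is expected to fail with the "Found duplicate field(s)"
        // RuntimeException seen in the stack traces above.
        spark.conf.set("spark.sql.caseSensitive", "false")
        spark.read.schema("b INT").orc(path).show()
      }
    }
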
- Spark native readers should respect spark.sql.caseSensitive - orc
- SPARK-25237 compute correct input metrics in FileScanRDD
- Do not use cache on overwrite
- Do not use cache on append
- UDF input_file_name()
- Option pathGlobFilter: filter files correctly
- Option pathGlobFilter: simple extension filtering should contains partition info
- Option recursiveFileLookup: recursive loading correctly
- Option recursiveFileLookup: disable partition inferring
- Return correct results when data columns overlap with partition columns
- Return correct results when data columns overlap with partition columns (nested data)
- sizeInBytes should be the total size of all files
- SPARK-22790,SPARK-27668: spark.sql.sources.compressionFactor takes effect
- File source v2: support partition pruning
- File table location should include both values of option `path` and `paths`
ExtraStrategiesSuite:
- insert an extraStrategy
OrcV1SchemaPruningSuite:
- Spark vectorized reader - without partition data column - select only top-level fields
- Spark vectorized reader - with partition data column - select only top-level fields
- Non-vectorized reader - without partition data column - select only top-level fields
- Non-vectorized reader - with partition data column - select only top-level fields
- Spark vectorized reader - without partition data column - select a single complex field with disabled nested schema pruning
- Spark vectorized reader - with partition data column - select a single complex field with disabled nested schema pruning
- Non-vectorized reader - without partition data column - select a single complex field with disabled nested schema pruning
- Non-vectorized reader - with partition data column - select a single complex field with disabled nested schema pruning
- Spark vectorized reader - without partition data column - select only input_file_name()
- Spark vectorized reader - with partition data column - select only input_file_name()
- Non-vectorized reader - without partition data column - select only input_file_name()
- Non-vectorized reader - with partition data column - select only input_file_name()
- Spark vectorized reader - without partition data column - select only expressions without references
- Spark vectorized reader - with partition data column - select only expressions without references
- Non-vectorized reader - without partition data column - select only expressions without references
- Non-vectorized reader - with partition data column - select only expressions without references
- Spark vectorized reader - without partition data column - select a single complex field
- Spark vectorized reader - with partition data column - select a single complex field
- Non-vectorized reader - without partition data column - select a single complex field
- Non-vectorized reader - with partition data column - select a single complex field
- Spark vectorized reader - without partition data column - select a single complex field and its parent struct
- Spark vectorized reader - with partition data column - select a single complex field and its parent struct
- Non-vectorized reader - without partition data column - select a single complex field and its parent struct
- Non-vectorized reader - with partition data column - select a single complex field and its parent struct
- Spark vectorized reader - without partition data column - select a single complex field array and its parent struct array
- Spark vectorized reader - with partition data column - select a single complex field array and its parent struct array
- Non-vectorized reader - without partition data column - select a single complex field array and its parent struct array
- Non-vectorized reader - with partition data column - select a single complex field array and its parent struct array
- Spark vectorized reader - without partition data column - select a single complex field from a map entry and its parent map entry
- Spark vectorized reader - with partition data column - select a single complex field from a map entry and its parent map entry
- Non-vectorized reader - without partition data column - select a single complex field from a map entry and its parent map entry
- Non-vectorized reader - with partition data column - select a single complex field from a map entry and its parent map entry
- Spark vectorized reader - without partition data column - select a single complex field and the partition column
- Spark vectorized reader - with partition data column - select a single complex field and the partition column
- Non-vectorized reader - without partition data column - select a single complex field and the partition column
- Non-vectorized reader - with partition data column - select a single complex field and the partition column
- Spark vectorized reader - without partition data column - partial schema intersection - select missing subfield
- Spark vectorized reader - with partition data column - partial schema intersection - select missing subfield
- Non-vectorized reader - without partition data column - partial schema intersection - select missing subfield
- Non-vectorized reader - with partition data column - partial schema intersection - select missing subfield
- Spark vectorized reader - without partition data column - no unnecessary schema pruning
- Spark vectorized reader - with partition data column - no unnecessary schema pruning
- Non-vectorized reader - without partition data column - no unnecessary schema pruning
- Non-vectorized reader - with partition data column - no unnecessary schema pruning
- Spark vectorized reader - without partition data column - empty schema intersection
- Spark vectorized reader - with partition data column - empty schema intersection
- Non-vectorized reader - without partition data column - empty schema intersection
- Non-vectorized reader - with partition data column - empty schema intersection
- Spark vectorized reader - without partition data column - select a single complex field and in where clause
- Spark vectorized reader - with partition data column - select a single complex field and in where clause
- Non-vectorized reader - without partition data column - select a single complex field and in where clause
- Non-vectorized reader - with partition data column - select a single complex field and in where clause
- Spark vectorized reader - without partition data column - select nullable complex field and having is not null predicate
- Spark vectorized reader - with partition data column - select nullable complex field and having is not null predicate
- Non-vectorized reader - without partition data column - select nullable complex field and having is not null predicate
- Non-vectorized reader - with partition data column - select nullable complex field and having is not null predicate
- Spark vectorized reader - without partition data column - select a single complex field and is null expression in project
- Spark vectorized reader - with partition data column - select a single complex field and is null expression in project
- Non-vectorized reader - without partition data column - select a single complex field and is null expression in project
- Non-vectorized reader - with partition data column - select a single complex field and is null expression in project
- Spark vectorized reader - without partition data column - select a single complex field array and in clause
- Spark vectorized reader - with partition data column - select a single complex field array and in clause
- Non-vectorized reader - without partition data column - select a single complex field array and in clause
- Non-vectorized reader - with partition data column - select a single complex field array and in clause
- Spark vectorized reader - without partition data column - select a single complex field from a map entry and in clause
- Spark vectorized reader - with partition data column - select a single complex field from a map entry and in clause
- Non-vectorized reader - without partition data column - select a single complex field from a map entry and in clause
- Non-vectorized reader - with partition data column - select a single complex field from a map entry and in clause
- Spark vectorized reader - without partition data column - select one complex field and having is null predicate on another complex field
- Spark vectorized reader - with partition data column - select one complex field and having is null predicate on another complex field
- Non-vectorized reader - without partition data column - select one complex field and having is null predicate on another complex field
- Non-vectorized reader - with partition data column - select one complex field and having is null predicate on another complex field
- Spark vectorized reader - without partition data column - select one deep nested complex field and having is null predicate on another deep nested complex field
- Spark vectorized reader - with partition data column - select one deep nested complex field and having is null predicate on another deep nested complex field
- Non-vectorized reader - without partition data column - select one deep nested complex field and having is null predicate on another deep nested complex field
- Non-vectorized reader - with partition data column - select one deep nested complex field and having is null predicate on another deep nested complex field
- Spark vectorized reader - without partition data column - select nested field from a complex map key using map_keys
- Spark vectorized reader - with partition data column - select nested field from a complex map key using map_keys
- Non-vectorized reader - without partition data column - select nested field from a complex map key using map_keys
- Non-vectorized reader - with partition data column - select nested field from a complex map key using map_keys
- Spark vectorized reader - without partition data column - select nested field from a complex map value using map_values
- Spark vectorized reader - with partition data column - select nested field from a complex map value using map_values
- Non-vectorized reader - without partition data column - select nested field from a complex map value using map_values
- Non-vectorized reader - with partition data column - select nested field from a complex map value using map_values
- Case-sensitive parser - mixed-case schema - select with exact column names
- Case-insensitive parser - mixed-case schema - select with exact column names
- Case-insensitive parser - mixed-case schema - select with lowercase column names
- Case-insensitive parser - mixed-case schema - select with different-case column names
- Case-insensitive parser - mixed-case schema - filter with different-case column names
- Case-insensitive parser - mixed-case schema - subquery filter with different-case column names
DataSourceV2SQLSessionCatalogSuite:
- InsertInto: when the table doesn't exist
- InsertInto: append to partitioned table - static clause
- InsertInto: static PARTITION clause fails with non-partition column
- InsertInto: dynamic PARTITION clause fails with non-partition column
- InsertInto: overwrite - dynamic clause - static mode
- InsertInto: overwrite - dynamic clause - dynamic mode
- InsertInto: overwrite - missing clause - static mode
- InsertInto: overwrite - missing clause - dynamic mode
- InsertInto: overwrite - static clause
- InsertInto: overwrite - mixed clause - static mode
- InsertInto: overwrite - mixed clause reordered - static mode
- InsertInto: overwrite - implicit dynamic partition - static mode
- InsertInto: overwrite - mixed clause - dynamic mode
- InsertInto: overwrite - mixed clause reordered - dynamic mode
- InsertInto: overwrite - implicit dynamic partition - dynamic mode
- InsertInto: overwrite - multiple static partitions - dynamic mode
- insertInto: append
- insertInto: append by position
- insertInto: append partitioned table
- insertInto: overwrite non-partitioned table
- insertInto: overwrite partitioned table in static mode
- insertInto: overwrite partitioned table in static mode by position
- insertInto: fails when missing a column
- insertInto: fails when an extra column is present
- insertInto: overwrite partitioned table in dynamic mode
- insertInto: overwrite partitioned table in dynamic mode by position
- AlterTable: table does not exist
- AlterTable: change rejected by implementation
- AlterTable: add top-level column
- AlterTable: add column with NOT NULL
- AlterTable: add column with comment
- AlterTable: add column with position
- AlterTable: add multiple columns
- AlterTable: add nested column
- AlterTable: add nested column to map key
- AlterTable: add nested column to map value
- AlterTable: add nested column to array element
- AlterTable: add complex column
- AlterTable: add nested column with comment
- AlterTable: add nested column parent must exist
- AlterTable: update column type int -> long
- AlterTable: SET/DROP NOT NULL
- AlterTable: update nested type float -> double
- AlterTable: update column with struct type fails
- AlterTable: update column with array type fails
- AlterTable: update column array element type
- AlterTable: update column with map type fails
- AlterTable: update column map value type
- AlterTable: update nested type in map key
- AlterTable: update nested type in map value
- AlterTable: update nested type in array
- AlterTable: update column must exist
- AlterTable: nested update column must exist
- AlterTable: update column type must be compatible
- AlterTable: update column comment
- AlterTable: update column position
- AlterTable: update column type and comment
- AlterTable: update nested column comment
- AlterTable: update nested column comment in map key
- AlterTable: update nested column comment in map value
- AlterTable: update nested column comment in array
- AlterTable: comment update column must exist
- AlterTable: nested comment update column must exist
- AlterTable: rename column
- AlterTable: rename nested column
- AlterTable: rename nested column in map key
- AlterTable: rename nested column in map value
- AlterTable: rename nested column in array element
- AlterTable: rename column must exist
- AlterTable: nested rename column must exist
- AlterTable: drop column
- AlterTable: drop nested column
- AlterTable: drop nested column in map key
- AlterTable: drop nested column in map value
- AlterTable: drop nested column in array element
- AlterTable: drop column must exist
- AlterTable: nested drop column must exist
- AlterTable: set location
- AlterTable: set partition location
- AlterTable: set table property
- AlterTable: remove table property
- saveAsTable: v2 table - table doesn't exist and default mode (ErrorIfExists)
- saveAsTable: v2 table - table doesn't exist and append mode
- saveAsTable: Append mode should not fail if the table not exists but a same-name temp view exist
- saveAsTable: v2 table - table exists
- saveAsTable: v2 table - table overwrite and table doesn't exist
- saveAsTable: v2 table - table overwrite and table exists
- saveAsTable: Overwrite mode should not drop the temp view if the table not exists but a same-name temp view exist
- saveAsTable: v2 table - ignore mode and table doesn't exist
- saveAsTable: v2 table - ignore mode and table exists
OffsetSeqLogSuite:
- OffsetSeqMetadata - deserialization
- OffsetSeqLog - serialization - deserialization
- deserialization log written by future version
- read Spark 2.1.0 log format
QueryExecutionSuite:
- dumping query execution info to a file
- dumping query execution info to an existing file
- dumping query execution info to non-existing folder
- dumping query execution info by invalid path
- limit number of fields by sql config
- check maximum fields restriction
- toString() exception/error handling
- SPARK-28346: clone the query plan between different stages
ApproxCountDistinctForIntervalsQuerySuite:
09:49:55.998 WARN org.apache.spark.scheduler.TaskSetManager: Stage 0 contains a task of very large size (1618 KiB). The maximum recommended task size is 1000 KiB.
- test ApproxCountDistinctForIntervals with large number of endpoints
DeprecatedDatasetAggregatorSuite:
- typed aggregation: TypedAggregator
- typed aggregation: TypedAggregator, expr, expr
- typed aggregation: in project list
- typed aggregate: avg, count, sum
- spark-15114 shorter system generated alias names
SparkSqlParserSuite:
- refresh resource
- create table - schema
- describe query
- query organization
- pipeline concatenation
- database and schema tokens are interchangeable
- manage resources
BatchEvalPythonExecSuite:
- Python UDF: push down deterministic FilterExec predicates
- Nested Python UDF: push down deterministic FilterExec predicates
- Python UDF: no push down on non-deterministic
- Python UDF: push down on deterministic predicates after the first non-deterministic
09:49:58.142 WARN org.apache.spark.sql.catalyst.optimizer.ExtractPythonUDFFromJoinCondition: The join condition:(dummyUDF(a#469657, c#469668) = dummyUDF(d#469669, c#469668)) of the join plan contains PythonUDF only, it will be moved out and the join plan will be turned to cross join.
- Python UDF refers to the attributes from more than one child
- SPARK-28422: GROUPED_AGG pandas_udf should work without group by clause
UDTRegistrationSuite:
- register non-UserDefinedType
- default UDTs
- query registered user class
- query unregistered user class
ParquetV1PartitionDiscoverySuite:
- column type inference
- parse invalid partitioned directories
- parse partition
- parse partition with base paths
- parse partitions
- parse partitions with type inference disabled
- read partitioned table - normal case
- read partitioned table using different path options
- read partitioned table - with nulls
- read partitioned table - merging compatible schemas
- SPARK-7847: Dynamic partition directory path escaping and unescaping
- Various partition value types
- Various inferred partition value types
- SPARK-8037: Ignores files whose name starts with dot
- SPARK-11678: Partition discovery stops at the root path of the dataset
- use basePath to specify the root dir of a partitioned table.
- use basePath and file globbing to selectively load partitioned table
- _SUCCESS should not break partitioning discovery
- listConflictingPartitionColumns
- Parallel partition discovery
- SPARK-15895 summary files in non-leaf partition directories
- SPARK-22109: Resolve type conflicts between strings and timestamps in partition column
- Resolve type conflicts - decimals, dates and timestamps in partition column
- SPARK-23436: invalid Dates should be inferred as String in partition inference
09:50:15.025 WARN org.apache.spark.sql.execution.datasources.DataSource: Found duplicate column(s) in the data schema and the partition schema: `ps`, `pi`;
- read partitioned table - partition key included in Parquet file
09:50:16.472 WARN org.apache.spark.sql.execution.datasources.DataSource: Found duplicate column(s) in the data schema and the partition schema: `ps`, `pi`;
- read partitioned table - with nulls and partition keys are included in Parquet file
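
The two duplicate-column warnings just above come from tests that deliberately include the partition key inside the data files themselves. A minimal sketch of that layout, assuming a local SparkSession and a hypothetical /tmp path (not taken from this log):

    import org.apache.spark.sql.SparkSession

    object PartitionColumnOverlapSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[2]").getOrCreate()
        import spark.implicits._
        val base = "/tmp/partition-overlap"   // hypothetical path, not from the log

        // Write a Parquet file that itself carries a `pi` column, but place it
        // under a Hive-style partition directory also named pi=1.
        Seq((1, 1), (2, 1)).toDF("id", "pi")
          .write.mode("overwrite").parquet(s"$base/pi=1")

        // Partition discovery infers `pi` from the directory name while the
        // files also contain a `pi` column, so resolving the relation logs a
        // "Found duplicate column(s) in the data schema and the partition
        // schema" warning like the ones above.
        spark.read.parquet(base).printSchema()
      }
    }
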
- SPARK-7749 Non-partitioned table should have empty partition spec
09:50:17.283 WARN org.apache.spark.sql.execution.datasources.DataSource: Found duplicate column(s) in the data schema and the partition schema: `a`;
- SPARK-18108 Parquet reader fails when data column types conflict with partition ones
- SPARK-21463: MetadataLogFileIndex should respect userSpecifiedSchema for partition cols
LogicalPlanTagInSparkPlanSuite:
- q1
- q2
- q3
- q4
- q5
- q6
- q7
- q8
- q9
- q10
- q11
- q12
- q13
- q14a
- q14b
- q15
- q16
- q17
- q18
- q19
- q20
- q21
- q22
- q23a
- q23b
- q24a
- q24b
- q25
- q26
- q27
- q28
- q29
- q30
- q31
- q32
- q33
- q34
- q35
- q36
- q37
- q38
- q39a
- q39b
- q40
- q41
- q42
- q43
09:51:03.503 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
09:51:03.504 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
- q44
- q45
- q46
- q47
- q48
09:51:07.012 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
09:51:07.013 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
09:51:07.013 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
09:51:07.013 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
09:51:07.014 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
09:51:07.014 WARN org.apache.spark.sql.execution.window.WindowExec: No Partition Defined for Window operation! Moving all data to a single partition, this can cause serious performance degradation.
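
The repeated WindowExec warnings above are emitted whenever a window specification has an ORDER BY but no PARTITION BY, which some of the TPC-DS queries exercised here evidently do; Spark then has to move the whole dataset into a single partition to compute the window. A minimal sketch of the pattern, with a local session and toy data assumed (not taken from this log):

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.expressions.Window
    import org.apache.spark.sql.functions.row_number

    object WindowWithoutPartitionSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[2]").getOrCreate()
        import spark.implicits._
        val df = Seq(("a", 1), ("a", 2), ("b", 3)).toDF("key", "ts")

        // No partitionBy: all rows are shuffled into one partition and the
        // "No Partition Defined for Window operation!" warning is logged.
        val unpartitioned = Window.orderBy("ts")
        df.withColumn("rn", row_number().over(unpartitioned)).show()

        // With a partitionBy clause the window work stays distributed and the
        // warning is not emitted.
        val partitioned = Window.partitionBy("key").orderBy("ts")
        df.withColumn("rn", row_number().over(partitioned)).show()
      }
    }
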
- q49
- q50
- q51
- q52
- q53
- q54
- q55
- q56
- q57
- q58
- q59
- q60
- q61
- q62
- q63
- q64
- q65
- q66
- q67
- q68
- q69
- q70
- q71
- q72
- q73
- q74
- q75
- q76
- q77
- q78
- q79
- q80
- q81
- q82
- q83
- q84
- q85
- q86
- q87
- q88
- q89
- q90
- q91
- q92
- q93
- q94
- q95
- q96
- q97
- q98
- q99
- q5a-v2.7
- q6-v2.7
- q10a-v2.7
- q11-v2.7
- q12-v2.7
- q14-v2.7
Build was aborted
Aborted by sknapp
Archiving artifacts
ERROR: Failed to archive artifacts: **/target/unit-tests.log
java.io.IOException: java.io.IOException: Failed to extract /home/jenkins/workspace/spark-master-test-maven-hadoop-3.2-jdk-11/transfer of 341 files
	at hudson.FilePath.readFromTar(FilePath.java:2300)
	at hudson.FilePath.copyRecursiveTo(FilePath.java:2209)
	at jenkins.model.StandardArtifactManager.archive(StandardArtifactManager.java:61)
	at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:236)
	at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:78)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
	at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:782)
	at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:723)
	at hudson.model.Build$BuildExecution.post2(Build.java:185)
	at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:668)
	at hudson.model.Run.execute(Run.java:1763)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
	at hudson.model.ResourceController.execute(ResourceController.java:98)
	at hudson.model.Executor.run(Executor.java:410)
Caused by: java.io.IOException: java.lang.InterruptedException
	at hudson.remoting.FastPipedInputStream.read(FastPipedInputStream.java:177)
	at hudson.util.HeadBufferingStream.read(HeadBufferingStream.java:61)
	at com.jcraft.jzlib.InflaterInputStream.fill(InflaterInputStream.java:175)
	at com.jcraft.jzlib.InflaterInputStream.read(InflaterInputStream.java:106)
	at org.apache.commons.compress.archivers.tar.TarArchiveInputStream.read(TarArchiveInputStream.java:614)
	at java.io.InputStream.read(InputStream.java:101)
	at org.apache.commons.io.IOUtils.copyLarge(IOUtils.java:1792)
	at org.apache.commons.io.IOUtils.copyLarge(IOUtils.java:1769)
	at org.apache.commons.io.IOUtils.copy(IOUtils.java:1744)
	at hudson.util.IOUtils.copy(IOUtils.java:40)
	at hudson.FilePath.readFromTar(FilePath.java:2290)
	... 13 more
Caused by: java.lang.InterruptedException
	at java.lang.Object.wait(Native Method)
	at hudson.remoting.FastPipedInputStream.read(FastPipedInputStream.java:175)
	... 23 more

	at hudson.FilePath.copyRecursiveTo(FilePath.java:2216)
	at jenkins.model.StandardArtifactManager.archive(StandardArtifactManager.java:61)
	at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:236)
	at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:78)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
	at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:782)
	at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:723)
	at hudson.model.Build$BuildExecution.post2(Build.java:185)
	at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:668)
	at hudson.model.Run.execute(Run.java:1763)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
	at hudson.model.ResourceController.execute(ResourceController.java:98)
	at hudson.model.Executor.run(Executor.java:410)
Caused by: java.util.concurrent.ExecutionException: java.io.IOException: This archives contains unclosed entries.
	at hudson.remoting.Channel$2.adapt(Channel.java:813)
	at hudson.remoting.Channel$2.adapt(Channel.java:808)
	at hudson.remoting.FutureAdapter.get(FutureAdapter.java:59)
	at hudson.FilePath.copyRecursiveTo(FilePath.java:2212)
	... 12 more
Caused by: java.io.IOException: This archives contains unclosed entries.
	at org.apache.commons.compress.archivers.tar.TarArchiveOutputStream.finish(TarArchiveOutputStream.java:225)
	at org.apache.commons.compress.archivers.tar.TarArchiveOutputStream.close(TarArchiveOutputStream.java:241)
	at hudson.util.io.TarArchiver.close(TarArchiver.java:111)
	at hudson.FilePath.writeToTar(FilePath.java:2263)
	at hudson.FilePath.access$2100(FilePath.java:190)
	at hudson.FilePath$45.invoke(FilePath.java:2202)
	at hudson.FilePath$45.invoke(FilePath.java:2198)
	at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2719)
	at hudson.remoting.UserRequest.perform(UserRequest.java:120)
	at hudson.remoting.UserRequest.perform(UserRequest.java:48)
	at hudson.remoting.Request$2.run(Request.java:326)
	at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:68)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
	at ......remote call to amp-jenkins-staging-worker-02(Native Method)
	at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1416)
	at hudson.remoting.UserResponse.retrieve(UserRequest.java:220)
	at hudson.remoting.Channel$2.adapt(Channel.java:811)
	... 15 more
Recording test results
Finished: ABORTED