Failed: Console Output

Skipping 5,730 KB..
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
18:37:43.438 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 156.0 (TID 271) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Casting 9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more

18:37:43.439 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 156.0 failed 1 times; aborting job
18:37:43.440 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Aborting job 17b6e83e-e360-42c6-b501-fa5a253ec6e1.
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 156.0 failed 1 times, most recent failure: Lost task 0.0 in stage 156.0 (TID 271) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Casting 9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2211)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2160)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2159)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2159)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1076)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2398)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2340)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2329)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:866)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2128)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:200)
	at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:178)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:108)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:106)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:120)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3681)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3679)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:612)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:607)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$sql$1(InsertSuite.scala:60)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$139(InsertSuite.scala:732)
	at org.scalatest.Assertions.intercept(Assertions.scala:749)
	at org.scalatest.Assertions.intercept$(Assertions.scala:746)
	at org.scalatest.funsuite.AnyFunSuite.intercept(AnyFunSuite.scala:1562)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$138(InsertSuite.scala:731)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable(SQLTestUtils.scala:305)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable$(SQLTestUtils.scala:303)
	at org.apache.spark.sql.sources.InsertSuite.withTable(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$137(InsertSuite.scala:728)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf(SQLHelper.scala:54)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf$(SQLHelper.scala:38)
	at org.apache.spark.sql.sources.InsertSuite.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:246)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:244)
	at org.apache.spark.sql.sources.InsertSuite.withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$136(InsertSuite.scala:728)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:189)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:176)
	at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:187)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:199)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:199)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:181)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:61)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:232)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:232)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:231)
	at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1562)
	at org.scalatest.Suite.run(Suite.scala:1112)
	at org.scalatest.Suite.run$(Suite.scala:1094)
	at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1562)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:236)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
	at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:236)
	at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:235)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:61)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:318)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:513)
	at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:413)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	... 3 more
Caused by: java.lang.ArithmeticException: Casting 9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
18:37:43.575 ERROR org.apache.spark.util.Utils: Aborting task
java.lang.ArithmeticException: Casting -9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
18:37:43.576 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Job job_20201028183743_0157 aborted.
18:37:43.576 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 157.0 (TID 272)
org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Casting -9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
18:37:43.579 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 157.0 (TID 272) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Casting -9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more

18:37:43.579 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 157.0 failed 1 times; aborting job
18:37:43.580 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Aborting job c91eb9fa-b31a-49ea-92aa-8feefc2b94ed.
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 157.0 failed 1 times, most recent failure: Lost task 0.0 in stage 157.0 (TID 272) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Casting -9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2211)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2160)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2159)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2159)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1076)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2398)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2340)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2329)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:866)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2128)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:200)
	at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:178)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:108)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:106)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:120)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3681)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3679)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:612)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:607)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$sql$1(InsertSuite.scala:60)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$140(InsertSuite.scala:738)
	at org.scalatest.Assertions.intercept(Assertions.scala:749)
	at org.scalatest.Assertions.intercept$(Assertions.scala:746)
	at org.scalatest.funsuite.AnyFunSuite.intercept(AnyFunSuite.scala:1562)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$138(InsertSuite.scala:737)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable(SQLTestUtils.scala:305)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable$(SQLTestUtils.scala:303)
	at org.apache.spark.sql.sources.InsertSuite.withTable(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$137(InsertSuite.scala:728)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf(SQLHelper.scala:54)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf$(SQLHelper.scala:38)
	at org.apache.spark.sql.sources.InsertSuite.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:246)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:244)
	at org.apache.spark.sql.sources.InsertSuite.withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$136(InsertSuite.scala:728)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:189)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:176)
	at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:187)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:199)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:199)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:181)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:61)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:232)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:232)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:231)
	at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1562)
	at org.scalatest.Suite.run(Suite.scala:1112)
	at org.scalatest.Suite.run$(Suite.scala:1094)
	at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1562)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:236)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
	at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:236)
	at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:235)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:61)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:318)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:513)
	at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:413)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	... 3 more
Caused by: java.lang.ArithmeticException: Casting -9.223373E18 to long causes overflow
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
[info] - Throw exceptions on inserting out-of-range long value with ANSI casting policy (338 milliseconds)
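
The two aborted jobs above (stages 156 and 157) are the expected failures driving this test: with spark.sql.storeAssignmentPolicy=ANSI, inserting a floating-point value outside the long range throws an ArithmeticException instead of silently wrapping, and FileFormatWriter surfaces it as "Task failed while writing rows." A minimal sketch of the scenario, assuming a local SparkSession; the table name and literal are illustrative, not the suite's actual DDL:

```scala
import org.apache.spark.sql.SparkSession

// Minimal sketch; not the suite's test code.
val spark = SparkSession.builder()
  .master("local[1]")
  .appName("ansi-long-overflow")
  .getOrCreate()

// ANSI store assignment rejects out-of-range values instead of wrapping.
spark.conf.set("spark.sql.storeAssignmentPolicy", "ANSI")

spark.sql("CREATE TABLE t (b BIGINT) USING parquet")

// 9.223373E18 parses as a DOUBLE and exceeds Long.MaxValue
// (about 9.2233720368547758E18), so the ANSI cast to long throws:
//   java.lang.ArithmeticException: Casting 9.223373E18 to long causes overflow
spark.sql("INSERT INTO t VALUES (9.223373E18)")
```

The suite wraps each statement in a ScalaTest intercept (InsertSuite.scala:731-738 in the traces above) and asserts on the cause, so the ERROR lines are expected noise from a passing test.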
18:37:43.763 ERROR org.apache.spark.util.Utils: Aborting task
java.lang.ArithmeticException: Decimal(compact,12345,5,2}) cannot be represented as Decimal(3, 2).
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
18:37:43.764 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Job job_20201028183743_0158 aborted.
18:37:43.765 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 158.0 (TID 273)
org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Decimal(compact,12345,5,2}) cannot be represented as Decimal(3, 2).
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
18:37:43.767 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 158.0 (TID 273) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Decimal(compact,12345,5,2}) cannot be represented as Decimal(3, 2).
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more

18:37:43.767 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 158.0 failed 1 times; aborting job
18:37:43.768 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Aborting job 0402ad02-6364-4396-9b6f-7665b97b63ee.
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 158.0 failed 1 times, most recent failure: Lost task 0.0 in stage 158.0 (TID 273) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: Decimal(compact,12345,5,2}) cannot be represented as Decimal(3, 2).
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2211)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2160)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2159)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2159)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1076)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2398)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2340)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2329)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:866)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2128)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:200)
	at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:178)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:108)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:106)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:120)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3681)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3679)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:612)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:607)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$sql$1(InsertSuite.scala:60)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$144(InsertSuite.scala:752)
	at org.scalatest.Assertions.intercept(Assertions.scala:749)
	at org.scalatest.Assertions.intercept$(Assertions.scala:746)
	at org.scalatest.funsuite.AnyFunSuite.intercept(AnyFunSuite.scala:1562)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$143(InsertSuite.scala:751)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable(SQLTestUtils.scala:305)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable$(SQLTestUtils.scala:303)
	at org.apache.spark.sql.sources.InsertSuite.withTable(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$142(InsertSuite.scala:748)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf(SQLHelper.scala:54)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf$(SQLHelper.scala:38)
	at org.apache.spark.sql.sources.InsertSuite.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:246)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:244)
	at org.apache.spark.sql.sources.InsertSuite.withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$141(InsertSuite.scala:748)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:189)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:176)
	at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:187)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:199)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:199)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:181)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:61)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:232)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:232)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:231)
	at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1562)
	at org.scalatest.Suite.run(Suite.scala:1112)
	at org.scalatest.Suite.run$(Suite.scala:1094)
	at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1562)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:236)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
	at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:236)
	at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:235)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:61)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:318)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:513)
	at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:413)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	... 3 more
Caused by: java.lang.ArithmeticException: Decimal(compact,12345,5,2}) cannot be represented as Decimal(3, 2).
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:756)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:277)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
[info] - Throw exceptions on inserting out-of-range decimal value with ANSI casting policy (187 milliseconds)
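
The stage-158 failure is the decimal counterpart: 123.45 carries precision 5 and scale 2, while a DECIMAL(3, 2) column tops out at 9.99, so the ANSI store-assignment cast overflows. The unbalanced "}" in "Decimal(compact,12345,5,2})" is produced by this Spark version's Decimal.toDebugString, not log corruption. A minimal sketch, reusing the session from the previous sketch (table name illustrative):

```scala
// DECIMAL(3, 2) holds at most 3 digits with 2 after the point (max 9.99).
spark.sql("CREATE TABLE d (x DECIMAL(3, 2)) USING parquet")

// 123.45 is a DECIMAL(5, 2) literal (unscaled value 12345, precision 5,
// scale 2, i.e. "Decimal(compact,12345,5,2)"), so the write task fails:
//   java.lang.ArithmeticException:
//     Decimal(compact,12345,5,2}) cannot be represented as Decimal(3, 2).
spark.sql("INSERT INTO d VALUES (123.45)")
```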
[info] - SPARK-30844: static partition should also follow StoreAssignmentPolicy (588 milliseconds)
[info] - SPARK-24860: dynamic partition overwrite specified per source without catalog table (1 second, 69 milliseconds)
[info] - SPARK-24583 Wrong schema type in InsertIntoDataSourceCommand (93 milliseconds)
18:37:45.730 ERROR org.apache.spark.util.Utils: Aborting task
org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0175_m_000000_299/part1=1/part-00000-ec9e76fd-0b42-491d-95a6-a6bfd0603c61.c000.snappy.parquet already exists
	at org.apache.spark.sql.sources.FileExistingTestFileSystem.create(InsertSuite.scala:908)
	at org.apache.parquet.hadoop.util.HadoopOutputFile.create(HadoopOutputFile.java:74)
	at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:248)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:390)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:349)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetOutputWriter.scala:37)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:150)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.newOutputWriter(FileFormatDataWriter.scala:241)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.write(FileFormatDataWriter.scala:262)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:278)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
18:37:45.731 WARN org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter: Could not delete file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0175_m_000000_299
18:37:45.731 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Job job_20201028183745_0175 aborted.
18:37:45.731 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 175.0 (TID 299)
org.apache.spark.TaskOutputFileAlreadyExistException: org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0175_m_000000_299/part1=1/part-00000-ec9e76fd-0b42-491d-95a6-a6bfd0603c61.c000.snappy.parquet already exists
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:294)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0175_m_000000_299/part1=1/part-00000-ec9e76fd-0b42-491d-95a6-a6bfd0603c61.c000.snappy.parquet already exists
	at org.apache.spark.sql.sources.FileExistingTestFileSystem.create(InsertSuite.scala:908)
	at org.apache.parquet.hadoop.util.HadoopOutputFile.create(HadoopOutputFile.java:74)
	at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:248)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:390)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:349)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetOutputWriter.scala:37)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:150)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.newOutputWriter(FileFormatDataWriter.scala:241)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.write(FileFormatDataWriter.scala:262)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:278)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
18:37:45.738 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0.0 in stage 175.0 (TID 299) can not write to output file: org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0175_m_000000_299/part1=1/part-00000-ec9e76fd-0b42-491d-95a6-a6bfd0603c61.c000.snappy.parquet already exists; not retrying
18:37:45.739 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Aborting job 53b6d4fe-56b4-4390-a620-5c7f80bb032f.
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0.0 in stage 175.0 (TID 299) can not write to output file: org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0175_m_000000_299/part1=1/part-00000-ec9e76fd-0b42-491d-95a6-a6bfd0603c61.c000.snappy.parquet already exists
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2211)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2160)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2159)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2159)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1076)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1076)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2398)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2340)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2329)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:866)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2128)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:200)
	at org.apache.spark.sql.execution.datasources.InsertIntoHadoopFsRelationCommand.run(InsertIntoHadoopFsRelationCommand.scala:178)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:108)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:106)
	at org.apache.spark.sql.execution.command.DataWritingCommandExec.doExecute(commands.scala:131)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:175)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:213)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:210)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:171)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:127)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:126)
	at org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:985)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:769)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:985)
	at org.apache.spark.sql.DataFrameWriter.insertInto(DataFrameWriter.scala:541)
	at org.apache.spark.sql.DataFrameWriter.insertInto(DataFrameWriter.scala:496)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$163(InsertSuite.scala:842)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.Assertions.intercept(Assertions.scala:749)
	at org.scalatest.Assertions.intercept$(Assertions.scala:746)
	at org.scalatest.funsuite.AnyFunSuite.intercept(AnyFunSuite.scala:1562)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$162(InsertSuite.scala:841)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable(SQLTestUtils.scala:305)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTable$(SQLTestUtils.scala:303)
	at org.apache.spark.sql.sources.InsertSuite.withTable(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$161(InsertSuite.scala:833)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf(SQLHelper.scala:54)
	at org.apache.spark.sql.catalyst.plans.SQLHelper.withSQLConf$(SQLHelper.scala:38)
	at org.apache.spark.sql.sources.InsertSuite.org$apache$spark$sql$test$SQLTestUtilsBase$$super$withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf(SQLTestUtils.scala:246)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withSQLConf$(SQLTestUtils.scala:244)
	at org.apache.spark.sql.sources.InsertSuite.withSQLConf(InsertSuite.scala:57)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$160(InsertSuite.scala:833)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$160$adapted(InsertSuite.scala:829)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.sources.InsertSuite.$anonfun$new$159(InsertSuite.scala:829)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:189)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:176)
	at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:187)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:199)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:199)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:181)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:61)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:232)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:232)
	at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:231)
	at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1562)
	at org.scalatest.Suite.run(Suite.scala:1112)
	at org.scalatest.Suite.run$(Suite.scala:1094)
	at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1562)
	at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:236)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
	at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:236)
	at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:235)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:61)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:61)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:318)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:513)
	at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:413)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
18:37:45.922 ERROR org.apache.spark.util.Utils: Aborting task
org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0176_m_000000_300/part1=1/part-00000-1a4f37fa-71ba-40e6-9c3e-ee65a38f4160.c000.snappy.parquet already exists
	at org.apache.spark.sql.sources.FileExistingTestFileSystem.create(InsertSuite.scala:908)
	at org.apache.parquet.hadoop.util.HadoopOutputFile.create(HadoopOutputFile.java:74)
	at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:248)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:390)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:349)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetOutputWriter.scala:37)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:150)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.newOutputWriter(FileFormatDataWriter.scala:241)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.write(FileFormatDataWriter.scala:262)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:278)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
18:37:45.922 WARN org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter: Could not delete file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0176_m_000000_300
18:37:45.923 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Job job_20201028183745_0176 aborted.
18:37:45.923 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 176.0 (TID 300)
org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0176_m_000000_300/part1=1/part-00000-1a4f37fa-71ba-40e6-9c3e-ee65a38f4160.c000.snappy.parquet already exists
	at org.apache.spark.sql.sources.FileExistingTestFileSystem.create(InsertSuite.scala:908)
	at org.apache.parquet.hadoop.util.HadoopOutputFile.create(HadoopOutputFile.java:74)
	at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:248)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:390)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:349)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetOutputWriter.scala:37)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:150)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.newOutputWriter(FileFormatDataWriter.scala:241)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.write(FileFormatDataWriter.scala:262)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:278)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
18:37:45.925 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 176.0 (TID 300) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	... (stack trace and caused-by chain identical to the executor exception logged just above)

18:37:45.925 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 176.0 failed 1 times; aborting job
18:37:45.926 ERROR org.apache.spark.sql.execution.datasources.FileFormatWriter: Aborting job 57415719-eecb-4a67-80ff-c253b8349bad.
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 176.0 failed 1 times, most recent failure: Lost task 0.0 in stage 176.0 (TID 300) (amp-jenkins-worker-05.amp executor driver): org.apache.spark.SparkException: Task failed while writing rows.
	... (stack trace and caused-by chain identical to the executor exception logged above)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2211)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2160)
	... (driver frames identical to the driver stacktrace of the previous job abort above)
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:296)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$15(FileFormatWriter.scala:210)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	... 3 more
Caused by: org.apache.hadoop.fs.FileAlreadyExistsException: file:/home/jenkins/workspace/SparkPullRequestBuilder@3/sql/core/spark-warehouse/org.apache.spark.sql.sources.InsertSuite/t/_temporary/0/_temporary/attempt_20201028183745_0176_m_000000_300/part1=1/part-00000-1a4f37fa-71ba-40e6-9c3e-ee65a38f4160.c000.snappy.parquet already exists
	at org.apache.spark.sql.sources.FileExistingTestFileSystem.create(InsertSuite.scala:908)
	at org.apache.parquet.hadoop.util.HadoopOutputFile.create(HadoopOutputFile.java:74)
	at org.apache.parquet.hadoop.ParquetFileWriter.<init>(ParquetFileWriter.java:248)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:390)
	at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:349)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetOutputWriter.<init>(ParquetOutputWriter.scala:37)
	at org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat$$anon$1.newInstance(ParquetFileFormat.scala:150)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.newOutputWriter(FileFormatDataWriter.scala:241)
	at org.apache.spark.sql.execution.datasources.DynamicPartitionDataWriter.write(FileFormatDataWriter.scala:262)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:278)
	at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1460)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:286)
	... 9 more
[info] - Stop task set if FileAlreadyExistsException was thrown (408 milliseconds)
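Note: the aborted-job traces above are expected output from this passing test, which asserts that a write fails fast once the file system reports the target file as existing. A minimal sketch of the pattern visible in the trace (ScalaTest's intercept around DataFrameWriter.insertInto; the class name here is illustrative):

	import org.apache.spark.SparkException
	import org.apache.spark.sql.SparkSession
	import org.scalatest.funsuite.AnyFunSuite

	class FailFastWriteSketch extends AnyFunSuite {
	  test("write aborts when the file system reports the file as existing") {
	    val spark = SparkSession.builder().master("local[1]").getOrCreate()
	    import spark.implicits._
	    // Expected to abort: the real suite registers a FileSystem whose create()
	    // always throws FileAlreadyExistsException (FileExistingTestFileSystem,
	    // InsertSuite.scala:908 in the trace above); table "t" is assumed to exist.
	    intercept[SparkException] {
	      Seq((1, 1)).toDF("i", "part1").write.insertInto("t")
	    }
	  }
	}
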
18:37:45.950 WARN org.apache.spark.util.HadoopFSUtils: The directory file:/home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/spark-7fc217fa-34c8-4da7-9aaa-17c103f63015 was not found. Was it deleted very recently?
18:37:45.954 WARN org.apache.spark.util.HadoopFSUtils: The directory file:/home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/spark-7fc217fa-34c8-4da7-9aaa-17c103f63015 was not found. Was it deleted very recently?
[info] - SPARK-29174 Support LOCAL in INSERT OVERWRITE DIRECTORY to data source (853 milliseconds)
[info] - SPARK-29174 fail LOCAL in INSERT OVERWRITE DIRECT remote path (2 milliseconds)
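For reference, a hedged sketch of the statement shape the two SPARK-29174 tests above exercise (the path is a placeholder; per the second test, LOCAL is rejected for remote paths; a SparkSession named spark is assumed):

	spark.sql("""
	  INSERT OVERWRITE LOCAL DIRECTORY '/tmp/insert_dir_sketch'
	  USING parquet
	  SELECT 1 AS col
	""")
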
[info] - SPARK-32508 Disallow empty part col values in partition spec before static partition writing (673 milliseconds)
18:37:47.498 WARN org.apache.spark.sql.sources.InsertSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.sources.InsertSuite, thread names: shuffle-boss-2390-1, rpc-boss-2387-1 =====

[info] DataFrameSetOperationsSuite:
[info] - except (3 seconds, 820 milliseconds)
[info] - SPARK-23274: except between two projects without references used in filter (781 milliseconds)
[info] - except distinct - SQL compliance (390 milliseconds)
[info] - except - nullability (1 second, 476 milliseconds)
[info] - except all (5 seconds, 139 milliseconds)
[info] - exceptAll - nullability (1 second, 804 milliseconds)
[info] - intersect (2 seconds, 224 milliseconds)
[info] - intersect - nullability (2 seconds, 121 milliseconds)
[info] - intersectAll (2 seconds, 546 milliseconds)
[info] - intersectAll - nullability (1 second, 805 milliseconds)
[info] - SPARK-10539: Project should not be pushed down through Intersect or Except (449 milliseconds)
[info] - SPARK-10740: handle nondeterministic expressions correctly for set operations (1 second, 420 milliseconds)
[info] - SPARK-17123: Performing set operations that combine non-scala native types (487 milliseconds)
[info] - SPARK-19893: cannot run set operations with map type (22 milliseconds)
[info] - union all (1 second, 523 milliseconds)
[info] - union should union DataFrames with UDTs (SPARK-13410) (281 milliseconds)
[info] - union by name (235 milliseconds)
[info] - union by name - type coercion (702 milliseconds)
[info] - union by name - check case sensitivity (143 milliseconds)
[info] - union by name - check name duplication (50 milliseconds)
[info] - SPARK-25368 Incorrect predicate pushdown returns wrong result (719 milliseconds)
[info] - SPARK-29358: Make unionByName optionally fill missing columns with nulls (622 milliseconds)
[info] - SPARK-32376: Make unionByName null-filling behavior work with struct columns - simple (412 milliseconds)
[info] - SPARK-32376: Make unionByName null-filling behavior work with struct columns - nested (557 milliseconds)
[info] - SPARK-32376: Make unionByName null-filling behavior work with struct columns - case-sensitive cases (818 milliseconds)
[info] - SPARK-32376: Make unionByName null-filling behavior work with struct columns - edge case (157 milliseconds)
[info] - SPARK-32376: Make unionByName null-filling behavior work with struct columns - deep expr (2 seconds, 802 milliseconds)
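The SPARK-29358 and SPARK-32376 tests above cover unionByName's null-filling mode; a minimal sketch (assuming a SparkSession named spark):

	import spark.implicits._
	val df1 = Seq((1, 2)).toDF("a", "b")
	val df2 = Seq((3, 4)).toDF("a", "c")
	// Columns missing on either side are filled with nulls instead of failing analysis:
	df1.unionByName(df2, allowMissingColumns = true).show()
	// rows (1, 2, null) and (3, null, 4) over columns a, b, c
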
18:38:21.125 WARN org.apache.spark.sql.DataFrameSetOperationsSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.DataFrameSetOperationsSuite, thread names: shuffle-boss-2396-1, rpc-boss-2393-1 =====

[info] SparkPlannerSuite:
[info] - Ensure to go down only the first branch, not any other possible branches (86 milliseconds)
18:38:21.280 WARN org.apache.spark.sql.execution.SparkPlannerSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.SparkPlannerSuite, thread names: rpc-boss-2399-1, shuffle-boss-2402-1 =====

[info] DatasetSerializerRegistratorSuite:
[info] - Kryo registrator (51 milliseconds)
18:38:21.405 WARN org.apache.spark.sql.DatasetSerializerRegistratorSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.DatasetSerializerRegistratorSuite, thread names: shuffle-boss-2408-1, rpc-boss-2405-1 =====

[info] StreamingQueryStatusAndProgressSuite:
[info] - StreamingQueryProgress - prettyJson (2 milliseconds)
[info] - StreamingQueryProgress - json (1 millisecond)
[info] - StreamingQueryProgress - toString (0 milliseconds)
[info] - StreamingQueryStatus - prettyJson (0 milliseconds)
[info] - StreamingQueryStatus - json (0 milliseconds)
[info] - StreamingQueryStatus - toString (0 milliseconds)
18:38:21.497 WARN org.apache.spark.sql.streaming.StreamingQueryManager: Temporary checkpoint location created; it is deleted normally when the query does not fail: /home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/temporary-3bd04574-1689-491a-9c74-edc43987fd91. If it must be deleted under any circumstances, set spark.sql.streaming.forceDeleteTempCheckpointLocation to true. Note that deleting the temp checkpoint folder is best-effort.
[info] - progress classes should be Serializable (708 milliseconds)
18:38:22.202 WARN org.apache.spark.sql.streaming.StreamingQueryManager: Temporary checkpoint location created; it is deleted normally when the query does not fail: /home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/temporary-5459d35d-efd1-412b-93fc-52b32f30fbdf. If it must be deleted under any circumstances, set spark.sql.streaming.forceDeleteTempCheckpointLocation to true. Note that deleting the temp checkpoint folder is best-effort.
[info] - SPARK-19378: Continue reporting stateOp metrics even if there is no active trigger (567 milliseconds)
[info] - SPARK-29973: Make `processedRowsPerSecond` calculated more accurately and meaningfully (179 milliseconds)
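The two StreamingQueryManager warnings above point at a config key; a hedged example of the two ways to avoid best-effort temp-checkpoint cleanup (a SparkSession spark and a streaming DataFrame df are assumed):

	// Opt in to force-deleting temporary checkpoint locations even on failure:
	spark.conf.set("spark.sql.streaming.forceDeleteTempCheckpointLocation", "true")
	// Or avoid temporary checkpoints entirely by naming one explicitly:
	df.writeStream
	  .option("checkpointLocation", "/path/to/checkpoint")  // placeholder path
	  .format("memory").queryName("q")                      // illustrative sink
	  .start()
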
18:38:22.967 WARN org.apache.spark.sql.streaming.StreamingQueryStatusAndProgressSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.streaming.StreamingQueryStatusAndProgressSuite, thread names: state-store-maintenance-task, shuffle-boss-2414-1, rpc-boss-2411-1 =====

[info] StateStoreCompatibleSuite:
[info] - SPARK-33263: Recovery from checkpoint before codec config introduced - with codec lz4 *** FAILED *** (2 milliseconds)
[info]   java.lang.NullPointerException:
[info]   at org.apache.spark.sql.execution.streaming.state.StateStoreCompatibleSuite.$anonfun$new$66(StateStoreSuite.scala:824)
[info]   at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
[info]   at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
[info]   at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
[info]   at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
[info]   at org.scalatest.Transformer.apply(Transformer.scala:22)
[info]   at org.scalatest.Transformer.apply(Transformer.scala:20)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike$$anon$1.apply(AnyFunSuiteLike.scala:189)
[info]   at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:176)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.invokeWithFixture$1(AnyFunSuiteLike.scala:187)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTest$1(AnyFunSuiteLike.scala:199)
[info]   at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.runTest(AnyFunSuiteLike.scala:199)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.runTest$(AnyFunSuiteLike.scala:181)
[info]   at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:61)
[info]   at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:234)
[info]   at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:227)
[info]   at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:61)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$runTests$1(AnyFunSuiteLike.scala:232)
[info]   at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:413)
[info]   at scala.collection.immutable.List.foreach(List.scala:392)
[info]   at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
[info]   at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:396)
[info]   at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:475)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.runTests(AnyFunSuiteLike.scala:232)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.runTests$(AnyFunSuiteLike.scala:231)
[info]   at org.scalatest.funsuite.AnyFunSuite.runTests(AnyFunSuite.scala:1562)
[info]   at org.scalatest.Suite.run(Suite.scala:1112)
[info]   at org.scalatest.Suite.run$(Suite.scala:1094)
[info]   at org.scalatest.funsuite.AnyFunSuite.org$scalatest$funsuite$AnyFunSuiteLike$$super$run(AnyFunSuite.scala:1562)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.$anonfun$run$1(AnyFunSuiteLike.scala:236)
[info]   at org.scalatest.SuperEngine.runImpl(Engine.scala:535)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.run(AnyFunSuiteLike.scala:236)
[info]   at org.scalatest.funsuite.AnyFunSuiteLike.run$(AnyFunSuiteLike.scala:235)
[info]   at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:61)
[info]   at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
[info]   at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
[info]   at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
[info]   at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:61)
[info]   at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:318)
[info]   at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:513)
[info]   at sbt.ForkMain$Run.lambda$runTest$1(ForkMain.java:413)
[info]   at java.util.concurrent.FutureTask.run(FutureTask.java:266)
[info]   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[info]   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[info]   at java.lang.Thread.run(Thread.java:748)
[info] - SPARK-33263: Recovery from checkpoint before codec config introduced - with codec lzf *** FAILED *** (0 milliseconds)
[info]   java.lang.NullPointerException:
[info]   at org.apache.spark.sql.execution.streaming.state.StateStoreCompatibleSuite.$anonfun$new$66(StateStoreSuite.scala:824)
[info]   ... (remaining frames identical to the lz4 failure above)
[info] - SPARK-33263: Recovery from checkpoint before codec config introduced - with codec snappy *** FAILED *** (0 milliseconds)
[info]   java.lang.NullPointerException:
[info]   at org.apache.spark.sql.execution.streaming.state.StateStoreCompatibleSuite.$anonfun$new$66(StateStoreSuite.scala:824)
[info]   ... (remaining frames identical to the lz4 failure above)
[info] - SPARK-33263: Recovery from checkpoint before codec config introduced - with codec zstd *** FAILED *** (1 millisecond)
[info]   java.lang.NullPointerException:
[info]   at org.apache.spark.sql.execution.streaming.state.StateStoreCompatibleSuite.$anonfun$new$66(StateStoreSuite.scala:824)
[info]   ... (remaining frames identical to the lz4 failure above)
18:38:23.063 WARN org.apache.spark.sql.execution.streaming.state.StateStoreCompatibleSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.streaming.state.StateStoreCompatibleSuite, thread names: rpc-boss-2417-1, shuffle-boss-2420-1 =====

[info] PartitionBatchPruningSuite:
[info] - SELECT key FROM pruningData WHERE key = 1 (101 milliseconds)
[info] - SELECT key FROM pruningData WHERE 1 = key (57 milliseconds)
[info] - SELECT key FROM pruningData WHERE key <=> 1 (57 milliseconds)
[info] - SELECT key FROM pruningData WHERE 1 <=> key (65 milliseconds)
[info] - SELECT key FROM pruningData WHERE key < 12 (65 milliseconds)
[info] - SELECT key FROM pruningData WHERE key <= 11 (58 milliseconds)
[info] - SELECT key FROM pruningData WHERE key > 88 (55 milliseconds)
[info] - SELECT key FROM pruningData WHERE key >= 89 (62 milliseconds)
[info] - SELECT key FROM pruningData WHERE 12 > key (56 milliseconds)
[info] - SELECT key FROM pruningData WHERE 11 >= key (55 milliseconds)
[info] - SELECT key FROM pruningData WHERE 88 < key (55 milliseconds)
[info] - SELECT key FROM pruningData WHERE 89 <= key (53 milliseconds)
[info] - SELECT _1 FROM pruningArrayData WHERE _1 = array(1) (94 milliseconds)
[info] - SELECT _1 FROM pruningArrayData WHERE _1 <= array(1) (64 milliseconds)
[info] - SELECT _1 FROM pruningArrayData WHERE _1 >= array(1) (60 milliseconds)
[info] - SELECT _1 FROM pruningBinaryData WHERE _1 == binary(chr(1)) (83 milliseconds)
[info] - SELECT key FROM pruningData WHERE value IS NULL (73 milliseconds)
[info] - SELECT key FROM pruningData WHERE value IS NOT NULL (61 milliseconds)
[info] - SELECT key FROM pruningData WHERE key > 8 AND key <= 21 (65 milliseconds)
[info] - SELECT key FROM pruningData WHERE key < 2 OR key > 99 (65 milliseconds)
[info] - SELECT key FROM pruningData WHERE key < 12 AND key IS NOT NULL (47 milliseconds)
[info] - SELECT key FROM pruningData WHERE key < 2 OR (key > 78 AND key < 92) (69 milliseconds)
[info] - SELECT key FROM pruningData WHERE NOT (key < 88) (64 milliseconds)
[info] - SELECT key FROM pruningData WHERE key IN (1) (73 milliseconds)
[info] - SELECT key FROM pruningData WHERE key IN (1, 2) (66 milliseconds)
[info] - SELECT key FROM pruningData WHERE key IN (1, 11) (60 milliseconds)
[info] - SELECT key FROM pruningData WHERE key IN (1, 21, 41, 61, 81) (67 milliseconds)
[info] - SELECT CAST(s AS INT) FROM pruningStringData WHERE s = '100' (91 milliseconds)
[info] - SELECT CAST(s AS INT) FROM pruningStringData WHERE s < '102' (62 milliseconds)
[info] - SELECT CAST(s AS INT) FROM pruningStringData WHERE s IN ('99', '150', '201') (71 milliseconds)
[info] - SELECT _1 FROM pruningArrayData WHERE _1 IN (array(1), array(2, 2)) (67 milliseconds)
[info] - SELECT key FROM pruningData WHERE key IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) (56 milliseconds)
[info] - SELECT key FROM pruningData WHERE NOT (key IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)) (63 milliseconds)
[info] - SELECT key FROM pruningData WHERE NOT (key IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)) AND key > 88 (59 milliseconds)
[info] - SELECT CAST(s AS INT) FROM pruningStringData WHERE s like '18%' (73 milliseconds)
[info] - SELECT CAST(s AS INT) FROM pruningStringData WHERE s like '%' (63 milliseconds)
[info] - SELECT CAST(s AS INT) FROM pruningStringData WHERE '18%' like s (69 milliseconds)
[info] - disable IN_MEMORY_PARTITION_PRUNING (44 milliseconds)
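These queries all run against a cached in-memory table, so batch-level pruning can skip column batches whose min/max statistics exclude the predicate. A rough sketch (the conf key is assumed from SQLConf's IN_MEMORY_PARTITION_PRUNING; the data shape and SparkSession spark are also assumptions):

	import spark.implicits._
	(1 to 100).map(i => (i, i.toString)).toDF("key", "value").createOrReplaceTempView("pruningData")
	spark.catalog.cacheTable("pruningData")
	spark.sql("SELECT key FROM pruningData WHERE key = 1").collect()  // prunes non-matching batches
	// The final test above disables pruning and expects a full scan instead:
	spark.conf.set("spark.sql.inMemoryColumnarStorage.partitionPruning", "false")
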
18:38:28.993 WARN org.apache.spark.sql.execution.columnar.PartitionBatchPruningSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.columnar.PartitionBatchPruningSuite, thread names: shuffle-boss-2426-1, rpc-boss-2423-1 =====

[info] FileStreamSinkLogSuite:
[info] - shouldRetain (16 milliseconds)
[info] - serialize (6 milliseconds)
[info] - deserialize (9 milliseconds)
[info] - compact (271 milliseconds)
[info] - delete expired file (240 milliseconds)
[info] - read Spark 2.1.0 log format (3 milliseconds)
18:38:29.611 WARN org.apache.spark.sql.execution.streaming.CheckpointFileManager: Could not use FileContext API for managing Structured Streaming checkpoint files at FileStreamSinkLogSuite1745176753fs:/home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/spark-68f8c7e4-15a5-48ed-ae44-0e0047eb172f. Using FileSystem API instead for managing log files. If the implementation of FileSystem.rename() is not atomic, then the correctness and fault-tolerance of your Structured Streaming is not guaranteed.
[info] - getLatestBatchId (23 milliseconds)
18:38:29.650 WARN org.apache.spark.sql.execution.streaming.FileStreamSinkLogSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.streaming.FileStreamSinkLogSuite, thread names: rpc-boss-2429-1, shuffle-boss-2432-1 =====

[info] VectorizedOrcReadSchemaSuite:
[info] - append column at the end (660 milliseconds)
[info] - hide column at the end (581 milliseconds)
[info] - append column into middle (484 milliseconds)
[info] - hide column in the middle (421 milliseconds)
[info] - add a nested column at the end of the leaf struct column (428 milliseconds)
[info] - add a nested column in the middle of the leaf struct column (465 milliseconds)
[info] - add a nested column at the end of the middle struct column (443 milliseconds)
[info] - add a nested column in the middle of the middle struct column (442 milliseconds)
[info] - hide a nested column at the end of the leaf struct column (609 milliseconds)
[info] - hide a nested column in the middle of the leaf struct column (601 milliseconds)
[info] - hide a nested column at the end of the middle struct column (643 milliseconds)
[info] - hide a nested column in the middle of the middle struct column (593 milliseconds)
[info] - change column position (478 milliseconds)
[info] - change column type from boolean to byte/short/int/long (749 milliseconds)
[info] - change column type from byte to short/int/long (485 milliseconds)
[info] - change column type from short to int/long (391 milliseconds)
[info] - change column type from int to long (256 milliseconds)
[info] - read byte, int, short, long together (839 milliseconds)
[info] - change column type from float to double (303 milliseconds)
[info] - read float and double together (455 milliseconds)
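A hedged sketch of the read-schema evolution these ORC tests cover: write with one schema, then read back with a changed one (the path is a placeholder; a SparkSession spark is assumed):

	val path = "/tmp/orc_read_schema_sketch"
	spark.range(3).selectExpr("CAST(id AS INT) AS a").write.mode("overwrite").orc(path)
	// `a` is widened int -> long; the appended column `b` reads back as null:
	spark.read.schema("a LONG, b STRING").orc(path).show()
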
18:38:40.058 WARN org.apache.spark.sql.execution.datasources.VectorizedOrcReadSchemaSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.datasources.VectorizedOrcReadSchemaSuite, thread names: rpc-boss-2435-1, shuffle-boss-2438-1 =====

[info] ComplexTypesSuite:
[info] - simple case (125 milliseconds)
[info] - named_struct is used in the top Project (522 milliseconds)
[info] - expression in named_struct (278 milliseconds)
[info] - nested case (287 milliseconds)
[info] - SPARK-32167: get field from an array of struct (89 milliseconds)
18:38:41.575 WARN org.apache.spark.sql.ComplexTypesSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.ComplexTypesSuite, thread names: shuffle-boss-2444-1, rpc-boss-2441-1 =====

[info] HashedRelationSuite:
[info] - UnsafeHashedRelation (38 milliseconds)
[info] - test serialization empty hash map (1 millisecond)
[info] - LongToUnsafeRowMap (15 milliseconds)
[info] - LongToUnsafeRowMap with very wide range (3 milliseconds)
[info] - LongToUnsafeRowMap with random keys (2 seconds, 956 milliseconds)
[info] - SPARK-24257: insert big values into LongToUnsafeRowMap (19 milliseconds)
[info] - SPARK-24809: Serializing LongToUnsafeRowMap in executor may result in data error (9 milliseconds)
[info] - Spark-14521 (84 milliseconds)
[info] - SPARK-31511: Make BytesToBytesMap iterators thread-safe (135 milliseconds)
[info] - build HashedRelation that is larger than 1G !!! IGNORED !!!
[info] - build HashedRelation with more than 100 millions rows !!! IGNORED !!!
[info] - UnsafeHashedRelation: key set iterator on a contiguous array of keys (32 milliseconds)
[info] - UnsafeHashedRelation: key set iterator on a sparse array of keys (29 milliseconds)
[info] - LongHashedRelation: key set iterator on a contiguous array of keys (3 milliseconds)
[info] - LongToUnsafeRowMap: key set iterator on a contiguous array of keys (9 milliseconds)
[info] - LongToUnsafeRowMap: key set iterator on a sparse array with equidistant keys (3 milliseconds)
[info] - LongToUnsafeRowMap: key set iterator on an array with a single key (266 milliseconds)
[info] - LongToUnsafeRowMap: multiple hasNext calls before calling next() on the key iterator (6 milliseconds)
[info] - LongToUnsafeRowMap: no explicit hasNext calls on the key iterator (4 milliseconds)
[info] - LongToUnsafeRowMap: call hasNext at the end of the iterator (3 milliseconds)
[info] - LongToUnsafeRowMap: random sequence of hasNext and next() calls on the key iterator (4 milliseconds)
[info] - HashJoin: packing and unpacking with the same key type in a LongType (32 milliseconds)
[info] - HashJoin: packing and unpacking with various key types in a LongType (20 milliseconds)
[info] - EmptyHashedRelation override methods behavior test (1 millisecond)
[info] - SPARK-32399: test methods related to key index (36 milliseconds)
18:38:45.364 WARN org.apache.spark.sql.execution.joins.HashedRelationSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.joins.HashedRelationSuite, thread names: shuffle-boss-2450-1, rpc-boss-2447-1 =====

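UnsafeHashedRelation and LongToUnsafeRowMap are internal build-side structures for hash joins, not public API. A user-level way to exercise them is to force a broadcast hash join; a minimal sketch, assuming a spark-shell session:

    import org.apache.spark.sql.functions.broadcast
    val left  = spark.range(1000).withColumnRenamed("id", "k")
    val right = spark.range(100).withColumnRenamed("id", "k")
    // The broadcast() hint makes the planner build a HashedRelation from
    // `right`; with a single long join key this is likely the long-keyed
    // variant the suite tests directly.
    left.join(broadcast(right), "k").explain()  // shows BroadcastHashJoin
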
[info] JDBCWriteSuite:
[info] - Basic CREATE (225 milliseconds)
[info] - Basic CREATE with illegal batchsize (20 milliseconds)
[info] - Basic CREATE with batchsize (302 milliseconds)
[info] - CREATE with ignore (297 milliseconds)
[info] - CREATE with overwrite (232 milliseconds)
[info] - CREATE then INSERT to append (165 milliseconds)
[info] - SPARK-18123 Append with column names with different cases (192 milliseconds)
[info] - Truncate (218 milliseconds)
[info] - createTableOptions (13 milliseconds)
[info] - Incompatible INSERT to append (70 milliseconds)
[info] - INSERT to JDBC Datasource (145 milliseconds)
[info] - INSERT to JDBC Datasource with overwrite (190 milliseconds)
[info] - save works for format("jdbc") if url and dbtable are set (119 milliseconds)
[info] - save API with SaveMode.Overwrite (159 milliseconds)
[info] - save errors if url is not specified (10 milliseconds)
[info] - save errors if dbtable is not specified (20 milliseconds)
[info] - save errors if wrong user/password combination (767 milliseconds)
[info] - save errors if partitionColumn and numPartitions and bounds not set (15 milliseconds)
[info] - SPARK-18433: Improve DataSource option keys to be more case-insensitive (57 milliseconds)
[info] - SPARK-18413: Use `numPartitions` JDBCOption (12 milliseconds)
[info] - SPARK-19318 temporary view data source option keys should be case-insensitive (111 milliseconds)
[info] - SPARK-10849: test schemaString - from createTableColumnTypes option values (17 milliseconds)
[info] - SPARK-10849: create table using user specified column type and verify on target table (355 milliseconds)
[info] - SPARK-10849: jdbc CreateTableColumnTypes option with invalid data type (12 milliseconds)
[info] - SPARK-10849: jdbc CreateTableColumnTypes option with invalid syntax (10 milliseconds)
[info] - SPARK-10849: jdbc CreateTableColumnTypes duplicate columns (10 milliseconds)
[info] - SPARK-10849: jdbc CreateTableColumnTypes invalid columns (45 milliseconds)
18:38:50.074 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 76.0 (TID 98)
org.h2.jdbc.JdbcBatchUpdateException: NULL not allowed for column "NAME"; SQL statement:
INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
	at org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1234)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:679)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1(JdbcUtils.scala:853)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1$adapted(JdbcUtils.scala:851)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2(RDD.scala:1020)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2$adapted(RDD.scala:1020)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2168)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
	at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
	at org.h2.message.DbException.get(DbException.java:179)
	at org.h2.message.DbException.get(DbException.java:155)
	at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
	at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
	at org.h2.command.dml.Insert.insertRows(Insert.java:151)
	at org.h2.command.dml.Insert.update(Insert.java:114)
	at org.h2.command.CommandContainer.update(CommandContainer.java:101)
	at org.h2.command.Command.executeUpdate(Command.java:260)
	at org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
	at org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
	... 14 more
org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
	at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
	at org.h2.message.DbException.get(DbException.java:179)
	at org.h2.message.DbException.get(DbException.java:155)
	at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
	at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
	at org.h2.command.dml.Insert.insertRows(Insert.java:151)
	at org.h2.command.dml.Insert.update(Insert.java:114)
	at org.h2.command.CommandContainer.update(CommandContainer.java:101)
	at org.h2.command.Command.executeUpdate(Command.java:260)
	at org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
	at org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:679)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1(JdbcUtils.scala:853)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1$adapted(JdbcUtils.scala:851)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2(RDD.scala:1020)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2$adapted(RDD.scala:1020)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2168)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
18:38:50.082 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 76.0 (TID 98) (amp-jenkins-worker-05.amp executor driver): org.h2.jdbc.JdbcBatchUpdateException: NULL not allowed for column "NAME"; SQL statement:
INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
	at org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1234)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:679)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1(JdbcUtils.scala:853)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1$adapted(JdbcUtils.scala:851)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2(RDD.scala:1020)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2$adapted(RDD.scala:1020)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2168)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
	at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
	at org.h2.message.DbException.get(DbException.java:179)
	at org.h2.message.DbException.get(DbException.java:155)
	at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
	at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
	at org.h2.command.dml.Insert.insertRows(Insert.java:151)
	at org.h2.command.dml.Insert.update(Insert.java:114)
	at org.h2.command.CommandContainer.update(CommandContainer.java:101)
	at org.h2.command.Command.executeUpdate(Command.java:260)
	at org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
	at org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
	... 14 more
org.h2.jdbc.JdbcSQLException: NULL not allowed for column "NAME"; SQL statement:
INSERT INTO TEST.PEOPLE1 ("NAME","THEID") VALUES (?,?) [23502-195]
	at org.h2.message.DbException.getJdbcSQLException(DbException.java:345)
	at org.h2.message.DbException.get(DbException.java:179)
	at org.h2.message.DbException.get(DbException.java:155)
	at org.h2.table.Column.validateConvertUpdateSequence(Column.java:345)
	at org.h2.table.Table.validateConvertUpdateSequence(Table.java:793)
	at org.h2.command.dml.Insert.insertRows(Insert.java:151)
	at org.h2.command.dml.Insert.update(Insert.java:114)
	at org.h2.command.CommandContainer.update(CommandContainer.java:101)
	at org.h2.command.Command.executeUpdate(Command.java:260)
	at org.h2.jdbc.JdbcPreparedStatement.executeUpdateInternal(JdbcPreparedStatement.java:164)
	at org.h2.jdbc.JdbcPreparedStatement.executeBatch(JdbcPreparedStatement.java:1215)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.savePartition(JdbcUtils.scala:679)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1(JdbcUtils.scala:853)
	at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.$anonfun$saveTable$1$adapted(JdbcUtils.scala:851)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2(RDD.scala:1020)
	at org.apache.spark.rdd.RDD.$anonfun$foreachPartition$2$adapted(RDD.scala:1020)
	at org.apache.spark.SparkContext.$anonfun$runJob$5(SparkContext.scala:2168)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:131)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:484)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1426)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:487)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

18:38:50.082 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 76.0 failed 1 times; aborting job
[info] - SPARK-19726: INSERT null to a NOT NULL column (79 milliseconds)
[info] - SPARK-23856 Spark jdbc setQueryTimeout option !!! IGNORED !!!
[info] - metrics (170 milliseconds)
18:38:50.304 WARN org.apache.spark.sql.jdbc.JDBCWriteSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.jdbc.JDBCWriteSuite, thread names: shuffle-boss-2456-1, Generate Seed, rpc-boss-2453-1 =====

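The JDBCWriteSuite cases above map onto documented DataFrameWriter JDBC options. A minimal sketch, assuming a spark-shell session with the H2 driver on the classpath (the URL and table name are hypothetical):

    import org.apache.spark.sql.SaveMode
    import spark.implicits._
    val people = Seq(("alice", 1), ("bob", 2)).toDF("NAME", "THEID")
    people.write
      .format("jdbc")
      .option("url", "jdbc:h2:mem:testdb;DB_CLOSE_DELAY=-1")
      .option("dbtable", "TEST.PEOPLE")
      .option("batchsize", "1000")   // rows per JDBC batch ("Basic CREATE with batchsize")
      .option("truncate", "true")    // with Overwrite: TRUNCATE instead of DROP/CREATE
      .option("createTableColumnTypes", "NAME VARCHAR(64), THEID INT")  // SPARK-10849
      .mode(SaveMode.Overwrite)
      .save()

The SPARK-19726 stack trace above is the expected path for that test: writing a null into a NOT NULL column surfaces the database's JdbcBatchUpdateException through the writing task, which is why the test itself passes despite the logged error.
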
[info] ExplainSuiteAE:
[info] - Explain formatted (359 milliseconds)
18:38:50.733 WARN org.apache.spark.sql.ExplainSuiteAE: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.ExplainSuiteAE, thread names: shuffle-boss-2462-1, rpc-boss-2459-1, QueryStageCreator-119, QueryStageCreator-118, QueryStageCreator-120, QueryStageCreator-117 =====

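ExplainSuiteAE checks the formatted explain output with adaptive query execution enabled. A minimal sketch, assuming a spark-shell session:

    spark.conf.set("spark.sql.adaptive.enabled", "true")
    val joined = spark.range(100).join(spark.range(100), "id")
    // "formatted" mode prints a numbered operator tree followed by a
    // per-operator details section.
    joined.explain("formatted")
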
[info] OuterJoinSuite:
[info] - basic left outer join using ShuffledHashJoin (137 milliseconds)
[info] - basic left outer join using BroadcastHashJoin (whole-stage-codegen off) (76 milliseconds)
[info] - basic left outer join using BroadcastHashJoin (whole-stage-codegen on) (57 milliseconds)
[info] - basic left outer join using SortMergeJoin (127 milliseconds)
[info] - basic left outer join using BroadcastNestedLoopJoin build left (91 milliseconds)
[info] - basic left outer join using BroadcastNestedLoopJoin build right (59 milliseconds)
[info] - basic right outer join using ShuffledHashJoin (61 milliseconds)
[info] - basic right outer join using BroadcastHashJoin (whole-stage-codegen off) (59 milliseconds)
[info] - basic right outer join using BroadcastHashJoin (whole-stage-codegen on) (55 milliseconds)
[info] - basic right outer join using SortMergeJoin (144 milliseconds)
[info] - basic right outer join using BroadcastNestedLoopJoin build left (44 milliseconds)
[info] - basic right outer join using BroadcastNestedLoopJoin build right (65 milliseconds)
[info] - basic full outer join using SortMergeJoin (88 milliseconds)
[info] - basic full outer join using BroadcastNestedLoopJoin build left (58 milliseconds)
[info] - basic full outer join using BroadcastNestedLoopJoin build right (55 milliseconds)
[info] - left outer join with both inputs empty using ShuffledHashJoin (52 milliseconds)
[info] - left outer join with both inputs empty using BroadcastHashJoin (whole-stage-codegen off) (34 milliseconds)
[info] - left outer join with both inputs empty using BroadcastHashJoin (whole-stage-codegen on) (47 milliseconds)
[info] - left outer join with both inputs empty using SortMergeJoin (55 milliseconds)
[info] - left outer join with both inputs empty using BroadcastNestedLoopJoin build left (48 milliseconds)
[info] - left outer join with both inputs empty using BroadcastNestedLoopJoin build right (38 milliseconds)
[info] - right outer join with both inputs empty using ShuffledHashJoin (55 milliseconds)
[info] - right outer join with both inputs empty using BroadcastHashJoin (whole-stage-codegen off) (37 milliseconds)
[info] - right outer join with both inputs empty using BroadcastHashJoin (whole-stage-codegen on) (47 milliseconds)
[info] - right outer join with both inputs empty using SortMergeJoin (43 milliseconds)
[info] - right outer join with both inputs empty using BroadcastNestedLoopJoin build left (34 milliseconds)
[info] - right outer join with both inputs empty using BroadcastNestedLoopJoin build right (48 milliseconds)
[info] - full outer join with both inputs empty using SortMergeJoin (45 milliseconds)
[info] - full outer join with both inputs empty using BroadcastNestedLoopJoin build left (69 milliseconds)
[info] - full outer join with both inputs empty using BroadcastNestedLoopJoin build right (50 milliseconds)
[info] - left outer join with unique keys using ShuffledHashJoin (55 milliseconds)
[info] - left outer join with unique keys using BroadcastHashJoin (whole-stage-codegen off) (43 milliseconds)
[info] - left outer join with unique keys using BroadcastHashJoin (whole-stage-codegen on) (52 milliseconds)
[info] - left outer join with unique keys using SortMergeJoin (101 milliseconds)
[info] - left outer join with unique keys using BroadcastNestedLoopJoin build left (65 milliseconds)
[info] - left outer join with unique keys using BroadcastNestedLoopJoin build right (57 milliseconds)
[info] - right outer join with unique keys using ShuffledHashJoin (49 milliseconds)
[info] - right outer join with unique keys using BroadcastHashJoin (whole-stage-codegen off) (67 milliseconds)
[info] - right outer join with unique keys using BroadcastHashJoin (whole-stage-codegen on) (71 milliseconds)
[info] - right outer join with unique keys using SortMergeJoin (597 milliseconds)
[info] - right outer join with unique keys using BroadcastNestedLoopJoin build left (49 milliseconds)
[info] - right outer join with unique keys using BroadcastNestedLoopJoin build right (82 milliseconds)
18:38:53.996 WARN org.apache.spark.sql.execution.joins.OuterJoinSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.joins.OuterJoinSuite, thread names: rpc-boss-2465-1, shuffle-boss-2468-1 =====

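The outer-join matrix above runs the same logical join through each physical strategy. With the join hints added in Spark 3.0 you can steer the planner similarly; a minimal sketch, assuming a spark-shell session:

    val l = spark.range(100).withColumnRenamed("id", "k")
    val r = spark.range(100).withColumnRenamed("id", "k")
    l.join(r.hint("broadcast"),    Seq("k"), "left_outer").explain()  // BroadcastHashJoin
    l.join(r.hint("shuffle_hash"), Seq("k"), "left_outer").explain()  // ShuffledHashJoin
    l.join(r.hint("merge"),        Seq("k"), "left_outer").explain()  // SortMergeJoin
    // Full outer joins fall back to SortMergeJoin or BroadcastNestedLoopJoin,
    // as the suite's test matrix reflects.
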
[info] RowQueueSuite:
[info] - in-memory queue (2 milliseconds)
[info] - disk queue (encryption = off) (4 milliseconds)
[info] - disk queue (encryption = on) (111 milliseconds)
[info] - hybrid queue (encryption = off) (6 milliseconds)
[info] - hybrid queue (encryption = on) (27 milliseconds)
[info] PlannerSuite:
[info] - count is partially aggregated (6 milliseconds)
[info] - count distinct is partially aggregated (3 milliseconds)
[info] - mixed aggregates are partially aggregated (4 milliseconds)
[info] - mixed aggregates with same distinct columns (79 milliseconds)
[info] - sizeInBytes estimation of limit operator for broadcast hash join optimization (131 milliseconds)
[info] - InMemoryRelation statistics propagation (158 milliseconds)
[info] - SPARK-11390 explain should print PushedFilters of PhysicalRDD (273 milliseconds)
[info] - efficient terminal limit -> sort should use TakeOrderedAndProject (28 milliseconds)
[info] - terminal limit -> project -> sort should use TakeOrderedAndProject (32 milliseconds)
[info] - terminal limits that are not handled by TakeOrderedAndProject should use CollectLimit (19 milliseconds)
[info] - TakeOrderedAndProject can appear in the middle of plans (30 milliseconds)
[info] - CollectLimit can appear in the middle of a plan when caching is used (27 milliseconds)
[info] - TakeOrderedAndProjectExec appears only when number of limit is below the threshold. (60 milliseconds)
[info] - PartitioningCollection (145 milliseconds)
[info] - collapse adjacent repartitions (17 milliseconds)
[info] - EnsureRequirements with child partitionings with different numbers of output partitions (4 milliseconds)
[info] - EnsureRequirements with compatible child partitionings that do not satisfy distribution (1 millisecond)
[info] - EnsureRequirements with compatible child partitionings that satisfy distribution (1 millisecond)
[info] - EnsureRequirements should not repartition if only ordering requirement is unsatisfied (1 millisecond)
[info] - EnsureRequirements eliminates Exchange if child has same partitioning (1 millisecond)
[info] - EnsureRequirements does not eliminate Exchange with different partitioning (0 milliseconds)
[info] - EnsureRequirements should respect ClusteredDistribution's num partitioning (1 millisecond)
[info] - Reuse exchanges (4 milliseconds)
[info] - EnsureRequirements skips sort when either side of join keys is required after inner SMJ (4 milliseconds)
[info] - EnsureRequirements skips sort when key order of a parent SMJ is propagated from its child SMJ (8 milliseconds)
[info] - EnsureRequirements for sort operator after left outer sort merge join (2 milliseconds)
[info] - EnsureRequirements for sort operator after right outer sort merge join (1 millisecond)
[info] - EnsureRequirements adds sort after full outer sort merge join (2 milliseconds)
[info] - EnsureRequirements adds sort when there is no existing ordering (0 milliseconds)
[info] - EnsureRequirements skips sort when required ordering is prefix of existing ordering (0 milliseconds)
[info] - EnsureRequirements skips sort when required ordering is semantically equal to existing ordering (0 milliseconds)
[info] - EnsureRequirements adds sort when required ordering isn't a prefix of existing ordering (0 milliseconds)
[info] - SPARK-24242: RangeExec should have correct output ordering and partitioning (41 milliseconds)
[info] - SPARK-24495: EnsureRequirements can return wrong plan when reusing the same key in join (1 millisecond)
[info] - SPARK-27485: EnsureRequirements.reorder should handle duplicate expressions (0 milliseconds)
[info] - SPARK-24500: create union with stream of children (34 milliseconds)
[info] - SPARK-25278: physical nodes should be different instances for same logical nodes (31 milliseconds)
[info] - SPARK-24556: always rewrite output partitioning in ReusedExchangeExec and InMemoryTableScanExec (216 milliseconds)
[info] - SPARK-26812: wrong nullability for complex datatypes in union (1 millisecond)
[info] - Do not analyze subqueries twice (42 milliseconds)
[info] - aliases in the project should not introduce extra shuffle (78 milliseconds)
[info] - aliases to expressions should not be replaced (85 milliseconds)
[info] - aliases in the aggregate expressions should not introduce extra shuffle (76 milliseconds)
[info] - aliases in the object hash/sort aggregate expressions should not introduce extra shuffle (216 milliseconds)
[info] - aliases in the sort aggregate expressions should not introduce extra sort (87 milliseconds)
[info] - Change the number of partitions to zero when a range is empty (whole-stage-codegen off) (14 milliseconds)
[info] - Change the number of partitions to zero when a range is empty (whole-stage-codegen on) (24 milliseconds)
18:38:56.529 WARN org.apache.spark.sql.execution.PlannerSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.PlannerSuite, thread names: shuffle-boss-2474-1, rpc-boss-2471-1 =====

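Several PlannerSuite cases can be observed from the API, e.g. that a terminal sort plus limit plans to TakeOrderedAndProject instead of a global sort. A minimal sketch, assuming a spark-shell session:

    val df = spark.range(1000).toDF("v")
    df.orderBy("v").limit(10).explain()              // TakeOrderedAndProject
    df.orderBy("v").limit(10).select("v").explain()  // still TakeOrderedAndProject
    // Above spark.sql.execution.topKSortFallbackThreshold the planner falls
    // back to a full Sort plus CollectLimit, matching the "below the
    // threshold" test above.
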
[info] OrcColumnarBatchReaderSuite:
[info] - all partitions are requested: struct<col1:int,col2:int> (1 millisecond)
[info] - initBatch should initialize requested partition columns only: struct<col1:int,col2:int> (1 millisecond)
[info] - all partitions are requested: struct<col1:int,col2:int,p1:string,p2:string> (0 milliseconds)
[info] - initBatch should initialize requested partition columns only: struct<col1:int,col2:int,p1:string,p2:string> (1 millisecond)
[info] - all partitions are requested: struct<col1:int,col2:int,p1:string> (0 milliseconds)
[info] - initBatch should initialize requested partition columns only: struct<col1:int,col2:int,p1:string> (0 milliseconds)
[info] - all partitions are requested: struct<col1:int,col2:int,p2:string> (1 millisecond)
[info] - initBatch should initialize requested partition columns only: struct<col1:int,col2:int,p2:string> (0 milliseconds)
18:38:56.619 WARN org.apache.spark.sql.execution.datasources.orc.OrcColumnarBatchReaderSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.execution.datasources.orc.OrcColumnarBatchReaderSuite, thread names: shuffle-boss-2480-1, rpc-boss-2477-1 =====

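OrcColumnarBatchReaderSuite asserts that initBatch materializes only the requested partition columns into the columnar batch. The user-visible counterpart is column pruning on a partitioned ORC read; a minimal sketch, assuming a spark-shell session and a hypothetical scratch path:

    val data = spark.range(10).selectExpr(
      "id AS col1", "id * 2 AS col2", "CAST(id % 2 AS STRING) AS p1")
    data.write.partitionBy("p1").mode("overwrite").orc("/tmp/orc_part")
    // Selecting a subset means the batch reader only has to populate col1
    // plus the partition column p1; col2 is never decoded.
    spark.read.orc("/tmp/orc_part").select("col1", "p1").show()
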
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf1Test started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf2Test started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf3Test started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf4Test started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf5Test started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf6Test started
[info] Test test.org.apache.spark.sql.JavaUDFSuite.udf7Test started
[info] Test run finished: 0 failed, 0 ignored, 7 total, 1.213s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testFormatAPI started
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testTextAPI started
18:38:58.058 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testJsonAPI started
18:38:58.363 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testLoadAPI started
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testOptionsAPI started
18:38:58.802 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testSaveModeAPI started
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testCsvAPI started
18:38:58.978 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testParquetAPI started
18:38:59.225 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameReaderWriterSuite.testTextFileAPI started
18:38:59.527 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test run finished: 0 failed, 0 ignored, 9 total, 1.653s
[info] Test run started
[info] Test test.org.apache.spark.sql.Java8DatasetAggregatorSuite.testTypedAggregationCount started
[info] Test test.org.apache.spark.sql.Java8DatasetAggregatorSuite.testTypedAggregationSumDouble started
[info] Test test.org.apache.spark.sql.Java8DatasetAggregatorSuite.testTypedAggregationSumLong started
[info] Test test.org.apache.spark.sql.Java8DatasetAggregatorSuite.testTypedAggregationAverage started
[info] Test run finished: 0 failed, 0 ignored, 4 total, 2.196s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaUDAFSuite.udf1Test started
[info] Test run finished: 0 failed, 0 ignored, 1 total, 0.267s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaDatasetAggregatorSuite.testTypedAggregationCount started
[info] Test test.org.apache.spark.sql.JavaDatasetAggregatorSuite.testTypedAggregationSumDouble started
[info] Test test.org.apache.spark.sql.JavaDatasetAggregatorSuite.testTypedAggregationSumLong started
[info] Test test.org.apache.spark.sql.JavaDatasetAggregatorSuite.testTypedAggregationAnonClass started
[info] Test test.org.apache.spark.sql.JavaDatasetAggregatorSuite.testTypedAggregationAverage started
[info] Test run finished: 0 failed, 0 ignored, 5 total, 2.352s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaApplySchemaSuite.applySchema started
[info] Test test.org.apache.spark.sql.JavaApplySchemaSuite.dataFrameRDDOperations started
[info] Test test.org.apache.spark.sql.JavaApplySchemaSuite.applySchemaToJSON started
[info] Test run finished: 0 failed, 0 ignored, 3 total, 0.669s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaColumnExpressionSuite.isInCollectionCheckExceptionMessage started
[info] Test test.org.apache.spark.sql.JavaColumnExpressionSuite.isInCollectionWorksCorrectlyOnJava started
[info] Test run finished: 0 failed, 0 ignored, 2 total, 0.297s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testRuntimeNullabilityCheck started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testCircularReferenceBean1 started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testCircularReferenceBean2 started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testCircularReferenceBean3 started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testSerializeNull started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testRandomSplit started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testTypedFilterPreservingSchema started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testLocalDateAndInstantEncoders started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testJoin started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testTake started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testToLocalIterator started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testSpecificLists started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testForeach started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testJavaEncoder started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testNonNullField started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testPrimitiveEncoder started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testEmptyBean started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testCommonOperation started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testNullInTopLevelBean started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testGroupBy started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testSetOperation started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testBeanWithEnum started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testKryoEncoder started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.test started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testJavaBeanEncoder2 started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testCollect started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testKryoEncoderErrorMessageForPrivateClass started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testJavaBeanEncoder started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testTupleEncoder started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testNestedTupleEncoder started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testTupleEncoderSchema started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testReduce started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testSelect started
[info] Test test.org.apache.spark.sql.JavaDatasetSuite.testJavaEncoderErrorMessageForPrivateClass started
[info] Test run finished: 0 failed, 0 ignored, 34 total, 16.277s
[info] Test run started
[info] Test test.org.apache.spark.sql.streaming.JavaDataStreamReaderWriterSuite.testForeachBatchAPI started
18:39:21.732 WARN org.apache.spark.sql.streaming.StreamingQueryManager: Temporary checkpoint location created; it is deleted only if the query does not fail: /home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/temporary-f4c8c1c7-17f7-4d9d-b150-c422ae9df980. To delete it in all circumstances, set spark.sql.streaming.forceDeleteTempCheckpointLocation to true. Note that deleting the temporary checkpoint folder is best-effort.
[info] Test test.org.apache.spark.sql.streaming.JavaDataStreamReaderWriterSuite.testForeachAPI started
18:39:21.849 WARN org.apache.spark.sql.streaming.StreamingQueryManager: Temporary checkpoint location created; it is deleted only if the query does not fail: /home/jenkins/workspace/SparkPullRequestBuilder@3/target/tmp/temporary-4cd47ba3-c78b-40c1-bcb1-1fa3bb15aa44. To delete it in all circumstances, set spark.sql.streaming.forceDeleteTempCheckpointLocation to true. Note that deleting the temporary checkpoint folder is best-effort.
[info] Test run finished: 0 failed, 0 ignored, 2 total, 0.226s
[info] Test run started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorForSingleColumnRow started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorForArrayColumn started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorWhenOnlyTheLastColumnDiffers started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorForMixedColumns started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testCompareLongsAsUnsigned started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testCompareLongsAsLittleEndian started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorForNullColumns started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorForMultipleColumnRow started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorWhenSubtractionIsDivisibleByMaxIntValue started
[info] Test test.org.apache.spark.sql.execution.sort.RecordBinaryComparatorSuite.testBinaryComparatorWhenSubtractionCanOverflowLongValue started
[info] Test run finished: 0 failed, 0 ignored, 10 total, 0.008s
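
The two overflow-named RecordBinaryComparatorSuite tests above guard a classic comparator bug: comparing longs by subtraction. A minimal Scala sketch of the failure mode and the safe alternatives, independent of Spark:

    val a = Long.MaxValue
    val b = -1L
    // Subtraction-based comparison overflows: a - b wraps around to
    // Long.MinValue, wrongly claiming a < b.
    println(a - b < 0)                             // true, although a > b
    println(java.lang.Long.compare(a, b))          // positive: signed a > b
    // Byte-wise record comparators treat words as unsigned:
    println(java.lang.Long.compareUnsigned(a, b))  // negative: unsigned a < b
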
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaSaveLoadSuite.saveAndLoadWithSchema started
[info] Test test.org.apache.spark.sql.JavaSaveLoadSuite.saveAndLoad started
[info] Test run finished: 0 failed, 0 ignored, 2 total, 1.053s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testMapZipWith started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testTransformValues started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testZipWith started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testTransformKeys started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testAggregate started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testMapFilter started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testExists started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testFilter started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testForall started
[info] Test test.org.apache.spark.sql.JavaHigherOrderFunctionsSuite.testTransform started
[info] Test run finished: 0 failed, 0 ignored, 10 total, 1.01s
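
The higher-order function tests above exercise the SQL lambda functions added in Spark 2.4. A minimal Scala sketch of a few of them, assuming a spark-shell session:

    import spark.implicits._
    val df = Seq(Seq(1, 2, 3), Seq(4, 5)).toDF("xs")
    df.selectExpr(
      "transform(xs, x -> x * 10) AS times10",
      "filter(xs, x -> x % 2 = 0) AS evens",
      "exists(xs, x -> x > 4) AS anyBig",
      "aggregate(xs, 0, (acc, x) -> acc + x) AS total"
    ).show()
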
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaBeanDeserializationSuite.testBeanWithArrayFieldDeserialization started
[info] Test test.org.apache.spark.sql.JavaBeanDeserializationSuite.testSpark22000FailToUpcast started
[info] Test test.org.apache.spark.sql.JavaBeanDeserializationSuite.testSpark22000 started
[info] Test test.org.apache.spark.sql.JavaBeanDeserializationSuite.testBeanWithLocalDateAndInstant started
[info] Test test.org.apache.spark.sql.JavaBeanDeserializationSuite.testBeanWithMapFieldsDeserialization started
[info] Test run finished: 0 failed, 0 ignored, 5 total, 0.611s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCollectAndTake started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testJsonRDDToDataFrame started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testVarargMethods started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testBeanWithoutGetter started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCreateStructTypeFromList started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testSampleBy started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCrosstab started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testUDF started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCreateDataFromFromList started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCircularReferenceBean started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testFrequentItems started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testSampleByColumn started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testExecution started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testTextLoad started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.pivot started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testGenericLoad started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCountMinSketch started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.pivotColumnValues started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCreateDataFrameFromJavaBeans started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCorrelation started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testBloomFilter started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCovariance started
[info] Test test.org.apache.spark.sql.JavaDataFrameSuite.testCreateDataFrameFromLocalJavaBeans started
[info] Test run finished: 0 failed, 0 ignored, 23 total, 11.135s
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaDataFrameWriterV2Suite.testOverwritePartitionsAPI started
18:39:35.815 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:35.854 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameWriterV2Suite.testReplaceAPI started
18:39:35.991 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.027 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.059 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.092 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.212 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameWriterV2Suite.testAppendAPI started
18:39:36.329 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.373 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameWriterV2Suite.testCreateOrReplaceAPI started
18:39:36.523 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.556 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.585 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.617 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.648 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameWriterV2Suite.testOverwriteAPI started
18:39:36.794 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.830 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test test.org.apache.spark.sql.JavaDataFrameWriterV2Suite.testCreateAPI started
18:39:36.945 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:36.996 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:37.043 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:37.090 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
18:39:37.171 WARN org.apache.spark.sql.execution.datasources.DataSource: All paths were ignored:
  
[info] Test run finished: 0 failed, 0 ignored, 6 total, 1.532s
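
JavaDataFrameWriterV2Suite drives the DataFrameWriterV2 API; the Scala equivalents look as follows, a minimal sketch assuming a spark-shell session and a catalog that supports v2 tables (the table name is hypothetical):

    import org.apache.spark.sql.functions.col
    val df = spark.range(10).withColumn("p", col("id") % 2)
    df.writeTo("testcat.ns.tbl").partitionedBy(col("p")).create()  // CREATE TABLE
    df.writeTo("testcat.ns.tbl").append()                          // INSERT INTO
    df.writeTo("testcat.ns.tbl").createOrReplace()                 // CREATE OR REPLACE
    df.writeTo("testcat.ns.tbl").overwritePartitions()             // dynamic overwrite
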
[info] Test run started
[info] Test test.org.apache.spark.sql.JavaRowSuite.constructSimpleRow started
[info] Test test.org.apache.spark.sql.JavaRowSuite.constructComplexRow started
[info] Test run finished: 0 failed, 0 ignored, 2 total, 0.003s
[info] ScalaTest
[info] Run completed in 1 hour, 33 minutes, 22 seconds.
[info] Total number of tests run: 8686
[info] Suites: completed 368, aborted 0
[info] Tests: succeeded 8682, failed 4, canceled 1, ignored 52, pending 0
[info] *** 4 TESTS FAILED ***
[error] Failed: Total 8811, Failed 4, Errors 0, Passed 8807, Ignored 52, Canceled 1
[error] Failed tests:
[error] 	org.apache.spark.sql.execution.streaming.state.StateStoreCompatibleSuite
[error] (sql / Test / test) sbt.TestsFailedException: Tests unsuccessful
[error] Total time: 5608 s (01:33:28), completed Oct 28, 2020 6:39:43 PM
[error] running /home/jenkins/workspace/SparkPullRequestBuilder@3/build/sbt -Phadoop-3.2 -Phive-2.3 -Phive-thriftserver -Phive -Dtest.exclude.tags=org.apache.spark.tags.ExtendedHiveTest,org.apache.spark.tags.ExtendedYarnTest hive-thriftserver/test sql-kafka-0-10/test sql/test repl/test mllib/test examples/test catalyst/test avro/test hive/test ; received return code 1
Attempting to post to Github...
 > Post successful.
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Test FAILed.
Refer to this link for build results (access rights to CI server needed): 
https://amplab.cs.berkeley.edu/jenkins//job/SparkPullRequestBuilder/130385/
Test FAILed.
Finished: FAILURE