Console Output

[Skipping 25,191 KB of earlier log output]
00:02:10.152 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13682.0 (TID 25468)
org.apache.spark.SparkException: Exception thrown in awaitResult: 
	at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:302)
	at org.apache.spark.util.ThreadUtils$.parmap(ThreadUtils.scala:376)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.readOrcSchemasInParallel(OrcFileOperator.scala:112)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19$adapted(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:73)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not read footer for file: file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-b1f67205-c537-49e0-9dbf-85890cceca2f/third/part-00000-235ddba1-3ad0-4f06-920e-8a937cf70fe3-c000.json
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:83)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-b1f67205-c537-49e0-9dbf-85890cceca2f/third/part-00000-235ddba1-3ad0-4f06-920e-8a937cf70fe3-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	... 19 more
00:02:10.154 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13682.0 (TID 25468, amp-jenkins-worker-04.amp, executor driver): org.apache.spark.SparkException: Exception thrown in awaitResult: 
	at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:302)
	at org.apache.spark.util.ThreadUtils$.parmap(ThreadUtils.scala:376)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.readOrcSchemasInParallel(OrcFileOperator.scala:112)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.hive.orc.HiveOrcSourceSuite.$anonfun$new$19$adapted(HiveOrcSourceSuite.scala:171)
	at org.apache.spark.sql.execution.datasources.SchemaMergeUtils$.$anonfun$mergeSchemasInParallel$2(SchemaMergeUtils.scala:73)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2(RDD.scala:837)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitions$2$adapted(RDD.scala:837)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Could not read footer for file: file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-b1f67205-c537-49e0-9dbf-85890cceca2f/third/part-00000-235ddba1-3ad0-4f06-920e-8a937cf70fe3-c000.json
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:83)
	at scala.collection.Iterator$$anon$10.next(Iterator.scala:459)
	at scala.collection.TraversableOnce.collectFirst(TraversableOnce.scala:148)
	at scala.collection.TraversableOnce.collectFirst$(TraversableOnce.scala:135)
	at scala.collection.AbstractIterator.collectFirst(Iterator.scala:1429)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.getFileReader(OrcFileOperator.scala:87)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$readOrcSchemasInParallel$1(OrcFileOperator.scala:114)
	at org.apache.spark.util.ThreadUtils$.$anonfun$parmap$2(ThreadUtils.scala:373)
	at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:659)
	at scala.util.Success.$anonfun$map$1(Try.scala:255)
	at scala.util.Success.map(Try.scala:213)
	at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
	at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
	at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
	at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
	at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
	at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
	at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
	at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
	at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: org.apache.orc.FileFormatException: Malformed ORC file file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-b1f67205-c537-49e0-9dbf-85890cceca2f/third/part-00000-235ddba1-3ad0-4f06-920e-8a937cf70fe3-c000.json. Invalid postscript.
	at org.apache.orc.impl.ReaderImpl.ensureOrcFooter(ReaderImpl.java:275)
	at org.apache.orc.impl.ReaderImpl.extractFileTail(ReaderImpl.java:582)
	at org.apache.orc.impl.ReaderImpl.<init>(ReaderImpl.java:370)
	at org.apache.hadoop.hive.ql.io.orc.ReaderImpl.<init>(ReaderImpl.java:63)
	at org.apache.hadoop.hive.ql.io.orc.OrcFile.createReader(OrcFile.java:55)
	at org.apache.spark.sql.hive.orc.OrcFileOperator$.$anonfun$getFileReader$3(OrcFileOperator.scala:76)
	... 19 more

00:02:10.155 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13682.0 failed 1 times; aborting job
[info] - SPARK-11412 read and merge orc schemas in parallel (2 seconds, 96 milliseconds)
00:02:18.967 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:19.173 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:19.468 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:19.683 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:20.194 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:20.381 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:20.969 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:20.992 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:21.599 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:21.622 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:21.931 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
00:02:22.043 ERROR org.apache.hadoop.hive.ql.io.AcidUtils: Failed to get files with ID; using regular API: Only supported for DFS; got class org.apache.hadoop.hive.ql.io.ProxyLocalFileSystem
[info] - SPARK-25993 CREATE EXTERNAL TABLE with subdirectories (12 seconds, 873 milliseconds)
[info] - SPARK-31580: Read a file written before ORC-569 (291 milliseconds)
00:02:23.525 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:23.563 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:23.564 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
00:02:23.649 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:23.676 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:23.676 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] HiveExplainSuite:
00:02:23.724 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/src specified for non-external table:src
[info] - show cost in explain command (1 second, 260 milliseconds)
[info] - explain extended command (199 milliseconds)
[info] - explain create table command (221 milliseconds)
[info] - explain output of physical plan should contain proper codegen stage ID (123 milliseconds)
[info] - EXPLAIN CODEGEN command (149 milliseconds)
00:02:25.658 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/tab specified for non-external table:tab
[info] - SPARK-23034 show relation names in Hive table scan nodes (231 milliseconds)
[info] - SPARK-26661: Show actual class name of the writing command in CTAS explain (73 milliseconds)
00:02:26.793 WARN org.apache.spark.sql.hive.test.TestHiveExternalCatalog: Couldn't find corresponding Hive SerDe for data source provider json. Persisting data source table `default`.`t` into Hive metastore in Spark SQL specific format, which is NOT compatible with Hive.
== Physical Plan ==
*(1) Sort [i#195296 ASC NULLS FIRST], true, 0
+- Exchange rangepartitioning(i#195296 ASC NULLS FIRST, 5), true, [id=#170612]
   +- FileScan json default.t[i#195296,j#195297] Batched: false, DataFilters: [], Format: JSON, Location: CatalogFileIndex[file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59c..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<i:int>, SelectedBucketsCount: 4 out of 4


00:02:27.703 WARN org.apache.spark.sql.hive.test.TestHiveExternalCatalog: Couldn't find corresponding Hive SerDe for data source provider json. Persisting data source table `default`.`t` into Hive metastore in Spark SQL specific format, which is NOT compatible with Hive.
== Physical Plan ==
*(1) Sort [i#195304 ASC NULLS FIRST], true, 0
+- Exchange rangepartitioning(i#195304 ASC NULLS FIRST, 5), true, [id=#170644]
   +- FileScan json default.t[i#195304,j#195305] Batched: false, DataFilters: [], Format: JSON, Location: CatalogFileIndex[file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59c..., PartitionFilters: [], PushedFilters: [], ReadSchema: struct<i:int>, SelectedBucketsCount: 4 out of 4


[info] - SPARK-28595: explain should not trigger partition listing (2 seconds, 224 milliseconds)
00:02:28.224 WARN org.apache.hadoop.hive.common.FileUtils: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/src does not exist; Force to delete it.
00:02:28.224 ERROR org.apache.hadoop.hive.common.FileUtils: Failed to delete file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/src
00:02:28.281 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:28.283 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:28.283 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
00:02:28.415 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:28.415 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:28.415 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] HiveMetadataCacheSuite:
00:02:29.406 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13714.0 (TID 25512)
java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/view_table/part-00000-2aa4fc4d-d124-4e16-9a69-a9916751e3e2-c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:72)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:66)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:101)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:02:29.409 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13714.0 (TID 25512, amp-jenkins-worker-04.amp, executor driver): java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/view_table/part-00000-2aa4fc4d-d124-4e16-9a69-a9916751e3e2-c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:72)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:66)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:101)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

00:02:29.409 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13714.0 failed 1 times; aborting job
00:02:29.857 WARN org.apache.spark.sql.execution.datasources.InMemoryFileIndex: The directory file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/view_table was not found. Was it deleted very recently?
[info] - SPARK-16337 temporary view refresh (1 second, 403 milliseconds)
00:02:30.997 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13721.0 (TID 25520)
java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/view_table/part-00000-d3ac22a3-284a-4593-9060-1747b346c040-c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:72)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:66)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:101)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:02:30.999 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13721.0 (TID 25520, amp-jenkins-worker-04.amp, executor driver): java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/view_table/part-00000-d3ac22a3-284a-4593-9060-1747b346c040-c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:72)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:66)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:101)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

00:02:30.999 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13721.0 failed 1 times; aborting job
00:02:31.628 WARN org.apache.spark.sql.execution.command.DropTableCommand: org.apache.spark.sql.AnalysisException: Table or view not found: view_table; line 1 pos 14;
'SubqueryAlias spark_catalog.default.view_refresh
+- View (`default`.`view_refresh`, [id#195371L])
   +- 'Project [*]
      +- 'Filter ('id > -1)
         +- 'UnresolvedRelation [view_table]

org.apache.spark.sql.AnalysisException: Table or view not found: view_table; line 1 pos 14;
'SubqueryAlias spark_catalog.default.view_refresh
+- View (`default`.`view_refresh`, [id#195371L])
   +- 'Project [*]
      +- 'Filter ('id > -1)
         +- 'UnresolvedRelation [view_table]

	at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1(CheckAnalysis.scala:106)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1$adapted(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:177)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:89)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:130)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:156)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:153)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.$anonfun$analyzed$1(TestHive.scala:619)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed$lzycompute(TestHive.scala:593)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed(TestHive.scala:593)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:58)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:91)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.SparkSession.table(SparkSession.scala:589)
	at org.apache.spark.sql.execution.command.DropTableCommand.run(ddl.scala:240)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3616)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3614)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:92)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.$anonfun$sql$1(TestHive.scala:239)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.sql(TestHive.scala:237)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$2(SQLTestUtils.scala:317)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$1(SQLTestUtils.scala:316)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1386)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView(SQLTestUtils.scala:316)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView$(SQLTestUtils.scala:314)
	at org.apache.spark.sql.hive.HiveMetadataCacheSuite.withView(HiveMetadataCacheSuite.scala:32)
	at org.apache.spark.sql.hive.HiveMetadataCacheSuite.checkRefreshView(HiveMetadataCacheSuite.scala:44)
	at org.apache.spark.sql.hive.HiveMetadataCacheSuite.$anonfun$new$2(HiveMetadataCacheSuite.scala:39)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:151)
	at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
	at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:286)
	at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:58)
	at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:393)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:381)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:376)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:458)
	at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite.run(Suite.scala:1124)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:518)
	at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:58)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:317)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:510)
	at sbt.ForkMain$Run$2.call(ForkMain.java:296)
	at sbt.ForkMain$Run$2.call(ForkMain.java:286)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
[info] - view refresh (1 second, 813 milliseconds)
00:02:33.872 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13728.0 (TID 25526)
java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-e67f1694-a91e-4a9b-9222-37ef920bf5f6/f1=4/f2=4/part-00000-934e9565-30dc-41e2-b40e-37d9fe337308.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:56)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:50)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:86)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:02:33.875 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13728.0 (TID 25526, amp-jenkins-worker-04.amp, executor driver): java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-e67f1694-a91e-4a9b-9222-37ef920bf5f6/f1=4/f2=4/part-00000-934e9565-30dc-41e2-b40e-37d9fe337308.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:56)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:50)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:86)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

00:02:33.876 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13728.0 failed 1 times; aborting job
00:02:34.745 WARN org.apache.spark.storage.BlockManager: Putting block rdd_55588_0 failed due to exception java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-e67f1694-a91e-4a9b-9222-37ef920bf5f6/f1=0/f2=0/part-00000-934e9565-30dc-41e2-b40e-37d9fe337308.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved..
00:02:34.745 WARN org.apache.spark.storage.BlockManager: Block rdd_55588_0 could not be removed as it was not found on disk or in memory
00:02:34.746 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13732.0 (TID 25529)
java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-e67f1694-a91e-4a9b-9222-37ef920bf5f6/f1=0/f2=0/part-00000-934e9565-30dc-41e2-b40e-37d9fe337308.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:29)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:43)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at org.apache.spark.sql.execution.columnar.CachedRDDBuilder$$anon$1.hasNext(InMemoryRelation.scala:132)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:221)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1371)
	at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:02:34.748 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13732.0 (TID 25529, amp-jenkins-worker-04.amp, executor driver): java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-e67f1694-a91e-4a9b-9222-37ef920bf5f6/f1=0/f2=0/part-00000-934e9565-30dc-41e2-b40e-37d9fe337308.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:29)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:43)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at org.apache.spark.sql.execution.columnar.CachedRDDBuilder$$anon$1.hasNext(InMemoryRelation.scala:132)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:221)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1371)
	at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

00:02:34.748 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13732.0 failed 1 times; aborting job
[info] - partitioned table is cached when partition pruning is true (3 seconds, 787 milliseconds)
[info] - SPARK-30049 Should not complain for quotes in commented lines (29 seconds, 212 milliseconds)
00:02:38.951 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13739.0 (TID 25535)
java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-15486e47-b35d-4e75-bd37-76e174aec3fe/f1=0/f2=0/part-00000-9967b7a6-ea39-48cb-bbe0-8745a53829a3.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:56)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:38)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:86)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:02:38.954 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13739.0 (TID 25535, amp-jenkins-worker-04.amp, executor driver): java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-15486e47-b35d-4e75-bd37-76e174aec3fe/f1=0/f2=0/part-00000-9967b7a6-ea39-48cb-bbe0-8745a53829a3.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:56)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.agg_doAggregateWithoutKey_0$(generated.java:38)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:86)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:132)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

00:02:38.954 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13739.0 failed 1 times; aborting job
00:02:40.133 WARN org.apache.spark.storage.BlockManager: Putting block rdd_55639_0 failed due to exception java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-15486e47-b35d-4e75-bd37-76e174aec3fe/f1=1/f2=1/part-00000-9967b7a6-ea39-48cb-bbe0-8745a53829a3.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved..
00:02:40.133 WARN org.apache.spark.storage.BlockManager: Block rdd_55639_0 could not be removed as it was not found on disk or in memory
00:02:40.133 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13743.0 (TID 25538)
java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-15486e47-b35d-4e75-bd37-76e174aec3fe/f1=1/f2=1/part-00000-9967b7a6-ea39-48cb-bbe0-8745a53829a3.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:29)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:43)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at org.apache.spark.sql.execution.columnar.CachedRDDBuilder$$anon$1.hasNext(InMemoryRelation.scala:132)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:221)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1371)
	at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:02:40.135 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13743.0 (TID 25538, amp-jenkins-worker-04.amp, executor driver): java.io.FileNotFoundException: File file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/spark-15486e47-b35d-4e75-bd37-76e174aec3fe/f1=1/f2=1/part-00000-9967b7a6-ea39-48cb-bbe0-8745a53829a3.c000.snappy.parquet does not exist
It is possible the underlying files have been updated. You can explicitly invalidate the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by recreating the Dataset/DataFrame involved.
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.org$apache$spark$sql$execution$datasources$FileScanRDD$$anon$$readCurrentFile(FileScanRDD.scala:124)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.nextIterator(FileScanRDD.scala:169)
	at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:93)
	at org.apache.spark.sql.execution.FileSourceScanExec$$anon$1.hasNext(DataSourceScanExec.scala:490)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(generated.java:29)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:43)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:729)
	at org.apache.spark.sql.execution.columnar.CachedRDDBuilder$$anon$1.hasNext(InMemoryRelation.scala:132)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:221)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager.$anonfun$doPutIterator$1(BlockManager.scala:1371)
	at org.apache.spark.storage.BlockManager.org$apache$spark$storage$BlockManager$$doPut(BlockManager.scala:1298)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1362)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:1186)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:360)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:311)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:349)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:313)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:444)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:447)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

00:02:40.135 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13743.0 failed 1 times; aborting job
[info] - partitioned table is cached when partition pruning is false (5 seconds, 150 milliseconds)
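[note] The FileNotFoundException above already names its remedy: stale cached file listings must be invalidated after the underlying files change. A minimal sketch of both forms, assuming an active SparkSession `spark`; the table and path names are hypothetical:

    // Assumes an active SparkSession `spark`; names are hypothetical.
    spark.sql("REFRESH TABLE my_table")          // SQL form quoted in the message
    spark.catalog.refreshTable("my_table")       // programmatic equivalent
    spark.catalog.refreshByPath("/tmp/my_data")  // for path-based (non-table) reads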
00:02:40.731 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:40.731 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:40.731 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
00:02:40.866 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:40.866 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:40.867 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] OptimizeHiveMetadataOnlyQuerySuite:
00:02:40.886 WARN org.apache.hadoop.hive.metastore.HiveMetaStore: Location: file:/home/jenkins/workspace/NewSparkPullRequestBuilder/target/tmp/warehouse-59cfedc6-9b17-41de-b8bc-b778a46d748c/metadata_only specified for non-external table:metadata_only
00:02:42.878 WARN org.apache.spark.sql.internal.WithTestConf$$anon$4: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.015 WARN org.apache.spark.sql.execution.OptimizeMetadataOnlyQuery: Since configuration `spark.sql.optimizer.metadataOnly` is enabled, Spark will scan partition-level metadata without scanning data files. This could result in wrong results when the partition metadata exists but the inclusive data files are empty.
00:02:43.273 WARN org.apache.spark.sql.internal.WithTestConf$$anon$4: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
[info] - SPARK-23877: validate metadata-only query pushes filters to metastore (397 milliseconds)
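[note] The OptimizeMetadataOnlyQuery warning above describes a real correctness hazard: with the optimization enabled, an aggregate that touches only partition columns is answered from metastore metadata, so a registered-but-empty partition can leak into results. A hedged sketch; the `part` column and the partition value are assumptions:

    // Assumes an active SparkSession `spark` and the partitioned table from the log.
    spark.conf.set("spark.sql.optimizer.metadataOnly", "true")
    spark.sql("ALTER TABLE metadata_only ADD PARTITION (part = 99)") // registered, but no data files
    // Reads only the partition column, so it may be answered from metadata
    // and report 99 even though that partition holds no rows.
    spark.sql("SELECT MAX(part) FROM metadata_only").show()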
00:02:43.274 WARN org.apache.spark.sql.internal.WithTestConf$$anon$4: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.395 WARN org.apache.spark.sql.execution.OptimizeMetadataOnlyQuery: Since configuration `spark.sql.optimizer.metadataOnly` is enabled, Spark will scan partition-level metadata without scanning data files. This could result in wrong results when the partition metadata exists but the inclusive data files are empty.
00:02:43.590 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.590 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.617 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.617 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.642 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.642 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.667 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.667 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.693 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.693 WARN org.apache.spark.sql.internal.SQLConf: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
00:02:43.819 WARN org.apache.spark.sql.internal.WithTestConf$$anon$4: The SQL config 'spark.sql.optimizer.metadataOnly' has been deprecated in Spark v3.0 and may be removed in the future. Avoid to depend on this optimization to prevent a potential correctness issue. If you must use, use 'SparkSessionExtensions' instead to inject it as a custom rule.
[info] - SPARK-23877: filter on projected expression (546 milliseconds)
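[note] The deprecation warnings repeated above point at SparkSessionExtensions as the supported replacement for 'spark.sql.optimizer.metadataOnly'. A minimal sketch of injecting a custom optimizer rule that way; the rule name and its no-op body are hypothetical stand-ins:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
    import org.apache.spark.sql.catalyst.rules.Rule

    // Stand-in rule: a real one would rewrite metadata-only aggregates itself.
    case class MyMetadataOnlyRule(spark: SparkSession) extends Rule[LogicalPlan] {
      override def apply(plan: LogicalPlan): LogicalPlan = plan
    }

    val spark = SparkSession.builder()
      .master("local[*]")
      .withExtensions(_.injectOptimizerRule(MyMetadataOnlyRule))
      .getOrCreate()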
00:02:44.201 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:44.201 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:44.201 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
00:02:44.382 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:02:44.383 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:02:44.383 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] ParquetHadoopFsRelationSuite:
[info] - SPARK-30049 Should not complain for quotes in commented with multi-lines (52 seconds, 187 milliseconds)
00:03:34.192 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14138 contains a task of very large size (1747 KiB). The maximum recommended task size is 1000 KiB.
00:03:34.332 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14139 contains a task of very large size (1747 KiB). The maximum recommended task size is 1000 KiB.
00:03:34.919 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14141 contains a task of very large size (1747 KiB). The maximum recommended task size is 1000 KiB.
00:03:34.979 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14142 contains a task of very large size (1747 KiB). The maximum recommended task size is 1000 KiB.
00:03:36.578 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14150 contains a task of very large size (2008 KiB). The maximum recommended task size is 1000 KiB.
00:03:36.731 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14151 contains a task of very large size (2008 KiB). The maximum recommended task size is 1000 KiB.
00:03:37.515 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14153 contains a task of very large size (2008 KiB). The maximum recommended task size is 1000 KiB.
00:03:37.583 WARN org.apache.spark.scheduler.TaskSetManager: Stage 14154 contains a task of very large size (2008 KiB). The maximum recommended task size is 1000 KiB.
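[note] The TaskSetManager warnings above fire when a serialized task exceeds the ~1000 KiB guideline, here presumably because the "test all data types" fixtures parallelize large local rows. Outside of tests, one common mitigation is broadcasting large read-only values instead of capturing them in task closures; a hedged sketch with hypothetical data:

    // Assumes an active SparkSession `spark`. Each executor gets one broadcast
    // copy of `bigLookup` instead of every task carrying its own serialized copy.
    val bigLookup: Map[Long, String] = (0L until 100000L).map(i => i -> s"v$i").toMap
    val bc = spark.sparkContext.broadcast(bigLookup)
    val out = spark.sparkContext.parallelize(0L until 1000000L)
      .map(i => bc.value.getOrElse(i, "missing"))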
[info] - test all data types (1 minute, 2 seconds)
[info] - save()/load() - non-partitioned table - Overwrite (963 milliseconds)
[info] - save()/load() - non-partitioned table - Append (2 seconds, 75 milliseconds)
[info] - save()/load() - non-partitioned table - ErrorIfExists (43 milliseconds)
[info] - save()/load() - non-partitioned table - Ignore (45 milliseconds)
[info] - save()/load() - partitioned table - simple queries (3 seconds, 620 milliseconds)
[info] - save()/load() - partitioned table - Overwrite (1 second, 583 milliseconds)
[info] - save()/load() - partitioned table - Append (1 second, 789 milliseconds)
[info] - save()/load() - partitioned table - Append - new partition values (1 second, 112 milliseconds)
[info] - save()/load() - partitioned table - ErrorIfExists (61 milliseconds)
[info] - save()/load() - partitioned table - Ignore (60 milliseconds)
[info] - saveAsTable()/load() - non-partitioned table - Overwrite (595 milliseconds)
[info] - saveAsTable()/load() - non-partitioned table - Append (1 second, 154 milliseconds)
[info] - saveAsTable()/load() - non-partitioned table - ErrorIfExists (342 milliseconds)
[info] - saveAsTable()/load() - non-partitioned table - Ignore (320 milliseconds)
[info] - saveAsTable()/load() - partitioned table - simple queries (1 second, 945 milliseconds)
[info] - saveAsTable()/load() - partitioned table - boolean type (1 second, 563 milliseconds)
[info] - saveAsTable()/load() - partitioned table - Overwrite (2 seconds, 128 milliseconds)
[info] - saveAsTable()/load() - partitioned table - Append (2 seconds, 562 milliseconds)
[info] - saveAsTable()/load() - partitioned table - Append - new partition values (2 seconds, 593 milliseconds)
[info] - saveAsTable()/load() - partitioned table - Append - mismatched partition columns (721 milliseconds)
[info] - saveAsTable()/load() - partitioned table - ErrorIfExists (35 milliseconds)
[info] - saveAsTable()/load() - partitioned table - Ignore (49 milliseconds)
[info] - load() - with directory of unpartitioned data in nested subdirs (1 second, 40 milliseconds)
[info] - Hadoop style globbing - unpartitioned data (1 second, 672 milliseconds)
[info] - Hadoop style globbing - partitioned data with schema inference (2 seconds, 985 milliseconds)
[info] - SPARK-9735 Partition column type casting (1 second, 987 milliseconds)
[info] - SPARK-7616: adjust column name order accordingly when saving partitioned table (1 second, 608 milliseconds)
[info] - SPARK-8887: Explicitly define which data types can be used as dynamic partition columns (87 milliseconds)
[info] - Locality support for FileScanRDD (635 milliseconds)
[info] - SPARK-16975: Partitioned table with the column having '_' should be read correctly (1 second, 64 milliseconds)
[info] - SPARK-31595 Should allow unescaped quote mark in quoted string (52 seconds, 789 milliseconds)
00:04:25.157 WARN org.apache.spark.sql.execution.datasources.DataSource: Found duplicate column(s) in the data schema and the partition schema: `p1`;
[info] - save()/load() - partitioned table - simple queries - partition columns in data (2 seconds, 554 milliseconds)
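[note] The DataSource warning just above arises when a column lives both inside the data files and in the Hive-style partition directory name. A hedged reconstruction with hypothetical paths:

    import org.apache.spark.sql.functions.lit
    // Assumes an active SparkSession `spark`. `p1` is written into the files
    // AND encoded in the directory name, so the reader sees it twice.
    spark.range(5).withColumn("p1", lit(1))
      .write.mode("overwrite").parquet("/tmp/dup_demo/p1=1")
    spark.read.parquet("/tmp/dup_demo") // warns: duplicate column(s) ... `p1`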
[info] - SPARK-7868: _temporary directories should be ignored (801 milliseconds)
[info] - SPARK-8014: Avoid scanning output directory when SaveMode isn't SaveMode.Append (545 milliseconds)
[info] - SPARK-8079: Avoid NPE thrown from BaseWriterContainer.abortJob (54 milliseconds)
[info] - SPARK-8604: Parquet data source should write summary file while doing appending (1 second, 190 milliseconds)
[info] - SPARK-10334 Projections and filters should be kept in physical plan (518 milliseconds)
[info] - SPARK-11500: Not deterministic order of columns when using merging schemas. (1 second, 16 milliseconds)
[info] - SPARK-13537: Fix readBytes in VectorizedPlainValuesReader (1 second, 210 milliseconds)
[info] - SPARK-13543: Support for specifying compression codec for Parquet via option() (623 milliseconds)
[info] - SPARK-8406: Avoids name collision while writing files (20 seconds, 815 milliseconds)
00:04:53.156 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:04:53.156 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:04:53.156 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
00:04:53.277 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.internal.ss.authz.settings.applied.marker does not exist
00:04:53.277 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
00:04:53.277 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
[info] FiltersSuite:
[info] - string filter (7 milliseconds)
[info] - string filter backwards (0 milliseconds)
[info] - int filter (0 milliseconds)
[info] - int filter backwards (0 milliseconds)
[info] - int and string filter (0 milliseconds)
[info] - skip varchar (0 milliseconds)
[info] - SPARK-19912 String literals should be escaped for Hive metastore partition pruning (0 milliseconds)
[info] - SPARK-24879 null literals should be ignored for IN constructs (4 milliseconds)
[info] - SPARK-24879 IN predicates with only NULLs will not cause a NPE (1 millisecond)
[info] - typecast null literals should not be pushed down in simple predicates (0 milliseconds)
[info] - turn on/off ADVANCED_PARTITION_PREDICATE_PUSHDOWN (3 milliseconds)
[info] HiveSQLViewSuite:
00:04:53.623 WARN org.apache.spark.sql.hive.test.TestHiveExternalCatalog: Couldn't find corresponding Hive SerDe for data source provider json. Persisting data source table `default`.`jt` into Hive metastore in Spark SQL specific format, which is NOT compatible with Hive.
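[note] The TestHiveExternalCatalog warning above is expected whenever a data source table uses a provider Hive has no SerDe for (json here): the table is persisted in Spark's own metastore format and stays usable from Spark, just not directly from Hive. A minimal sketch; the data is hypothetical, the table name is taken from the log line:

    // Assumes an active SparkSession `spark` with Hive support enabled.
    spark.range(3).write.format("json").saveAsTable("jt") // triggers the warning
    spark.table("jt").show()                              // still readable from Spark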
00:04:54.227 WARN org.apache.spark.sql.execution.command.DropTableCommand: org.apache.spark.sql.AnalysisException: Table or view not found: jtv1; line 1 pos 14;
'SubqueryAlias spark_catalog.default.jtv2
+- View (`default`.`jtv2`, [id#198071L,id1#198072L])
   +- 'Project [*]
      +- 'Filter ('id < 6)
         +- 'UnresolvedRelation [jtv1]

org.apache.spark.sql.AnalysisException: Table or view not found: jtv1; line 1 pos 14;
'SubqueryAlias spark_catalog.default.jtv2
+- View (`default`.`jtv2`, [id#198071L,id1#198072L])
   +- 'Project [*]
      +- 'Filter ('id < 6)
         +- 'UnresolvedRelation [jtv1]

	at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1(CheckAnalysis.scala:106)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1$adapted(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:177)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:89)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:130)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:156)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:153)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.$anonfun$analyzed$1(TestHive.scala:619)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed$lzycompute(TestHive.scala:593)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed(TestHive.scala:593)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:58)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:91)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.SparkSession.table(SparkSession.scala:589)
	at org.apache.spark.sql.execution.command.DropTableCommand.run(ddl.scala:240)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3616)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3614)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:92)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.$anonfun$sql$1(TestHive.scala:239)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.sql(TestHive.scala:237)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$2(SQLTestUtils.scala:317)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$1(SQLTestUtils.scala:316)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1386)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView(SQLTestUtils.scala:316)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView$(SQLTestUtils.scala:314)
	at org.apache.spark.sql.execution.SQLViewSuite.withView(SQLViewSuite.scala:31)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$1(SQLViewSuite.scala:49)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:151)
	at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
	at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:286)
	at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:58)
	at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:393)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:381)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:376)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:458)
	at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite.run(Suite.scala:1124)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:518)
	at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:58)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:317)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:510)
	at sbt.ForkMain$Run$2.call(ForkMain.java:296)
	at sbt.ForkMain$Run$2.call(ForkMain.java:286)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
[info] - create a permanent view on a permanent view (585 milliseconds)
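[note] The AnalysisException logged above is a drop-ordering case: `jtv2` was defined on top of `jtv1`, so once `jtv1` is gone, re-resolving `jtv2` during cleanup fails, and DropTableCommand logs and swallows the error. A hedged reconstruction; only the view names and the `id < 6` filter appear in the logged plan, the rest is assumed:

    // Assumes an active SparkSession `spark` and the base table `jt` from the log.
    spark.sql("CREATE VIEW jtv1 AS SELECT * FROM jt WHERE id > 3")
    spark.sql("CREATE VIEW jtv2 AS SELECT * FROM jtv1 WHERE id < 6")
    spark.sql("DROP VIEW jtv1")
    spark.sql("DROP VIEW jtv2") // re-analyzes jtv2 -> 'Table or view not found: jtv1'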
[info] - create a temp view on a permanent view (388 milliseconds)
[info] - create a temp view on a temp view (283 milliseconds)
[info] - create a permanent view on a temp view (105 milliseconds)
[info] - error handling: existing a table with the duplicate name when creating/altering a view (397 milliseconds)
[info] - existing a table with the duplicate name when CREATE VIEW IF NOT EXISTS (437 milliseconds)
[info] - Issue exceptions for ALTER VIEW on the temporary view (32 milliseconds)
[info] - Issue exceptions for ALTER TABLE on the temporary view (86 milliseconds)
[info] - Issue exceptions for other table DDL on the temporary view (129 milliseconds)
[info] - error handling: insert/load/truncate table commands against a view (197 milliseconds)
00:04:56.323 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database db_not_exist234, returning NoSuchObjectException
[info] - error handling: fail if the view sql itself is invalid (52 milliseconds)
[info] - error handling: fail if the temp view name contains the database prefix (2 milliseconds)
[info] - error handling: disallow IF NOT EXISTS for CREATE TEMPORARY VIEW (3 milliseconds)
00:04:56.380 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database db_not_exist234, returning NoSuchObjectException
[info] - error handling: fail if the temp view sql itself is invalid (49 milliseconds)
[info] - correctly parse CREATE VIEW statement (1 second, 124 milliseconds)
00:04:57.560 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database db_c7a47f37_13d7_4114_938b_67af103270b5, returning NoSuchObjectException
00:04:59.373 WARN org.apache.spark.sql.execution.command.DropTableCommand: org.apache.spark.sql.AnalysisException: Table or view not found: view1; line 1 pos 14;
'SubqueryAlias spark_catalog.default.view2
+- View (`default`.`view2`, [id#198192L,id1#198193L])
   +- 'Project [*]
      +- 'UnresolvedRelation [view1]

org.apache.spark.sql.AnalysisException: Table or view not found: view1; line 1 pos 14;
'SubqueryAlias spark_catalog.default.view2
+- View (`default`.`view2`, [id#198192L,id1#198193L])
   +- 'Project [*]
      +- 'UnresolvedRelation [view1]

	at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1(CheckAnalysis.scala:106)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1$adapted(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:177)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:89)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:130)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:156)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:153)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.$anonfun$analyzed$1(TestHive.scala:619)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed$lzycompute(TestHive.scala:593)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed(TestHive.scala:593)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:58)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:91)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.SparkSession.table(SparkSession.scala:589)
	at org.apache.spark.sql.execution.command.DropTableCommand.run(ddl.scala:240)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3616)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3614)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:92)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.$anonfun$sql$1(TestHive.scala:239)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.sql(TestHive.scala:237)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$2(SQLTestUtils.scala:317)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$1(SQLTestUtils.scala:316)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1386)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView(SQLTestUtils.scala:316)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView$(SQLTestUtils.scala:314)
	at org.apache.spark.sql.execution.SQLViewSuite.withView(SQLViewSuite.scala:31)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$52(SQLViewSuite.scala:283)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$52$adapted(SQLViewSuite.scala:282)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTempDatabase(SQLTestUtils.scala:361)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTempDatabase$(SQLTestUtils.scala:352)
	at org.apache.spark.sql.execution.SQLViewSuite.withTempDatabase(SQLViewSuite.scala:31)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$51(SQLViewSuite.scala:282)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:151)
	at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
	at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:286)
	at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:58)
	at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:393)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:381)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:376)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:458)
	at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite.run(Suite.scala:1124)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:518)
	at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:58)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:317)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:510)
	at sbt.ForkMain$Run$2.call(ForkMain.java:296)
	at sbt.ForkMain$Run$2.call(ForkMain.java:286)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
00:04:59.468 WARN org.apache.spark.sql.execution.command.DropTableCommand: org.apache.spark.sql.AnalysisException: Table or view not found: default.view1; line 1 pos 14;
'SubqueryAlias spark_catalog.db_c7a47f37_13d7_4114_938b_67af103270b5.view3
+- View (`db_c7a47f37_13d7_4114_938b_67af103270b5`.`view3`, [id#198194L,id1#198195L])
   +- 'Project [*]
      +- 'UnresolvedRelation [default, view1]

org.apache.spark.sql.AnalysisException: Table or view not found: default.view1; line 1 pos 14;
'SubqueryAlias spark_catalog.db_c7a47f37_13d7_4114_938b_67af103270b5.view3
+- View (`db_c7a47f37_13d7_4114_938b_67af103270b5`.`view3`, [id#198194L,id1#198195L])
   +- 'Project [*]
      +- 'UnresolvedRelation [default, view1]

	at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1(CheckAnalysis.scala:106)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1$adapted(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:177)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$foreachUp$1$adapted(TreeNode.scala:176)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:176)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:92)
	at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:89)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:130)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:156)
	at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:201)
	at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:153)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.$anonfun$analyzed$1(TestHive.scala:619)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed$lzycompute(TestHive.scala:593)
	at org.apache.spark.sql.hive.test.TestHiveQueryExecution.analyzed(TestHive.scala:593)
	at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:58)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:91)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.SparkSession.table(SparkSession.scala:589)
	at org.apache.spark.sql.execution.command.DropTableCommand.run(ddl.scala:240)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:79)
	at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:229)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3616)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:100)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:160)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:87)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3614)
	at org.apache.spark.sql.Dataset.<init>(Dataset.scala:229)
	at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:92)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.$anonfun$sql$1(TestHive.scala:239)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:764)
	at org.apache.spark.sql.hive.test.TestHiveSparkSession.sql(TestHive.scala:237)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$2(SQLTestUtils.scala:317)
	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)
	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.WrappedArray.foreach(WrappedArray.scala:38)
	at org.apache.spark.sql.test.SQLTestUtilsBase.$anonfun$withView$1(SQLTestUtils.scala:316)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1386)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView(SQLTestUtils.scala:316)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withView$(SQLTestUtils.scala:314)
	at org.apache.spark.sql.execution.SQLViewSuite.withView(SQLViewSuite.scala:31)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$52(SQLViewSuite.scala:283)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$52$adapted(SQLViewSuite.scala:282)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTempDatabase(SQLTestUtils.scala:361)
	at org.apache.spark.sql.test.SQLTestUtilsBase.withTempDatabase$(SQLTestUtils.scala:352)
	at org.apache.spark.sql.execution.SQLViewSuite.withTempDatabase(SQLViewSuite.scala:31)
	at org.apache.spark.sql.execution.SQLViewSuite.$anonfun$new$51(SQLViewSuite.scala:282)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.scalatest.OutcomeOf.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf.outcomeOf$(OutcomeOf.scala:83)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:186)
	at org.apache.spark.SparkFunSuite.withFixture(SparkFunSuite.scala:151)
	at org.scalatest.FunSuiteLike.invokeWithFixture$1(FunSuiteLike.scala:184)
	at org.scalatest.FunSuiteLike.$anonfun$runTest$1(FunSuiteLike.scala:196)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:286)
	at org.scalatest.FunSuiteLike.runTest(FunSuiteLike.scala:196)
	at org.scalatest.FunSuiteLike.runTest$(FunSuiteLike.scala:178)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterEach$$super$runTest(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterEach.runTest(BeforeAndAfterEach.scala:221)
	at org.scalatest.BeforeAndAfterEach.runTest$(BeforeAndAfterEach.scala:214)
	at org.apache.spark.SparkFunSuite.runTest(SparkFunSuite.scala:58)
	at org.scalatest.FunSuiteLike.$anonfun$runTests$1(FunSuiteLike.scala:229)
	at org.scalatest.SuperEngine.$anonfun$runTestsInBranch$1(Engine.scala:393)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:381)
	at org.scalatest.SuperEngine.runTestsInBranch(Engine.scala:376)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:458)
	at org.scalatest.FunSuiteLike.runTests(FunSuiteLike.scala:229)
	at org.scalatest.FunSuiteLike.runTests$(FunSuiteLike.scala:228)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1560)
	at org.scalatest.Suite.run(Suite.scala:1124)
	at org.scalatest.Suite.run$(Suite.scala:1106)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1560)
	at org.scalatest.FunSuiteLike.$anonfun$run$1(FunSuiteLike.scala:233)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:518)
	at org.scalatest.FunSuiteLike.run(FunSuiteLike.scala:233)
	at org.scalatest.FunSuiteLike.run$(FunSuiteLike.scala:232)
	at org.apache.spark.SparkFunSuite.org$scalatest$BeforeAndAfterAll$$super$run(SparkFunSuite.scala:58)
	at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:213)
	at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
	at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
	at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:58)
	at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:317)
	at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:510)
	at sbt.ForkMain$Run$2.call(ForkMain.java:296)
	at sbt.ForkMain$Run$2.call(ForkMain.java:286)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
[info] - correctly parse a nested view (1 second, 993 milliseconds)
[info] - correctly parse CREATE TEMPORARY VIEW statement (638 milliseconds)
[info] - should NOT allow CREATE TEMPORARY VIEW when TEMPORARY VIEW with same name exists (62 milliseconds)
[info] - should allow CREATE TEMPORARY VIEW when a permanent VIEW with same name exists (216 milliseconds)
[info] - should allow CREATE permanent VIEW when a TEMPORARY VIEW with same name exists (201 milliseconds)
00:05:01.086 WARN org.apache.spark.sql.hive.test.TestHiveExternalCatalog: Couldn't find corresponding Hive SerDe for data source provider json. Persisting data source table `default`.`jt2` into Hive metastore in Spark SQL specific format, which is NOT compatible with Hive.
Attempting to post to Github...
[error] running /home/jenkins/workspace/NewSparkPullRequestBuilder/build/sbt -Phadoop-2.7 -Phive-2.3 -Pyarn -Pkubernetes -Phive-thriftserver -Phive -Phadoop-cloud -Pspark-ganglia-lgpl -Pmesos -Pkinesis-asl -Dtest.exclude.tags=org.apache.spark.tags.ExtendedYarnTest test ; process was terminated by signal 9
 > Post successful.
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Finished: FAILURE