Failed
Console Output

Skipping 46,191 KB of earlier output; the tail of the log follows.
.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitStatementDefault(SqlBaseBaseVisitor.java:69)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$StatementDefaultContext.accept(SqlBaseParser.java:1904)
	at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:18)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitSingleStatement$1(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.$anonfun$parsePlan$1(ParseDriver.scala:82)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:116)
	at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:49)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:81)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$2(SparkSession.scala:604)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:604)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:601)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:286)
	... 16 more
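
The fragment above is the tail of a parser stack trace; the log skip cut off its head, so the exception type and the failing statement are not visible here. The surviving frames show Spark SQL's standard parse path: SparkSession.sql hands the text to AbstractSqlParser.parsePlan, which walks the ANTLR tree through AstBuilder.visitSingleStatement and visitChildren. Below is a minimal sketch of how a statement surfaces an error along that path; the SQL is hypothetical, since the real statement was truncated away:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.catalyst.parser.ParseException

    object ParsePathSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().master("local[1]").appName("parse-path").getOrCreate()
        try {
          // Hypothetical malformed statement: any syntax error travels the same
          // frames seen above (parsePlan -> visitSingleStatement -> visitChildren).
          spark.sql("SELEC 1")
        } catch {
          case e: ParseException =>
            println("parse failed: " + e.getMessage.takeWhile(_ != '\n'))
        } finally {
          spark.stop()
        }
      }
    }
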
07:00:56.086 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13676.0 (TID 26078)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
07:00:56.087 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13676.0 (TID 26078, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

07:00:56.087 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13676.0 failed 1 times; aborting job
07:00:56.089 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with afd71d84-01e3-4c92-833e-71434230bac8, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13676.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13676.0 (TID 26078, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3667)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3658)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3656)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
07:00:56.089 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
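
The reports above are one task failure surfaced at several layers: the executor, the TaskSetManager, and twice by the Thrift server operation. The root cause is the same in every trace: IntervalUtils.negateExact delegates to java.lang.Math.negateExact on the interval's 32-bit fields, and negating the minimum 32-bit value has no representable result. Spark only takes this checked path when interval overflow checking is on, so these tests were likely running with spark.sql.ansi.enabled=true. A stand-alone sketch of the arithmetic follows; this is not Spark's actual IntervalUtils code:

    // Int.MinValue months corresponds to an interval such as
    // '-178956970-8' year to month (-178956970 * 12 - 8 = -2147483648);
    // the concrete value is an assumption, since the log omits the query.
    val months: Int = Int.MinValue
    try {
      Math.negateExact(months) // throws: -Int.MinValue does not fit in an Int
    } catch {
      case e: ArithmeticException => println(e.getMessage) // "integer overflow"
    }
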
07:00:56.128 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13677.0 (TID 26079)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
07:00:56.129 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13677.0 (TID 26079, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

07:00:56.129 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13677.0 failed 1 times; aborting job
07:00:56.131 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with 3fbfa4b6-0b71-4ef9-a264-06e4b04c9f9a, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13677.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13677.0 (TID 26079, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3667)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3658)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3656)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
07:00:56.131 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
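
The second failure (stage 13677) is the same overflow through checked subtraction: IntervalUtils.subtractExact combines the operands' fields with java.lang.Math.subtractExact, which throws instead of silently wrapping the way plain - would. A minimal illustration of the difference, with assumed field values:

    val a: Int = Int.MinValue          // months of the left operand (assumed value)
    val b: Int = 1
    println(a - b)                     // unchecked: wraps around to 2147483647
    println(Math.subtractExact(a, b))  // checked: ArithmeticException: integer overflow
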
07:00:56.168 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13678.0 (TID 26080)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
07:00:56.169 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13678.0 (TID 26080, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

07:00:56.170 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13678.0 failed 1 times; aborting job
07:00:56.171 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with e85ee152-2c6b-443f-bca3-394ff97830c5, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13678.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13678.0 (TID 26080, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3667)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3658)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3656)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
07:00:56.172 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
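
The third failure (stage 13678) mirrors the first two through Math.addExact. At the SQL level, a statement of roughly the following shape exercises the path; both the statement and the ANSI flag are assumptions, since the log shows neither, and the snippet reuses a local SparkSession built as in the first sketch:

    // Interval addition only routes through the *Exact helpers when overflow
    // checking is enabled; 2147483647 months + 1 month has no 32-bit result.
    spark.conf.set("spark.sql.ansi.enabled", "true")
    spark.sql("SELECT INTERVAL 2147483647 MONTH + INTERVAL 1 MONTH").collect()
    // expected: SparkException caused by java.lang.ArithmeticException:
    // integer overflow, raised from IntervalUtils.addExact
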
07:00:56.215 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13679.0 (TID 26081)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
07:00:56.216 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13679.0 (TID 26081, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

07:00:56.217 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13679.0 failed 1 times; aborting job
07:00:56.220 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with ffa9a213-bc8b-4713-9d0f-c2c6b074bc3f, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13679.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13679.0 (TID 26081, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3667)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3658)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3656)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
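
The caused-by chain above pins down the failure: under ANSI semantics, interval-by-numeric multiplication goes through IntervalUtils.multiplyExact and then fromDoubles, which narrows the scaled months value back to an Int with Math.toIntExact, so any product outside the 32-bit range throws instead of silently wrapping. A minimal sketch of that narrowing pattern (illustrative only, not Spark's actual source):

    // Illustrative sketch of the exact narrowing that fails above, not Spark's code.
    // CalendarInterval stores months as an Int; multiplication is done in double
    // precision and narrowed back with Math.toIntExact, which throws
    // ArithmeticException("integer overflow") rather than wrapping.
    def multiplyMonthsExact(months: Int, num: Double): Int = {
      val scaled = months * num          // computed as a Double
      Math.toIntExact(scaled.toLong)     // throws if the result does not fit in an Int
    }

    // multiplyMonthsExact(Int.MaxValue, 2.0)
    //   => java.lang.ArithmeticException: integer overflow
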
07:00:56.221 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
07:00:56.286 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13680.0 (TID 26082)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
07:00:56.288 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13680.0 (TID 26082, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	[duplicate stack trace elided: identical to the executor failure in stage 13680.0 immediately above]

07:00:56.288 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13680.0 failed 1 times; aborting job
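
The stage is aborted after a single attempt because the suite runs with a plain local master (the lost-task message says "executor driver"), and Spark's local scheduler hard-codes maxTaskFailures = 1, so the usual spark.task.maxFailures retry budget never applies. A minimal sketch of the distinction:

    // Plain local masters abort a job on the first task failure; the
    // local[N,maxFailures] form (or a cluster master, where spark.task.maxFailures
    // defaults to 4) allows retries before the job is aborted.
    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder()
      .master("local[2,4]")   // 2 threads, up to 4 attempts per task
      .getOrCreate()
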
07:00:56.290 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with b24fdcb6-9676-4ff8-8a2a-d6ffd622121d, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13680.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13680.0 (TID 26082, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3667)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3658)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3656)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
07:00:56.290 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
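
All of these multiplyExact/divideExact overflows belong to the ansi/interval.sql golden-file run reported a few lines below; with spark.sql.ansi.enabled=true the interval operators use exact arithmetic, and the test file deliberately feeds them values whose months field cannot fit in an Int. Representative repro queries (hypothetical, not the literal statements in the golden file):

    // Hypothetical queries in the spirit of ansi/interval.sql, with `spark`
    // an active SparkSession:
    spark.conf.set("spark.sql.ansi.enabled", "true")
    spark.sql("SELECT INTERVAL 2147483647 MONTH * 2").collect()
    //   => java.lang.ArithmeticException via IntervalUtils.multiplyExact
    spark.sql("SELECT INTERVAL 2147483647 MONTH / 0.5").collect()
    //   => java.lang.ArithmeticException via IntervalUtils.divideExact
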
07:00:56.441 ERROR org.apache.thrift.server.TThreadPoolServer: Thrift error occurred during processing of message.
org.apache.thrift.transport.TTransportException
	at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.transport.TSaslTransport.readLength(TSaslTransport.java:374)
	at org.apache.thrift.transport.TSaslTransport.readFrame(TSaslTransport.java:451)
	at org.apache.thrift.transport.TSaslTransport.read(TSaslTransport.java:433)
	at org.apache.thrift.transport.TSaslServerTransport.read(TSaslServerTransport.java:43)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:425)
	at org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:321)
	at org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:225)
	at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:27)
	at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:53)
	at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:310)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
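
The bare TTransportException here is the SASL transport hitting end-of-stream while blocking for the next frame, i.e. the JDBC client closing its connection once the statement finished; TThreadPoolServer logs this routine disconnect at ERROR level, so it is noise rather than a test failure (the identical trace a few lines below is the same phenomenon).
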
[info] - ansi/interval.sql (2 seconds, 714 milliseconds)
[info] - ansi/literals.sql !!! IGNORED !!!
07:00:56.525 ERROR org.apache.thrift.server.TThreadPoolServer: Thrift error occurred during processing of message.
org.apache.thrift.transport.TTransportException
	at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.transport.TSaslTransport.readLength(TSaslTransport.java:374)
	at org.apache.thrift.transport.TSaslTransport.readFrame(TSaslTransport.java:451)
	at org.apache.thrift.transport.TSaslTransport.read(TSaslTransport.java:433)
	at org.apache.thrift.transport.TSaslServerTransport.read(TSaslServerTransport.java:43)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:425)
	at org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:321)
	at org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:225)
	at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:27)
	at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:53)
	at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:310)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
[info] - Check if ThriftServer can work (82 milliseconds)
07:00:56.667 WARN org.apache.spark.sql.hive.thriftserver.ThriftServerQueryTestSuite: 
=== Metrics of Analyzer/Optimizer Rules ===
Total number of runs: 5430697
Total time: 183.824394879 seconds

Rule                                                                                               Effective Time / Total Time (ns)                Effective Runs / Total Runs                    

org.apache.spark.sql.catalyst.optimizer.Optimizer$OptimizeSubqueries                               21432876352 / 27192229611                       604 / 20769                                    
org.apache.spark.sql.catalyst.optimizer.ColumnPruning                                              2066685711 / 9545200194                         4168 / 48783                                   
org.apache.spark.sql.execution.datasources.FindDataSourceTable                                     6658857136 / 6833883688                         2486 / 55819                                   
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveSubquery                                    3366581141 / 3977331270                         687 / 55857                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences                                  2271932600 / 3705047305                         6900 / 55907                                   
org.apache.spark.sql.catalyst.optimizer.PushDownPredicates                                         556230035 / 2764046871                          1893 / 38528                                   
org.apache.spark.sql.catalyst.optimizer.PruneFilters                                               18665897 / 2719698614                           144 / 38314                                    
org.apache.spark.sql.catalyst.optimizer.RemoveNoopOperators                                        194531312 / 2652415904                          1791 / 48614                                   
org.apache.spark.sql.catalyst.optimizer.LikeSimplification                                         12391663 / 2528301244                           20 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.ConstantFolding                                            966434979 / 2466215455                          3305 / 28014                                   
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAggregateFunctions                          951924079 / 2377936572                          253 / 55840                                    
org.apache.spark.sql.catalyst.optimizer.CollapseProject                                            750120066 / 2280638431                          5063 / 38483                                   
org.apache.spark.sql.catalyst.optimizer.SimplifyCasts                                              2963103 / 1970131958                            20 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.NullPropagation                                            52173936 / 1967907292                           361 / 28183                                    
org.apache.spark.sql.catalyst.optimizer.RemoveDispensableExpressions                               711045 / 1965596656                             6 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.CombineUnions                                              7535660 / 1964476953                            35 / 38652                                     
org.apache.spark.sql.catalyst.optimizer.BooleanSimplification                                      12595656 / 1952381657                           61 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.ReorderAssociativeOperator                                 0 / 1811956117                                  0 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.SimplifyConditionals                                       11657902 / 1769739947                           156 / 28014                                    
org.apache.spark.sql.catalyst.optimizer.OptimizeIn                                                 809524 / 1759448234                             4 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.SimplifyBinaryComparison                                   2273088 / 1695274079                            15 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.SimplifyExtractValueOps                                    358909 / 1694285077                             3 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.SimplifyCaseConversionExpressions                          0 / 1685065942                                  0 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.ReplaceNullWithFalseInPredicate                            500597 / 1651114100                             9 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.EliminateOuterJoin                                         133994778 / 1645960689                          174 / 28183                                    
org.apache.spark.sql.catalyst.optimizer.PushProjectionThroughUnion                                 15313952 / 1552681348                           32 / 28183                                     
org.apache.spark.sql.catalyst.optimizer.ReorderJoin                                                18401402 / 1534258218                           30 / 28183                                     
org.apache.spark.sql.catalyst.optimizer.PushDownLeftSemiAntiJoin                                   65258029 / 1529918763                           151 / 28183                                    
org.apache.spark.sql.catalyst.optimizer.CollapseRepartition                                        0 / 1525691395                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.EliminateResolvedHint                                      0 / 1522116098                                  0 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.PushLeftSemiLeftAntiThroughJoin                            231892 / 1520141860                             1 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.RewriteCorrelatedScalarSubquery                            5373324 / 1519106744                            5 / 28014                                      
org.apache.spark.sql.catalyst.analysis.CTESubstitution                                             33065712 / 1485953042                           104 / 23832                                    
org.apache.spark.sql.catalyst.optimizer.InferFiltersFromConstraints                                678096079 / 1472797692                          1184 / 10300                                   
org.apache.spark.sql.catalyst.optimizer.ConstantPropagation                                        826900 / 1460025694                             3 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.LimitPushDown                                              0 / 1422886520                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.CollapseWindow                                             0 / 1422415561                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.TransposeWindow                                            0 / 1421805673                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.CombineFilters                                             0 / 1406038489                                  0 / 28183                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations                                   1046551899 / 1398881568                         8703 / 55962                                   
org.apache.spark.sql.catalyst.optimizer.CombineLimits                                              0 / 1384516029                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.FoldablePropagation                                        64567838 / 1380223800                           163 / 28183                                    
org.apache.spark.sql.catalyst.optimizer.EliminateSerialization                                     0 / 1375878307                                  0 / 28014                                      
org.apache.spark.sql.catalyst.analysis.ResolveSessionCatalog                                       502187969 / 1364836570                          1441 / 55815                                   
org.apache.spark.sql.catalyst.optimizer.NormalizeFloatingNumbers                                   61067426 / 1315151300                           39 / 10300                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$ImplicitTypeCasts                              321452311 / 1312151113                          1164 / 55828                                   
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveFunctions                                   614451431 / 1293960494                          3660 / 55857                                   
org.apache.spark.sql.catalyst.optimizer.GetCurrentDatabaseAndCatalog                               471608 / 1285972216                             1 / 10469                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ExtractGenerator                                   6555685 / 1260304132                            8 / 55890                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveDeserializer                                750631931 / 1252947284                          9029 / 55907                                   
org.apache.spark.sql.catalyst.analysis.DecimalPrecision                                            178875257 / 1175377146                          1184 / 55828                                   
org.apache.spark.sql.catalyst.optimizer.PropagateEmptyRelation                                     7821740 / 1074866489                            59 / 20828                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$PromoteStrings                                 119223510 / 1022437083                          757 / 55828                                    
org.apache.spark.sql.catalyst.optimizer.RemoveRedundantAliases                                     17132979 / 979608233                            97 / 28014                                     
org.apache.spark.sql.execution.datasources.SchemaPruning                                           12981091 / 964597046                            5 / 10300                                      
org.apache.spark.sql.catalyst.analysis.UpdateAttributeNullability                                  28418751 / 946020371                            114 / 23360                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$LookupFunctions                                    0 / 944926091                                   0 / 23520                                      
org.apache.spark.sql.catalyst.analysis.ResolveTimeZone                                             510006826 / 942744803                           7601 / 55828                                   
org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases                                    782286050 / 930434900                           5807 / 10469                                   
org.apache.spark.sql.catalyst.analysis.TypeCoercion$FunctionArgumentConversion                     148102169 / 920124524                           388 / 55828                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAliases                                     502554018 / 896035972                           4833 / 55857                                   
org.apache.spark.sql.catalyst.optimizer.ComputeCurrentTime                                         2867668 / 864462518                             7 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.ReplaceExpressions                                         41762743 / 860761886                            376 / 10469                                    
org.apache.spark.sql.catalyst.optimizer.EliminateMapObjects                                        0 / 858892056                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$InConversion                                   39626905 / 848083064                            131 / 55828                                    
org.apache.spark.sql.execution.datasources.v2.V2ScanRelationPushDown                               0 / 842916165                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.PullupCorrelatedPredicates                                 194095090 / 829373242                           263 / 10469                                    
org.apache.spark.sql.execution.dynamicpruning.PartitionPruning                                     0 / 813846395                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.RewriteNonCorrelatedExists                                 10322002 / 774605092                            63 / 10469                                     
org.apache.spark.sql.catalyst.analysis.TimeWindowing                                               0 / 724511114                                   0 / 55840                                      
org.apache.spark.sql.catalyst.optimizer.ReplaceExceptWithFilter                                    19874643 / 724032448                            21 / 10741                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$DateTimeOperations                             6443802 / 719459315                             64 / 55828                                     
org.apache.spark.sql.execution.dynamicpruning.CleanupDynamicPruningFilters                         0 / 710691478                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$IfCoercion                                     14764930 / 695666820                            65 / 55828                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveBinaryArithmetic                            23992630 / 692105580                            182 / 55828                                    
org.apache.spark.sql.execution.python.ExtractPythonUDFs                                            0 / 683786277                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$BooleanEquality                                4233097 / 682218530                             68 / 55828                                     
org.apache.spark.sql.catalyst.analysis.ResolveCreateNamedStruct                                    6224486 / 673475923                             37 / 55907                                     
org.apache.spark.sql.execution.datasources.PruneFileSourcePartitions                               13095697 / 672760048                            2 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.RewriteDistinctAggregates                                  25881377 / 668705940                            8 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.ReassignLambdaVariableID                                   0 / 666437304                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$StringLiteralCoercion                          3011019 / 651548811                             6 / 55819                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$Division                                       15332342 / 625814937                            93 / 55828                                     
org.apache.spark.sql.catalyst.expressions.codegen.package$ExpressionCanonicalizer$CleanExpressions 15433277 / 621916835                            4992 / 143389                                  
org.apache.spark.sql.catalyst.optimizer.RewriteIntersectAll                                        12171917 / 618806053                            16 / 10741                                     
org.apache.spark.sql.catalyst.optimizer.RewritePredicateSubquery                                   179015932 / 609458422                           547 / 10300                                    
org.apache.spark.sql.catalyst.analysis.TypeCoercion$IntegralDivision                               619005 / 600750540                              4 / 55828                                      
org.apache.spark.sql.catalyst.optimizer.RewriteExceptAll                                           10374240 / 597537100                            23 / 10741                                     
org.apache.spark.sql.catalyst.optimizer.ReplaceIntersectWithSemiJoin                               13569463 / 596754735                            51 / 10741                                     
org.apache.spark.sql.catalyst.analysis.EliminateView                                               3221699 / 595121554                             4 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.PushCNFPredicateThroughJoin                                9337502 / 591560792                             38 / 10300                                     
org.apache.spark.sql.catalyst.optimizer.ReplaceDistinctWithAggregate                               46482137 / 591033411                            245 / 10741                                    
org.apache.spark.sql.catalyst.optimizer.ReplaceExceptWithAntiJoin                                  10515484 / 590599077                            29 / 10741                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ExtractWindowExpressions                           166056142 / 583700539                           424 / 55840                                    
org.apache.spark.sql.catalyst.optimizer.DecimalAggregates                                          3222494 / 570856020                             6 / 10306                                      
org.apache.spark.sql.catalyst.analysis.ResolveHigherOrderFunctions                                 17671026 / 567251339                            121 / 55828                                    
org.apache.spark.sql.catalyst.analysis.TypeCoercion$ConcatCoercion                                 5280817 / 566153439                             20 / 55828                                     
org.apache.spark.sql.catalyst.optimizer.RemoveRepetitionFromGroupExpressions                       3463211 / 563508496                             27 / 10518                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveMissingReferences                           15646430 / 561030539                            32 / 55890                                     
org.apache.spark.sql.catalyst.optimizer.ReplaceDeduplicateWithAggregate                            0 / 560434392                                   0 / 10469                                      
org.apache.spark.sql.execution.python.ExtractPythonUDFFromAggregate                                0 / 560051683                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.RemoveLiteralFromGroupExpressions                          1661813 / 553078837                             22 / 10518                                     
org.apache.spark.sql.catalyst.optimizer.OptimizeLimitZero                                          842277 / 550918943                              3 / 10469                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$EltCoercion                                    2504992 / 546102133                             3 / 55828                                      
org.apache.spark.sql.catalyst.optimizer.EliminateSorts                                             4346076 / 544408779                             28 / 10300                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRandomSeed                                  481320 / 540239715                              2 / 55828                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveWindowFrame                                 16079100 / 532707728                            170 / 55852                                    
org.apache.spark.sql.catalyst.optimizer.PushPredicateThroughNonJoin                                0 / 528475915                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveWindowOrder                                 7000219 / 521903157                             24 / 55852                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveGroupingAnalytics                           163200722 / 514780983                           89 / 55903                                     
org.apache.spark.sql.catalyst.optimizer.ExtractPythonUDFFromJoinCondition                          0 / 503731837                                   0 / 10300                                      
org.apache.spark.sql.execution.python.ExtractGroupingPythonUDFFromAggregate                        0 / 498369731                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$CaseWhenCoercion                               21459370 / 496843876                            71 / 55828                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$WindowFrameCoercion                            18146654 / 492084475                            81 / 55828                                     
org.apache.spark.sql.catalyst.analysis.ResolveLambdaVariables                                      41943372 / 491478866                            123 / 55828                                    
org.apache.spark.sql.catalyst.analysis.ResolveInlineTables                                         282734208 / 478106356                           1644 / 55832                                   
org.apache.spark.sql.catalyst.optimizer.ObjectSerializerPruning                                    0 / 475541777                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$MapZipWithCoercion                             8938454 / 472594998                             13 / 55828                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$StackCoercion                                  0 / 455125400                                   0 / 55828                                      
org.apache.spark.sql.catalyst.analysis.CleanupAliases                                              193687773 / 451505969                           6952 / 30312                                   
org.apache.spark.sql.catalyst.analysis.ResolveHints$ResolveJoinStrategyHints                       0 / 449565775                                   0 / 23527                                      
org.apache.spark.sql.catalyst.optimizer.CombineTypedFilters                                        0 / 448947440                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$GlobalAggregates                                   10660618 / 434401478                            450 / 55840                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNewInstance                                 0 / 419122509                                   0 / 55907                                      
org.apache.spark.sql.catalyst.analysis.ResolveCatalogs                                             38172215 / 410261485                            252 / 55962                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveTables                                      0 / 357494073                                   0 / 55962                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$PullOutNondeterministic                            5494630 / 341354311                             2 / 23360                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast                                      0 / 332249640                                   0 / 55907                                      
org.apache.spark.sql.execution.analysis.DetectAmbiguousSelfJoin                                    0 / 301967524                                   0 / 23361                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOrdinalInOrderByAndGroupBy                  7631146 / 282696559                             143 / 55890                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$HandleNullInputsForUDF                             0 / 272750370                                   0 / 23360                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$WidenSetOperationTypes                         37785098 / 265668421                            106 / 55828                                    
org.apache.spark.sql.catalyst.analysis.ResolveTableValuedFunctions                                 34799196 / 242401052                            133 / 55962                                    
org.apache.spark.sql.execution.datasources.PreprocessTableInsertion                                148267362 / 238636650                           867 / 23360                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolvePivot                                       39360628 / 236491626                            21 / 55897                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin                         58160283 / 235305029                            312 / 55852                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveEncodersInUDF                               0 / 233468772                                   0 / 23360                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAggAliasInGroupBy                           2389373 / 224977816                             15 / 55890                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveSubqueryColumnAliases                       22558755 / 209962043                            600 / 55855                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveGenerate                                    1631666 / 209241482                             3 / 55890                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNamespace                                   4186531 / 198354590                             25 / 55962                                     
org.apache.spark.sql.execution.datasources.ResolveSQLOnFile                                        0 / 190218406                                   0 / 55819                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOutputRelation                              0 / 186374370                                   0 / 55852                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveInsertInto                                  0 / 178168458                                   0 / 55962                                      
org.apache.spark.sql.execution.datasources.FallBackFileSourceV2                                    0 / 173313900                                   0 / 55819                                      
org.apache.spark.sql.execution.datasources.PreprocessTableCreation                                 299554 / 169789238                              1 / 23361                                      
org.apache.spark.sql.catalyst.analysis.ResolveHints$ResolveCoalesceHints                           165666 / 152712443                              1 / 23527                                      
org.apache.spark.sql.execution.datasources.DataSourceAnalysis                                      55400357 / 149413112                            1067 / 23360                                   
org.apache.spark.sql.catalyst.analysis.SubstituteUnresolvedOrdinals                                13111187 / 121999609                            152 / 23832                                    
org.apache.spark.sql.catalyst.optimizer.CombineConcats                                             1507011 / 116646272                             7 / 28014                                      
org.apache.spark.sql.catalyst.analysis.UpdateOuterReferences                                       5522220 / 114616120                             9 / 23360                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$WindowsSubstitution                                13189279 / 106731301                            78 / 23832                                     
org.apache.spark.sql.catalyst.analysis.EliminateUnions                                             0 / 94615379                                    0 / 23832                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAlterTableChanges                           0 / 93177285                                    0 / 23360                                      
org.apache.spark.sql.catalyst.optimizer.CheckCartesianProducts                                     0 / 83183894                                    0 / 20600                                      
org.apache.spark.sql.catalyst.analysis.ResolveHints$RemoveAllHints                                 0 / 82561223                                    0 / 23360                                      
org.apache.spark.sql.catalyst.optimizer.EliminateDistinct                                          0 / 81667919                                    0 / 10469                                      
org.apache.spark.sql.execution.OptimizeMetadataOnlyQuery                                           0 / 56634168                                    0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.CostBasedJoinReorder                                       0 / 54457325                                    0 / 10300                                      
     
07:00:56.667 WARN org.apache.spark.sql.hive.thriftserver.ThriftServerQueryTestSuite: 
=== Metrics of Whole-stage Codegen ===
Total code generation time: 34.767007009 seconds
Total compile time: 59.474663827 seconds
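
Both metric dumps are emitted by the suite's teardown: the per-rule numbers come from catalyst's RuleExecutor singleton (raw times in nanoseconds, with the headline totals pre-converted to seconds), and SQLQueryTestSuite-derived suites reset and dump them around each run. A sketch, assuming Spark 3.0's RuleExecutor companion-object API and an active SparkSession `spark`:

    // Sketch of producing the "Metrics of Analyzer/Optimizer Rules" report,
    // assuming org.apache.spark.sql.catalyst.rules.RuleExecutor's companion API:
    import org.apache.spark.sql.catalyst.rules.RuleExecutor

    RuleExecutor.resetMetrics()             // zero the per-rule timers
    spark.sql("SELECT 1").collect()         // run the workload under measurement
    println(RuleExecutor.dumpTimeSpent())   // prints a report like the one above
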
         
07:00:56.902 WARN org.apache.spark.sql.hive.thriftserver.ThriftServerQueryTestSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.hive.thriftserver.ThriftServerQueryTestSuite, thread names: rpc-boss-3-1, subquery-43, derby.rawStoreDaemon, com.google.common.base.internal.Finalizer, subquery-42, Timer-3, subquery-41, BoneCP-keep-alive-scheduler, shuffle-boss-6-1, BoneCP-pool-watch-thread =====

[info] - SPARK-28840 test --jars command (23 seconds, 262 milliseconds)
[info] - SPARK-28840 test --jars and hive.aux.jars.path command (24 seconds, 674 milliseconds)
[info] - SPARK-29022 Commands using SerDe provided in ADD JAR sql (26 seconds, 842 milliseconds)
[info] - SPARK-26321 Should not split semicolon within quoted string literals (20 seconds, 646 milliseconds)
[info] - Pad Decimal numbers with trailing zeros to the scale of the column (21 seconds, 561 milliseconds)
[info] - SPARK-30049 Should not complain for quotes in commented lines (20 seconds, 589 milliseconds)
[info] - SPARK-31102 spark-sql fails to parse when contains comment (20 seconds, 354 milliseconds)
[info] - SPARK-30049 Should not complain for quotes in commented with multi-lines (21 seconds, 471 milliseconds)
[info] - SPARK-31595 Should allow unescaped quote mark in quoted string (39 seconds, 339 milliseconds)
[info] - AnalysisException with root cause will be printStacktrace (30 seconds, 471 milliseconds)
[info] - SPARK-30808: use Java 8 time API in Thrift SQL CLI by default (18 seconds, 396 milliseconds)
[info] HiveSessionImplSuite:
[info] org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite *** ABORTED *** (675 milliseconds)
[info]   org.mockito.exceptions.base.MockitoException: ClassCastException occurred while creating the mockito mock :
[info]   class to mock : 'org.apache.hive.service.cli.session.SessionManager', loaded by classloader : 'sun.misc.Launcher$AppClassLoader@2d6d8735'
[info]   created class : 'org.mockito.codegen.SessionManager$MockitoMock$1350255146', loaded by classloader : 'net.bytebuddy.dynamic.loading.MultipleParentClassLoader@78ded1c6'
[info]   proxy instance class : 'org.mockito.codegen.SessionManager$MockitoMock$1350255146', loaded by classloader : 'net.bytebuddy.dynamic.loading.MultipleParentClassLoader@78ded1c6'
[info]   instance creation by : ObjenesisInstantiator
[info] 
[info] You might experience classloading issues, please ask the mockito mailing-list.
[info]   at org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite.beforeAll(HiveSessionImplSuite.scala:44)
[info]   at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:212)
[info]   at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
[info]   at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
[info]   at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:59)
[info]   at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:317)
[info]   at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:510)
[info]   at sbt.ForkMain$Run$2.call(ForkMain.java:296)
[info]   at sbt.ForkMain$Run$2.call(ForkMain.java:286)
[info]   at java.util.concurrent.FutureTask.run(FutureTask.java:266)
[info]   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[info]   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[info]   at java.lang.Thread.run(Thread.java:748)
[info]   Cause: java.lang.ClassCastException: org.mockito.codegen.SessionManager$MockitoMock$1350255146 cannot be cast to org.mockito.internal.creation.bytebuddy.MockAccess
[info]   at org.mockito.internal.creation.bytebuddy.SubclassByteBuddyMockMaker.createMock(SubclassByteBuddyMockMaker.java:48)
[info]   at org.mockito.internal.creation.bytebuddy.ByteBuddyMockMaker.createMock(ByteBuddyMockMaker.java:25)
[info]   at org.mockito.internal.util.MockUtil.createMock(MockUtil.java:35)
[info]   at org.mockito.internal.MockitoCore.mock(MockitoCore.java:63)
[info]   at org.mockito.Mockito.mock(Mockito.java:1908)
[info]   at org.mockito.Mockito.mock(Mockito.java:1817)
[info]   at org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite.beforeAll(HiveSessionImplSuite.scala:44)
[info]   at org.scalatest.BeforeAndAfterAll.liftedTree1$1(BeforeAndAfterAll.scala:212)
[info]   at org.scalatest.BeforeAndAfterAll.run(BeforeAndAfterAll.scala:210)
[info]   at org.scalatest.BeforeAndAfterAll.run$(BeforeAndAfterAll.scala:208)
[info]   at org.apache.spark.SparkFunSuite.run(SparkFunSuite.scala:59)
[info]   at org.scalatest.tools.Framework.org$scalatest$tools$Framework$$runSuite(Framework.scala:317)
[info]   at org.scalatest.tools.Framework$ScalaTestTask.execute(Framework.scala:510)
[info]   at sbt.ForkMain$Run$2.call(ForkMain.java:296)
[info]   at sbt.ForkMain$Run$2.call(ForkMain.java:286)
[info]   at java.util.concurrent.FutureTask.run(FutureTask.java:266)
[info]   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
[info]   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[info]   at java.lang.Thread.run(Thread.java:748)
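The root cause here is the generated SessionManager proxy failing the cast to Mockito's internal MockAccess interface, which typically means the mock class and Mockito's own classes ended up in classloaders that cannot see each other. A hedged workaround sketch (not necessarily the fix the suite eventually took): stub the class by subclassing instead of mocking, so no generated proxy is involved; the overridden method is an illustrative assumption about what the test needs:

    import org.apache.hive.service.cli.session.SessionManager

    // SessionManager(HiveServer2) tolerates null for a standalone test stub.
    val sessionManager = new SessionManager(null) {
      override def getOpenSessionCount(): Int = 0  // illustrative override
    }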
[info] HiveCliSessionStateSuite:
07:05:20.930 WARN org.apache.hadoop.hive.metastore.ObjectStore: Version information not found in metastore. hive.metastore.schema.verification is not enabled so recording the schema version 2.3.0
07:05:20.930 WARN org.apache.hadoop.hive.metastore.ObjectStore: setMetaStoreSchemaVersion called but recording version is disabled: version = 2.3.0, comment = Set by MetaStore jenkins@192.168.10.24
07:05:20.938 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database default, returning NoSuchObjectException
[info] - CliSessionState will be reused (5 seconds, 95 milliseconds)
07:05:23.033 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.jdbc.timeout does not exist
07:05:23.033 WARN org.apache.hadoop.hive.conf.HiveConf: HiveConf of name hive.stats.retries.wait does not exist
07:05:27.986 WARN org.apache.hadoop.hive.metastore.ObjectStore: Version information not found in metastore. hive.metastore.schema.verification is not enabled so recording the schema version 2.3.0
07:05:27.986 WARN org.apache.hadoop.hive.metastore.ObjectStore: setMetaStoreSchemaVersion called but recording version is disabled: version = 2.3.0, comment = Set by MetaStore jenkins@192.168.10.24
07:05:28.006 WARN org.apache.hadoop.hive.metastore.ObjectStore: Failed to get database default, returning NoSuchObjectException
[info] - SessionState will not be reused (7 seconds, 206 milliseconds)
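The ObjectStore warnings above are expected on a fresh embedded Derby metastore: with schema verification disabled, Hive simply records its own version (2.3.0) and continues. A hedged sketch of making those settings explicit in a test session (standard Hive keys passed through Spark's hadoop-conf prefix; values mirror what the log shows):

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder()
      .master("local[*]")
      .config("spark.hadoop.hive.metastore.schema.verification", "false")
      .config("spark.hadoop.datanucleus.schema.autoCreateAll", "true")  // let Derby self-initialize
      .enableHiveSupport()
      .getOrCreate()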
07:05:28.620 WARN org.apache.spark.sql.hive.thriftserver.HiveCliSessionStateSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.hive.thriftserver.HiveCliSessionStateSuite, thread names: derby.rawStoreDaemon, com.google.common.base.internal.Finalizer, Timer-8, BoneCP-keep-alive-scheduler, BoneCP-pool-watch-thread, Timer-9 =====

[info] JdbcConnectionUriSuite:
[info] - SPARK-17819 Support default database in connection URIs (2 seconds, 171 milliseconds)
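SPARK-17819's subject is the database segment of the JDBC connection URI. A hedged sketch of exercising it against a running Thrift server (host, port, and database name are illustrative assumptions):

    import java.sql.DriverManager

    Class.forName("org.apache.hive.jdbc.HiveDriver")
    val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/mydb")
    val rs = conn.createStatement().executeQuery("SELECT current_database()")
    while (rs.next()) println(rs.getString(1))  // expect "mydb"
    conn.close()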
[info] ScalaTest
[info] Run completed in 24 minutes, 49 seconds.
[info] Total number of tests run: 447
[info] Suites: completed 16, aborted 3
[info] Tests: succeeded 447, failed 0, canceled 0, ignored 17, pending 0
[info] *** 3 SUITES ABORTED ***
[error] Error: Total 450, Failed 0, Errors 3, Passed 447, Ignored 17
[error] Error during tests:
[error] 	org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite
[error] 	org.apache.spark.sql.hive.thriftserver.ThriftServerWithSparkContextInHttpSuite
[error] 	org.apache.spark.sql.hive.thriftserver.ThriftServerWithSparkContextInBinarySuite
[error] (hive-thriftserver/test:test) sbt.TestsFailedException: Tests unsuccessful
[error] Total time: 1499 s, completed Jul 1, 2020 7:06:25 AM
[error] running /home/jenkins/workspace/NewSparkPullRequestBuilder/build/sbt -Phadoop-3.2 -Phive-2.3 -Phive -Phive-thriftserver -Dtest.exclude.tags=org.apache.spark.tags.ExtendedHiveTest,org.apache.spark.tags.ExtendedYarnTest sql/test sql-kafka-0-10/test hive/test catalyst/test mllib/test repl/test avro/test examples/test hive-thriftserver/test ; received return code 1
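For local triage, the three aborted suites can be re-run in isolation with sbt's testOnly, mirroring the profiles from the command above (the exact invocation is a hedged assumption):

    build/sbt -Phadoop-3.2 -Phive-2.3 -Phive -Phive-thriftserver "hive-thriftserver/test:testOnly org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite org.apache.spark.sql.hive.thriftserver.ThriftServerWithSparkContextInHttpSuite org.apache.spark.sql.hive.thriftserver.ThriftServerWithSparkContextInBinarySuite"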
Attempting to post to Github...
 > Post successful.
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Finished: FAILURE