Console Output (Failed)

Skipping 35,779 KB..
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryContext.accept(SqlBaseParser.java:6186)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitStatementDefault(SqlBaseBaseVisitor.java:69)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$StatementDefaultContext.accept(SqlBaseParser.java:1904)
	at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:18)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitSingleStatement$1(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.$anonfun$parsePlan$1(ParseDriver.scala:82)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:116)
	at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:49)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:81)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$2(SparkSession.scala:604)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:604)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:601)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:286)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.057 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.spark.sql.catalyst.parser.ParseException: 
Interval string does not match year-month format of 'y-m': -	2-2	(line 1, pos 16)

== SQL ==
select interval '-\t2-2\t' year to month
----------------^^^

	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.sql.catalyst.parser.ParseException: 
Interval string does not match year-month format of 'y-m': -	2-2	(line 1, pos 16)

== SQL ==
select interval '-\t2-2\t' year to month
----------------^^^

	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromYearMonthString(IntervalUtils.scala:113)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitUnitToUnitInterval$1(AstBuilder.scala:2146)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitUnitToUnitInterval(AstBuilder.scala:2137)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitInterval$1(AstBuilder.scala:2099)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitInterval(AstBuilder.scala:2083)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitInterval(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$IntervalContext.accept(SqlBaseParser.java:16469)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitIntervalLiteral(SqlBaseBaseVisitor.java:1497)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$IntervalLiteralContext.accept(SqlBaseParser.java:16090)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitConstantDefault(SqlBaseBaseVisitor.java:1427)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ConstantDefaultContext.accept(SqlBaseParser.java:15100)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitValueExpressionDefault(SqlBaseBaseVisitor.java:1308)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ValueExpressionDefaultContext.accept(SqlBaseParser.java:14496)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.expression(AstBuilder.scala:1220)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitPredicated$1(AstBuilder.scala:1356)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitPredicated(AstBuilder.scala:1355)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitPredicated(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$PredicatedContext.accept(SqlBaseParser.java:13943)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitExpression(SqlBaseBaseVisitor.java:1266)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ExpressionContext.accept(SqlBaseParser.java:13868)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.expression(AstBuilder.scala:1220)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitNamedExpression$1(AstBuilder.scala:1242)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpression(AstBuilder.scala:1241)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpression(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$NamedExpressionContext.accept(SqlBaseParser.java:13472)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitNamedExpressionSeq$2(AstBuilder.scala:610)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.immutable.List.map(List.scala:298)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpressionSeq(AstBuilder.scala:610)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$withSelectQuerySpecification$1(AstBuilder.scala:702)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.withSelectQuerySpecification(AstBuilder.scala:695)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitRegularQuerySpecification$1(AstBuilder.scala:602)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitRegularQuerySpecification(AstBuilder.scala:590)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitRegularQuerySpecification(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$RegularQuerySpecificationContext.accept(SqlBaseParser.java:9405)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitQueryPrimaryDefault(SqlBaseBaseVisitor.java:804)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryPrimaryDefaultContext.accept(SqlBaseParser.java:8910)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitQueryTermDefault(SqlBaseBaseVisitor.java:790)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryTermDefaultContext.accept(SqlBaseParser.java:8679)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.plan(AstBuilder.scala:114)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitQuery$1(AstBuilder.scala:120)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitQuery(AstBuilder.scala:119)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitQuery(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryContext.accept(SqlBaseParser.java:6186)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitStatementDefault(SqlBaseBaseVisitor.java:69)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$StatementDefaultContext.accept(SqlBaseParser.java:1904)
	at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:18)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitSingleStatement$1(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.$anonfun$parsePlan$1(ParseDriver.scala:82)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:116)
	at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:49)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:81)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$2(SparkSession.scala:604)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:604)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:601)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:286)
	... 16 more
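
The ParseException above is raised by IntervalUtils.fromYearMonthString (top of the causal trace), which requires a YEAR TO MONTH literal to match the 'y-m' pattern. Spark unescapes \t inside the SQL string literal, so the tabs become part of the interval string and break the match, which is why the error text shows tab-separated fragments. A minimal sketch of the passing and failing case in Scala, against the Spark API seen in the trace; the object name and local-master settings are illustrative, not from this log:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.parser.ParseException

// Hypothetical reproduction harness; only the two SQL statements come from the log.
object YearMonthIntervalSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[1]").appName("ym-sketch").getOrCreate()
    // Well-formed 'y-m' literal: optional sign, years, '-', months.
    spark.sql("select interval '-2-2' year to month").show()
    // The failing query: \t is unescaped to a real tab inside the literal,
    // so the string no longer matches the 'y-m' format.
    try spark.sql("select interval '-\\t2-2\\t' year to month").show()
    catch { case e: ParseException => println(e.getMessage) }
    spark.stop()
  }
}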
22:35:50.105 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with 7fa34594-fc72-4df8-88c9-2e75ed39b3a1, currentState RUNNING, 
org.apache.spark.sql.catalyst.parser.ParseException: 
requirement failed: Interval string must match day-time format of '^(?<sign>[+|-])?(?<day>\d+) (?<hour>\d{1,2}):(?<minute>\d{1,2}):(?<second>(\d{1,2})(\.(\d{1,9}))?)$': 
-	10	 12:34:46.789	, set spark.sql.legacy.fromDayTimeString.enabled to true to restore the behavior before Spark 3.0.(line 1, pos 16)

== SQL ==
select interval '\n-\t10\t 12:34:46.789\t' day to second
----------------^^^

	at scala.Predef$.require(Predef.scala:281)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.parseDayTime(IntervalUtils.scala:270)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDayTimeString(IntervalUtils.scala:139)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitUnitToUnitInterval$1(AstBuilder.scala:2152)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitUnitToUnitInterval(AstBuilder.scala:2137)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitInterval$1(AstBuilder.scala:2099)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitInterval(AstBuilder.scala:2083)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitInterval(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$IntervalContext.accept(SqlBaseParser.java:16469)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitIntervalLiteral(SqlBaseBaseVisitor.java:1497)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$IntervalLiteralContext.accept(SqlBaseParser.java:16090)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitConstantDefault(SqlBaseBaseVisitor.java:1427)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ConstantDefaultContext.accept(SqlBaseParser.java:15100)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitValueExpressionDefault(SqlBaseBaseVisitor.java:1308)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ValueExpressionDefaultContext.accept(SqlBaseParser.java:14496)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.expression(AstBuilder.scala:1220)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitPredicated$1(AstBuilder.scala:1356)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitPredicated(AstBuilder.scala:1355)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitPredicated(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$PredicatedContext.accept(SqlBaseParser.java:13943)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitExpression(SqlBaseBaseVisitor.java:1266)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ExpressionContext.accept(SqlBaseParser.java:13868)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.expression(AstBuilder.scala:1220)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitNamedExpression$1(AstBuilder.scala:1242)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpression(AstBuilder.scala:1241)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpression(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$NamedExpressionContext.accept(SqlBaseParser.java:13472)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitNamedExpressionSeq$2(AstBuilder.scala:610)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.immutable.List.map(List.scala:298)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpressionSeq(AstBuilder.scala:610)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$withSelectQuerySpecification$1(AstBuilder.scala:702)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.withSelectQuerySpecification(AstBuilder.scala:695)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitRegularQuerySpecification$1(AstBuilder.scala:602)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitRegularQuerySpecification(AstBuilder.scala:590)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitRegularQuerySpecification(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$RegularQuerySpecificationContext.accept(SqlBaseParser.java:9405)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitQueryPrimaryDefault(SqlBaseBaseVisitor.java:804)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryPrimaryDefaultContext.accept(SqlBaseParser.java:8910)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitQueryTermDefault(SqlBaseBaseVisitor.java:790)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryTermDefaultContext.accept(SqlBaseParser.java:8679)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.plan(AstBuilder.scala:114)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitQuery$1(AstBuilder.scala:120)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitQuery(AstBuilder.scala:119)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitQuery(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryContext.accept(SqlBaseParser.java:6186)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitStatementDefault(SqlBaseBaseVisitor.java:69)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$StatementDefaultContext.accept(SqlBaseParser.java:1904)
	at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:18)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitSingleStatement$1(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.$anonfun$parsePlan$1(ParseDriver.scala:82)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:116)
	at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:49)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:81)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$2(SparkSession.scala:604)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:604)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:601)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:286)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.106 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.spark.sql.catalyst.parser.ParseException: 
requirement failed: Interval string must match day-time format of '^(?<sign>[+|-])?(?<day>\d+) (?<hour>\d{1,2}):(?<minute>\d{1,2}):(?<second>(\d{1,2})(\.(\d{1,9}))?)$': 
-	10	 12:34:46.789	, set spark.sql.legacy.fromDayTimeString.enabled to true to restore the behavior before Spark 3.0.(line 1, pos 16)

== SQL ==
select interval '\n-\t10\t 12:34:46.789\t' day to second
----------------^^^

	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.sql.catalyst.parser.ParseException: 
requirement failed: Interval string must match day-time format of '^(?<sign>[+|-])?(?<day>\d+) (?<hour>\d{1,2}):(?<minute>\d{1,2}):(?<second>(\d{1,2})(\.(\d{1,9}))?)$': 
-	10	 12:34:46.789	, set spark.sql.legacy.fromDayTimeString.enabled to true to restore the behavior before Spark 3.0.(line 1, pos 16)

== SQL ==
select interval '\n-\t10\t 12:34:46.789\t' day to second
----------------^^^

	at scala.Predef$.require(Predef.scala:281)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.parseDayTime(IntervalUtils.scala:270)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDayTimeString(IntervalUtils.scala:139)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitUnitToUnitInterval$1(AstBuilder.scala:2152)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitUnitToUnitInterval(AstBuilder.scala:2137)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitInterval$1(AstBuilder.scala:2099)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitInterval(AstBuilder.scala:2083)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitInterval(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$IntervalContext.accept(SqlBaseParser.java:16469)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitIntervalLiteral(SqlBaseBaseVisitor.java:1497)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$IntervalLiteralContext.accept(SqlBaseParser.java:16090)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitConstantDefault(SqlBaseBaseVisitor.java:1427)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ConstantDefaultContext.accept(SqlBaseParser.java:15100)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitValueExpressionDefault(SqlBaseBaseVisitor.java:1308)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ValueExpressionDefaultContext.accept(SqlBaseParser.java:14496)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.expression(AstBuilder.scala:1220)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitPredicated$1(AstBuilder.scala:1356)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitPredicated(AstBuilder.scala:1355)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitPredicated(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$PredicatedContext.accept(SqlBaseParser.java:13943)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitExpression(SqlBaseBaseVisitor.java:1266)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$ExpressionContext.accept(SqlBaseParser.java:13868)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.expression(AstBuilder.scala:1220)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitNamedExpression$1(AstBuilder.scala:1242)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpression(AstBuilder.scala:1241)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpression(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$NamedExpressionContext.accept(SqlBaseParser.java:13472)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitNamedExpressionSeq$2(AstBuilder.scala:610)
	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:238)
	at scala.collection.immutable.List.foreach(List.scala:392)
	at scala.collection.TraversableLike.map(TraversableLike.scala:238)
	at scala.collection.TraversableLike.map$(TraversableLike.scala:231)
	at scala.collection.immutable.List.map(List.scala:298)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitNamedExpressionSeq(AstBuilder.scala:610)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$withSelectQuerySpecification$1(AstBuilder.scala:702)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.withSelectQuerySpecification(AstBuilder.scala:695)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitRegularQuerySpecification$1(AstBuilder.scala:602)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitRegularQuerySpecification(AstBuilder.scala:590)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitRegularQuerySpecification(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$RegularQuerySpecificationContext.accept(SqlBaseParser.java:9405)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitQueryPrimaryDefault(SqlBaseBaseVisitor.java:804)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryPrimaryDefaultContext.accept(SqlBaseParser.java:8910)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitQueryTermDefault(SqlBaseBaseVisitor.java:790)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryTermDefaultContext.accept(SqlBaseParser.java:8679)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.typedVisit(AstBuilder.scala:60)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.plan(AstBuilder.scala:114)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitQuery$1(AstBuilder.scala:120)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitQuery(AstBuilder.scala:119)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitQuery(AstBuilder.scala:54)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$QueryContext.accept(SqlBaseParser.java:6186)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitChildren(AstBuilder.scala:70)
	at org.apache.spark.sql.catalyst.parser.SqlBaseBaseVisitor.visitStatementDefault(SqlBaseBaseVisitor.java:69)
	at org.apache.spark.sql.catalyst.parser.SqlBaseParser$StatementDefaultContext.accept(SqlBaseParser.java:1904)
	at org.antlr.v4.runtime.tree.AbstractParseTreeVisitor.visit(AbstractParseTreeVisitor.java:18)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.$anonfun$visitSingleStatement$1(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.ParserUtils$.withOrigin(ParserUtils.scala:108)
	at org.apache.spark.sql.catalyst.parser.AstBuilder.visitSingleStatement(AstBuilder.scala:77)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.$anonfun$parsePlan$1(ParseDriver.scala:82)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parse(ParseDriver.scala:116)
	at org.apache.spark.sql.execution.SparkSqlParser.parse(SparkSqlParser.scala:49)
	at org.apache.spark.sql.catalyst.parser.AbstractSqlParser.parsePlan(ParseDriver.scala:81)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$2(SparkSession.scala:604)
	at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
	at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:604)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:601)
	at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:286)
	... 16 more
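
The DAY TO SECOND branch is stricter still: IntervalUtils.parseDayTime validates the literal against the named-group regex quoted in the message, and strings accepted by the pre-3.0 lenient parser now fail unless spark.sql.legacy.fromDayTimeString.enabled is set, exactly as the error text advises. The check can be sketched with plain JDK regex matching, no Spark session required; the pattern string is copied verbatim from the message, while the object name is illustrative:

object DayTimePatternSketch {
  // Day-time pattern quoted in the error message above.
  private val dayTime = java.util.regex.Pattern.compile(
    "^(?<sign>[+|-])?(?<day>\\d+) (?<hour>\\d{1,2}):(?<minute>\\d{1,2}):(?<second>(\\d{1,2})(\\.(\\d{1,9}))?)$")

  def main(args: Array[String]): Unit = {
    // Accepted: optional sign, days, one space, hh:mm:ss with optional fraction.
    println(dayTime.matcher("-10 12:34:46.789").matches())         // true
    // Rejected: the failing literal carries a leading newline and embedded tabs.
    println(dayTime.matcher("\n-\t10\t 12:34:46.789\t").matches()) // false
  }
}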
22:35:50.153 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13676.0 (TID 26078)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.155 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13676.0 (TID 26078, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

22:35:50.155 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13676.0 failed 1 times; aborting job
22:35:50.158 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with 1516e2fc-5da1-420b-9e13-33a7f8931af3, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13676.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13676.0 (TID 26078, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3655)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3646)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3644)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
22:35:50.159 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.negateExact(Math.java:977)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.negateExact(IntervalUtils.scala:412)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.negateExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:30)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
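
The overflow failures are a different class of problem: they occur at execution time inside generated code, after parsing succeeds. IntervalUtils.negateExact delegates to java.lang.Math.negateExact (Math.java:977 in the trace), which throws because Int.MinValue has no 32-bit negation; plain unary minus would wrap silently instead. A JDK-only sketch with an illustrative object name:

object NegateExactSketch {
  def main(args: Array[String]): Unit = {
    // Unary minus wraps: -Int.MinValue is still -2147483648.
    println(-Int.MinValue)
    // The exact variant throws, which is what the generated iterator
    // surfaced as the task failure above.
    try println(Math.negateExact(Int.MinValue))
    catch { case e: ArithmeticException => println(e.getMessage) } // integer overflow
  }
}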
22:35:50.202 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13677.0 (TID 26079)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
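
IntervalUtils.subtractExact fails the same way through java.lang.Math.subtractExact: the exact-arithmetic helpers throw on any difference outside Int range rather than wrapping an interval field. A companion sketch, again with an illustrative object name:

object SubtractExactSketch {
  def main(args: Array[String]): Unit = {
    // Plain subtraction wraps: Int.MinValue - 1 silently becomes 2147483647.
    println(Int.MinValue - 1)
    // subtractExact throws instead, matching the task failure above.
    try println(Math.subtractExact(Int.MinValue, 1))
    catch { case e: ArithmeticException => println(e.getMessage) } // integer overflow
  }
}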
22:35:50.203 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13677.0 (TID 26079, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

22:35:50.203 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13677.0 failed 1 times; aborting job
22:35:50.206 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with 1afbe5bd-ab0f-4f96-8e01-0c461c30c4ca, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13677.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13677.0 (TID 26079, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3655)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3646)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3644)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
22:35:50.206 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.subtractExact(Math.java:829)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.subtractExact(IntervalUtils.scala:453)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.subtractExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
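
The root frame here is java.lang.Math.subtractExact: Spark's IntervalUtils performs calendar-interval subtraction with the JDK's exact-arithmetic helpers, which raise ArithmeticException instead of silently wrapping when the Int-typed months/days fields leave range. A minimal sketch of that failing step, with hypothetical operand values (any pair whose difference falls outside Int behaves the same):

// Plain Scala, no Spark required; reproduces the bottom frame of the
// traces above. The months component of a calendar interval is an Int,
// and exact subtraction on it throws rather than overflowing silently.
object SubtractOverflowSketch extends App {
  val months: Int = Int.MinValue   // hypothetical year-month value at the boundary
  val delta: Int  = 1
  val diff = Math.subtractExact(months, delta) // java.lang.ArithmeticException: integer overflow
  println(diff)
}
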
22:35:50.248 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13678.0 (TID 26080)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.250 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13678.0 (TID 26080, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

22:35:50.250 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13678.0 failed 1 times; aborting job
22:35:50.253 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with 532ec295-34df-4547-ba54-c62d14030a0a, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13678.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13678.0 (TID 26080, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3655)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3646)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3644)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
22:35:50.253 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.addExact(Math.java:790)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.addExact(IntervalUtils.scala:431)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.addExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:31)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
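
Same failure family, opposite direction: this stage dies in java.lang.Math.addExact while adding two intervals. A minimal sketch with hypothetical values:

// Exact addition on the Int months field; Int.MaxValue + 1 cannot be
// represented, so the JDK throws the ArithmeticException seen above.
object AddOverflowSketch extends App {
  val months: Int = Int.MaxValue   // hypothetical interval months at the boundary
  val sum = Math.addExact(months, 1) // java.lang.ArithmeticException: integer overflow
  println(sum)
}
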
22:35:50.302 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13679.0 (TID 26081)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.304 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13679.0 (TID 26081, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

22:35:50.304 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13679.0 failed 1 times; aborting job
22:35:50.308 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with c17f6aff-5437-4c13-908a-69fab855de58, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13679.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13679.0 (TID 26081, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3655)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3646)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3644)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
22:35:50.308 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.multiplyExact(IntervalUtils.scala:482)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.multiplyExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
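
Multiplication fails one frame deeper, in fromDoubles: the product is computed in Double, widened to Long, then narrowed back to the Int months field with java.lang.Math.toIntExact, which throws when the result no longer fits. A minimal sketch of that narrowing step, with hypothetical numbers:

// The multiply path computes in Double and narrows via Math.toIntExact,
// the frame at the top of the Caused by above.
object MultiplyOverflowSketch extends App {
  val months: Int    = 1000000000      // hypothetical interval months
  val factor: Double = 3.0
  val widened: Long  = (months * factor).toLong // 3e9, outside Int range
  val narrowed = Math.toIntExact(widened) // java.lang.ArithmeticException: integer overflow
  println(narrowed)
}
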
22:35:50.353 ERROR org.apache.spark.executor.Executor: Exception in task 0.0 in stage 13680.0 (TID 26082)
java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.356 WARN org.apache.spark.scheduler.TaskSetManager: Lost task 0.0 in stage 13680.0 (TID 26082, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

22:35:50.356 ERROR org.apache.spark.scheduler.TaskSetManager: Task 0 in stage 13680.0 failed 1 times; aborting job
22:35:50.358 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error executing query with d989714c-4a5b-4f92-9631-d3c17fa07aaa, currentState RUNNING, 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 13680.0 failed 1 times, most recent failure: Lost task 0.0 in stage 13680.0 (TID 26082, amp-jenkins-worker-04.amp, executor driver): java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
	at scala.Option.foreach(Option.scala:407)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2173)
	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1030)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
	at org.apache.spark.rdd.RDD.withScope(RDD.scala:414)
	at org.apache.spark.rdd.RDD.collect(RDD.scala:1029)
	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:385)
	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3655)
	at org.apache.spark.sql.Dataset.$anonfun$collect$1(Dataset.scala:2940)
	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3646)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3644)
	at org.apache.spark.sql.Dataset.collect(Dataset.scala:2940)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:295)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
22:35:50.358 ERROR org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation: Error running hive query: 
org.apache.hive.service.cli.HiveSQLException: Error running query: java.lang.ArithmeticException: integer overflow
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:327)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.$anonfun$run$1(SparkExecuteStatementOperation.scala:222)
	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
	at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:222)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:217)
	at java.security.AccessController.doPrivileged(Native Method)
	at javax.security.auth.Subject.doAs(Subject.java:422)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
	at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:233)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ArithmeticException: integer overflow
	at java.lang.Math.toIntExact(Math.java:1011)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.fromDoubles(IntervalUtils.scala:384)
	at org.apache.spark.sql.catalyst.util.IntervalUtils$.divideExact(IntervalUtils.scala:500)
	at org.apache.spark.sql.catalyst.util.IntervalUtils.divideExact(IntervalUtils.scala)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(generated.java:33)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
	at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
	at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:127)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:464)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:467)
	... 3 more
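
Division goes through the same fromDoubles -> Math.toIntExact narrowing, so dividing by a fraction small enough to push the months value past Int range fails identically. A sketch with hypothetical numbers:

// Dividing by 0.5 doubles the months value past Int.MaxValue before the
// narrowing step, so toIntExact throws just as in the multiply case.
object DivideOverflowSketch extends App {
  val months: Int     = Int.MaxValue
  val divisor: Double = 0.5
  val widened: Long   = (months / divisor).toLong // ~4.29e9, outside Int range
  val narrowed = Math.toIntExact(widened) // java.lang.ArithmeticException: integer overflow
  println(narrowed)
}
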
22:35:50.533 ERROR org.apache.thrift.server.TThreadPoolServer: Thrift error occurred during processing of message.
org.apache.thrift.transport.TTransportException
	at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.transport.TSaslTransport.readLength(TSaslTransport.java:374)
	at org.apache.thrift.transport.TSaslTransport.readFrame(TSaslTransport.java:451)
	at org.apache.thrift.transport.TSaslTransport.read(TSaslTransport.java:433)
	at org.apache.thrift.transport.TSaslServerTransport.read(TSaslServerTransport.java:43)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:425)
	at org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:321)
	at org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:225)
	at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:27)
	at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:53)
	at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:310)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
22:35:50.651 ERROR org.apache.thrift.server.TThreadPoolServer: Thrift error occurred during processing of message.
org.apache.thrift.transport.TTransportException
	at org.apache.thrift.transport.TIOStreamTransport.read(TIOStreamTransport.java:132)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.transport.TSaslTransport.readLength(TSaslTransport.java:374)
	at org.apache.thrift.transport.TSaslTransport.readFrame(TSaslTransport.java:451)
	at org.apache.thrift.transport.TSaslTransport.read(TSaslTransport.java:433)
	at org.apache.thrift.transport.TSaslServerTransport.read(TSaslServerTransport.java:43)
	at org.apache.thrift.transport.TTransport.readAll(TTransport.java:86)
	at org.apache.thrift.protocol.TBinaryProtocol.readAll(TBinaryProtocol.java:425)
	at org.apache.thrift.protocol.TBinaryProtocol.readI32(TBinaryProtocol.java:321)
	at org.apache.thrift.protocol.TBinaryProtocol.readMessageBegin(TBinaryProtocol.java:225)
	at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:27)
	at org.apache.hive.service.auth.TSetIpAddressProcessor.process(TSetIpAddressProcessor.java:53)
	at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:310)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
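
The two message-less TTransportExceptions at TIOStreamTransport.read are libthrift's end-of-stream signal: when the peer closes the socket between requests, read() sees -1 and raises END_OF_FILE rather than returning an EOF status, which the server logs at ERROR even though it is typically just a client disconnecting. A minimal sketch against an already-exhausted stream (assumes libthrift on the classpath; constructor checked-exception signatures vary slightly across thrift versions):

import java.io.ByteArrayInputStream
import org.apache.thrift.transport.{TIOStreamTransport, TTransportException}

object ThriftEofSketch extends App {
  // An empty stream behaves like a peer that hung up: the first read
  // returns -1 and libthrift surfaces it as END_OF_FILE (type 4), with
  // no message -- matching the bare "TTransportException" lines above.
  val transport = new TIOStreamTransport(new ByteArrayInputStream(Array.emptyByteArray))
  val buf = new Array[Byte](4)
  try {
    transport.readAll(buf, 0, buf.length)
  } catch {
    case e: TTransportException =>
      println(s"type=${e.getType} message=${e.getMessage}")
  }
}
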
22:35:50.792 WARN org.apache.spark.sql.hive.thriftserver.ThriftServerQueryTestSuite: 
=== Metrics of Analyzer/Optimizer Rules ===
Total number of runs: 5430697
Total time: 168.233821777 seconds

Rule                                                                                               Effective Time / Total Time (ns)                Effective Runs / Total Runs

org.apache.spark.sql.catalyst.optimizer.Optimizer$OptimizeSubqueries                               16640464931 / 20747480906                       604 / 20769                                    
org.apache.spark.sql.execution.datasources.FindDataSourceTable                                     7832862740 / 8017142403                         2486 / 55819                                   
org.apache.spark.sql.catalyst.optimizer.ColumnPruning                                              1369948738 / 6709281887                         4168 / 48783                                   
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveSubquery                                    3710983153 / 4404329382                         687 / 55857                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveReferences                                  2583980167 / 4179560711                         6900 / 55907                                   
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAggregateFunctions                          1257723016 / 2435763354                         253 / 55840                                    
org.apache.spark.sql.catalyst.optimizer.RemoveNoopOperators                                        152470102 / 2260379226                          1791 / 48614                                   
org.apache.spark.sql.catalyst.optimizer.PushDownPredicates                                         415756986 / 2135320219                          1893 / 38528                                   
org.apache.spark.sql.catalyst.optimizer.ConstantFolding                                            810372801 / 2069917628                          3305 / 28014                                   
org.apache.spark.sql.catalyst.optimizer.CollapseProject                                            635407096 / 1962565035                          5063 / 38483                                   
org.apache.spark.sql.catalyst.optimizer.PruneFilters                                               16068561 / 1946011046                           144 / 38314                                    
org.apache.spark.sql.catalyst.optimizer.SimplifyCasts                                              2204996 / 1761765531                            20 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.RemoveDispensableExpressions                               1075205 / 1749775737                            6 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.LikeSimplification                                         4173976 / 1698157002                            20 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.NullPropagation                                            44488203 / 1694569284                           361 / 28183                                    
org.apache.spark.sql.catalyst.optimizer.EliminateResolvedHint                                      0 / 1675298331                                  0 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.CombineUnions                                              5734867 / 1641588586                            35 / 38652                                     
org.apache.spark.sql.catalyst.optimizer.BooleanSimplification                                      9694533 / 1621553595                            61 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.OptimizeIn                                                 598779 / 1606278110                             4 / 28183                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations                                   1217115804 / 1597262109                         8703 / 55962                                   
org.apache.spark.sql.catalyst.analysis.CTESubstitution                                             40291508 / 1583145108                           104 / 23832                                    
org.apache.spark.sql.catalyst.optimizer.ReorderAssociativeOperator                                 0 / 1571918328                                  0 / 28014                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$ImplicitTypeCasts                              398282742 / 1557381474                          1164 / 55828                                   
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveFunctions                                   724369101 / 1544211709                          3660 / 55857                                   
org.apache.spark.sql.catalyst.optimizer.SimplifyCaseConversionExpressions                          0 / 1476397422                                  0 / 28014                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ExtractGenerator                                   5437653 / 1469499110                            8 / 55890                                      
org.apache.spark.sql.catalyst.optimizer.SimplifyBinaryComparison                                   1810635 / 1450818272                            15 / 28014                                     
org.apache.spark.sql.catalyst.optimizer.SimplifyExtractValueOps                                    269217 / 1439846253                             3 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.SimplifyConditionals                                       9373976 / 1439037028                            156 / 28014                                    
org.apache.spark.sql.catalyst.optimizer.ReplaceNullWithFalseInPredicate                            482699 / 1419082137                             9 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.EliminateOuterJoin                                         104854162 / 1397096795                          174 / 28183                                    
org.apache.spark.sql.catalyst.analysis.ResolveSessionCatalog                                       525018360 / 1396075976                          1441 / 55815                                   
org.apache.spark.sql.catalyst.analysis.DecimalPrecision                                            256434050 / 1358560876                          1184 / 55828                                   
org.apache.spark.sql.catalyst.optimizer.CollapseRepartition                                        0 / 1354706182                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.PushDownLeftSemiAntiJoin                                   49120709 / 1339511543                           151 / 28183                                    
org.apache.spark.sql.catalyst.optimizer.ReorderJoin                                                14651389 / 1304387792                           30 / 28183                                     
org.apache.spark.sql.catalyst.optimizer.RewriteCorrelatedScalarSubquery                            4414719 / 1265820710                            5 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.GetCurrentDatabaseAndCatalog                               199725 / 1265652247                             1 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.PushProjectionThroughUnion                                 11739201 / 1246630952                           32 / 28183                                     
org.apache.spark.sql.catalyst.optimizer.LimitPushDown                                              0 / 1243876597                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.InferFiltersFromConstraints                                521530937 / 1230084984                          1184 / 10300                                   
org.apache.spark.sql.catalyst.optimizer.TransposeWindow                                            0 / 1223393013                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.PushLeftSemiLeftAntiThroughJoin                            232617 / 1220911088                             1 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.CombineFilters                                             0 / 1219286636                                  0 / 28183                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveDeserializer                                596512257 / 1202639656                          9029 / 55907                                   
org.apache.spark.sql.catalyst.optimizer.CollapseWindow                                             0 / 1184731550                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.ConstantPropagation                                        531538 / 1178729815                             3 / 28183                                      
org.apache.spark.sql.catalyst.analysis.ResolveTimeZone                                             662341392 / 1164787903                          7601 / 55828                                   
org.apache.spark.sql.catalyst.optimizer.CombineLimits                                              0 / 1148010465                                  0 / 28183                                      
org.apache.spark.sql.catalyst.optimizer.EliminateSerialization                                     0 / 1115221372                                  0 / 28014                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAliases                                     635058076 / 1097031666                          4833 / 55857                                   
org.apache.spark.sql.catalyst.optimizer.FoldablePropagation                                        47759161 / 1090607763                           163 / 28183                                    
org.apache.spark.sql.catalyst.analysis.TypeCoercion$PromoteStrings                                 155268649 / 1084599212                          757 / 55828                                    
org.apache.spark.sql.catalyst.analysis.TypeCoercion$InConversion                                   41671018 / 1021505179                           131 / 55828                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ExtractWindowExpressions                           188725146 / 1016418434                          424 / 55840                                    
org.apache.spark.sql.catalyst.analysis.TypeCoercion$FunctionArgumentConversion                     171619044 / 1009308304                          388 / 55828                                    
org.apache.spark.sql.catalyst.optimizer.ComputeCurrentTime                                         1999458 / 986943792                             7 / 10469                                      
org.apache.spark.sql.catalyst.optimizer.PropagateEmptyRelation                                     6326784 / 973778171                             59 / 20828                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$LookupFunctions                                    0 / 961985973                                   0 / 23520                                      
org.apache.spark.sql.catalyst.optimizer.ReplaceExpressions                                         50824997 / 877513434                            376 / 10469                                    
org.apache.spark.sql.catalyst.analysis.UpdateAttributeNullability                                  20648775 / 877413727                            114 / 23360                                    
org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases                                    704395745 / 873959942                           5807 / 10469                                   
org.apache.spark.sql.catalyst.analysis.ResolveCreateNamedStruct                                    7893275 / 813823863                             37 / 55907                                     
org.apache.spark.sql.catalyst.analysis.TimeWindowing                                               0 / 813227480                                   0 / 55840                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveBinaryArithmetic                            30900312 / 813154735                            182 / 55828                                    
org.apache.spark.sql.catalyst.optimizer.RewriteNonCorrelatedExists                                 8658013 / 808885276                             63 / 10469                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$BooleanEquality                                5504855 / 803550550                             68 / 55828                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$DateTimeOperations                             8636097 / 799571080                             64 / 55828                                     
org.apache.spark.sql.execution.datasources.SchemaPruning                                           12226152 / 798109996                            5 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$IfCoercion                                     17044412 / 777400425                            65 / 55828                                     
org.apache.spark.sql.catalyst.optimizer.PullupCorrelatedPredicates                                 143527189 / 745684178                           263 / 10469                                    
org.apache.spark.sql.catalyst.analysis.TypeCoercion$StringLiteralCoercion                          3038427 / 744526429                             6 / 55819                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$Division                                       18800408 / 730629605                            93 / 55828                                     
org.apache.spark.sql.catalyst.optimizer.NormalizeFloatingNumbers                                   15450578 / 723651178                            39 / 10300                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$IntegralDivision                               1038164 / 717119563                             4 / 55828                                      
org.apache.spark.sql.catalyst.optimizer.RemoveRedundantAliases                                     11004656 / 705621698                            97 / 28014                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRandomSeed                                  822876 / 702594124                              2 / 55828                                      
org.apache.spark.sql.catalyst.analysis.ResolveHigherOrderFunctions                                 23096084 / 688528942                            121 / 55828                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveWindowOrder                                 7283203 / 687958588                             24 / 55852                                     
org.apache.spark.sql.execution.dynamicpruning.PartitionPruning                                     0 / 676999298                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$ConcatCoercion                                 5521148 / 674583544                             20 / 55828                                     
org.apache.spark.sql.catalyst.optimizer.ReplaceExceptWithFilter                                    15864611 / 663992177                            21 / 10741                                     
org.apache.spark.sql.execution.datasources.PruneFileSourcePartitions                               12410113 / 648145924                            2 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$EltCoercion                                    2222182 / 641859431                             3 / 55828                                      
org.apache.spark.sql.catalyst.optimizer.RewriteDistinctAggregates                                  22589041 / 627915283                            8 / 10469                                      
org.apache.spark.sql.catalyst.analysis.EliminateView                                               2534675 / 610414313                             4 / 10469                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveMissingReferences                           12880642 / 610269566                            32 / 55890                                     
org.apache.spark.sql.execution.datasources.v2.V2ScanRelationPushDown                               0 / 606641054                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.RewriteExceptAll                                           7734861 / 598973919                             23 / 10741                                     
org.apache.spark.sql.catalyst.analysis.TypeCoercion$WindowFrameCoercion                            19365517 / 592501211                            81 / 55828                                     
org.apache.spark.sql.catalyst.expressions.codegen.package$ExpressionCanonicalizer$CleanExpressions 16141674 / 591556206                            4992 / 143389                                  
org.apache.spark.sql.catalyst.optimizer.ReplaceExceptWithAntiJoin                                  7739063 / 588671948                             29 / 10741                                     
org.apache.spark.sql.execution.dynamicpruning.CleanupDynamicPruningFilters                         0 / 588281626                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$CaseWhenCoercion                               19774483 / 583416367                            71 / 55828                                     
org.apache.spark.sql.catalyst.analysis.ResolveLambdaVariables                                      53059053 / 575237984                            123 / 55828                                    
org.apache.spark.sql.catalyst.optimizer.ReassignLambdaVariableID                                   0 / 573604998                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveWindowFrame                                 17379549 / 572376527                            170 / 55852                                    
org.apache.spark.sql.execution.python.ExtractPythonUDFs                                            0 / 571710635                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$MapZipWithCoercion                             13546489 / 567287999                            13 / 55828                                     
org.apache.spark.sql.catalyst.optimizer.ReplaceIntersectWithSemiJoin                               10937476 / 563146861                            51 / 10741                                     
org.apache.spark.sql.catalyst.optimizer.EliminateMapObjects                                        0 / 559164805                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.RewriteIntersectAll                                        10582855 / 554687100                            16 / 10741                                     
org.apache.spark.sql.catalyst.optimizer.ReplaceDistinctWithAggregate                               38279425 / 549485947                            245 / 10741                                    
org.apache.spark.sql.catalyst.analysis.ResolveInlineTables                                         314094786 / 547189084                           1644 / 55832                                   
org.apache.spark.sql.catalyst.optimizer.ReplaceDeduplicateWithAggregate                            0 / 540628526                                   0 / 10469                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$StackCoercion                                  0 / 534664395                                   0 / 55828                                      
org.apache.spark.sql.catalyst.optimizer.RemoveRepetitionFromGroupExpressions                       2542958 / 523453957                             27 / 10518                                     
org.apache.spark.sql.catalyst.optimizer.RemoveLiteralFromGroupExpressions                          1715935 / 523452350                             22 / 10518                                     
org.apache.spark.sql.catalyst.optimizer.OptimizeLimitZero                                          596848 / 519364595                              3 / 10469                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$GlobalAggregates                                   14248810 / 515312846                            450 / 55840                                    
org.apache.spark.sql.catalyst.optimizer.PushCNFPredicateThroughJoin                                7565040 / 505980140                             38 / 10300                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveGroupingAnalytics                           127019945 / 504624160                           89 / 55903                                     
org.apache.spark.sql.catalyst.optimizer.RewritePredicateSubquery                                   137020359 / 500517185                           547 / 10300                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNewInstance                                 0 / 468815501                                   0 / 55907                                      
org.apache.spark.sql.catalyst.optimizer.DecimalAggregates                                          3302961 / 465033552                             6 / 10306                                      
org.apache.spark.sql.catalyst.optimizer.EliminateSorts                                             3201193 / 455028307                             28 / 10300                                     
org.apache.spark.sql.catalyst.analysis.ResolveHints$ResolveJoinStrategyHints                       0 / 441396476                                   0 / 23527                                      
org.apache.spark.sql.catalyst.analysis.CleanupAliases                                              178971223 / 435065453                           6952 / 30312                                   
org.apache.spark.sql.execution.python.ExtractGroupingPythonUDFFromAggregate                        0 / 432489267                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.ResolveCatalogs                                             47574055 / 429896597                            252 / 55962                                    
org.apache.spark.sql.execution.python.ExtractPythonUDFFromAggregate                                0 / 426177118                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.ExtractPythonUDFFromJoinCondition                          0 / 417643537                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.PushPredicateThroughNonJoin                                0 / 415907078                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveTables                                      0 / 414919855                                   0 / 55962                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast                                      0 / 413759541                                   0 / 55907                                      
org.apache.spark.sql.catalyst.optimizer.ObjectSerializerPruning                                    0 / 389920267                                   0 / 10300                                      
org.apache.spark.sql.catalyst.optimizer.CombineTypedFilters                                        0 / 381098383                                   0 / 10300                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$PullOutNondeterministic                            7282280 / 332530087                             2 / 23360                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOrdinalInOrderByAndGroupBy                  7487943 / 313555505                             143 / 55890                                    
org.apache.spark.sql.execution.analysis.DetectAmbiguousSelfJoin                                    0 / 295154208                                   0 / 23361                                      
org.apache.spark.sql.catalyst.analysis.TypeCoercion$WidenSetOperationTypes                         42383162 / 291268723                            106 / 55828                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$HandleNullInputsForUDF                             0 / 283090921                                   0 / 23360                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNaturalAndUsingJoin                         75295100 / 277102454                            312 / 55852                                    
org.apache.spark.sql.catalyst.analysis.ResolveTableValuedFunctions                                 38590922 / 267615239                            133 / 55962                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolvePivot                                       39462506 / 249915708                            21 / 55897                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAggAliasInGroupBy                           4058264 / 241455413                             15 / 55890                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveSubqueryColumnAliases                       27618593 / 236975804                            600 / 55855                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveGenerate                                    1181366 / 234609826                             3 / 55890                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveEncodersInUDF                               0 / 232095843                                   0 / 23360                                      
org.apache.spark.sql.execution.datasources.PreprocessTableInsertion                                148615387 / 229636562                           867 / 23360                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveNamespace                                   5272537 / 222228900                             25 / 55962                                     
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveOutputRelation                              0 / 205942823                                   0 / 55852                                      
org.apache.spark.sql.execution.datasources.ResolveSQLOnFile                                        0 / 205876271                                   0 / 55819                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveInsertInto                                  0 / 200905259                                   0 / 55962                                      
org.apache.spark.sql.execution.datasources.FallBackFileSourceV2                                    0 / 180129046                                   0 / 55819                                      
org.apache.spark.sql.execution.datasources.PreprocessTableCreation                                 453167 / 170180835                              1 / 23361                                      
org.apache.spark.sql.catalyst.analysis.ResolveHints$ResolveCoalesceHints                           201448 / 160484064                              1 / 23527                                      
org.apache.spark.sql.execution.datasources.DataSourceAnalysis                                      59094213 / 145630447                            1067 / 23360                                   
org.apache.spark.sql.catalyst.analysis.SubstituteUnresolvedOrdinals                                16441761 / 132867557                            152 / 23832                                    
org.apache.spark.sql.catalyst.analysis.Analyzer$WindowsSubstitution                                13763945 / 118124329                            78 / 23832                                     
org.apache.spark.sql.catalyst.analysis.UpdateOuterReferences                                       3990016 / 112892820                             9 / 23360                                      
org.apache.spark.sql.catalyst.analysis.EliminateUnions                                             0 / 102640279                                   0 / 23832                                      
org.apache.spark.sql.catalyst.optimizer.CombineConcats                                             1506334 / 98994844                              7 / 28014                                      
org.apache.spark.sql.catalyst.optimizer.EliminateDistinct                                          0 / 94448597                                    0 / 10469                                      
org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveAlterTableChanges                           0 / 92650229                                    0 / 23360                                      
org.apache.spark.sql.catalyst.analysis.ResolveHints$RemoveAllHints                                 0 / 78561576                                    0 / 23360                                      
org.apache.spark.sql.catalyst.optimizer.CheckCartesianProducts                                     0 / 74540282                                    0 / 20600                                      
org.apache.spark.sql.catalyst.optimizer.CostBasedJoinReorder                                       0 / 47836002                                    0 / 10300                                      
org.apache.spark.sql.execution.OptimizeMetadataOnlyQuery                                           0 / 40062049                                    0 / 10300                                      
     
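The table above is produced by Catalyst's RuleExecutor, which records per-rule wall time (in nanoseconds) and run counts; a run counts as "effective" only when the rule actually changed the plan. A minimal sketch, assuming a local Spark 3.x build, of producing the same report:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.catalyst.rules.RuleExecutor

    object RuleMetricsSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .master("local[*]").appName("rule-metrics").getOrCreate()
        RuleExecutor.resetMetrics()            // clear counters accumulated so far
        spark.range(100).selectExpr("id * 2 AS x").where("x > 10").collect()
        println(RuleExecutor.dumpTimeSpent())  // renders the same table as above
        spark.stop()
      }
    }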
22:36:51.046 WARN org.apache.spark.sql.hive.thriftserver.ThriftServerQueryTestSuite: 
=== Metrics of Whole-stage Codegen ===
Total code generation time: 35.577851367 seconds
Total compile time: 49.059339865 seconds
         
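These totals are aggregated from Spark's internal codegen instrumentation. A rough sketch (assuming Spark 3.x internals, where CodegenMetrics is an unstable developer-facing object) of inspecting the underlying Dropwizard histogram behind the compile-time figure:

    import org.apache.spark.metrics.source.CodegenMetrics

    object CodegenMetricsSketch {
      def main(args: Array[String]): Unit = {
        // METRIC_COMPILATION_TIME is a reservoir-sampled histogram of
        // per-compilation times in milliseconds, so count and mean are
        // estimates rather than an exact running total.
        val hist = CodegenMetrics.METRIC_COMPILATION_TIME
        println(s"compilations: ${hist.getCount}, mean: ${hist.getSnapshot.getMean} ms")
      }
    }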
22:36:51.229 WARN org.apache.spark.sql.hive.thriftserver.ThriftServerQueryTestSuite: 

===== POSSIBLE THREAD LEAK IN SUITE o.a.s.sql.hive.thriftserver.ThriftServerQueryTestSuite, thread names: rpc-boss-3-1, derby.rawStoreDaemon, com.google.common.base.internal.Finalizer, Timer-3, BoneCP-keep-alive-scheduler, shuffle-boss-6-1, BoneCP-pool-watch-thread =====

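The "POSSIBLE THREAD LEAK" banner comes from the ThreadAudit helper mixed into Spark's test suites, which snapshots live thread names before and after a suite and reports whatever survives (minus a whitelist of known long-lived threads). A simplified sketch of the same diffing technique; the audited helper and suiteName parameter are illustrative, not Spark's actual API:

    import scala.collection.JavaConverters._

    object ThreadDiffSketch {
      private def liveThreadNames: Set[String] =
        Thread.getAllStackTraces.keySet.asScala.map(_.getName).toSet

      // Runs `body` and prints any threads alive afterwards that were not
      // alive before (Spark's real ThreadAudit additionally whitelists
      // expected survivors such as netty boss threads).
      def audited[T](suiteName: String)(body: => T): T = {
        val before = liveThreadNames
        try body finally {
          val leaked = liveThreadNames -- before
          if (leaked.nonEmpty) {
            println(s"===== POSSIBLE THREAD LEAK IN SUITE $suiteName, " +
              s"thread names: ${leaked.mkString(", ")} =====")
          }
        }
      }
    }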
[info] - ansi/interval.sql (33 seconds, 590 milliseconds)
[info] - ansi/literals.sql !!! IGNORED !!!
[info] - Check if ThriftServer can work (117 milliseconds)
[info] ScalaTest
[info] Run completed in 49 minutes, 59 seconds.
[info] Total number of tests run: 447
[info] Suites: completed 16, aborted 3
[info] Tests: succeeded 447, failed 0, canceled 0, ignored 17, pending 0
[info] *** 3 SUITES ABORTED ***
[error] Error: Total 450, Failed 0, Errors 3, Passed 447, Ignored 17
[error] Error during tests:
[error] 	org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite
[error] 	org.apache.spark.sql.hive.thriftserver.ThriftServerWithSparkContextInHttpSuite
[error] 	org.apache.spark.sql.hive.thriftserver.ThriftServerWithSparkContextInBinarySuite
[error] (hive-thriftserver/test:test) sbt.TestsFailedException: Tests unsuccessful
[error] Total time: 3014 s, completed Jun 28, 2020 10:36:53 PM
[error] running /home/jenkins/workspace/NewSparkPullRequestBuilder/build/sbt -Phadoop-3.2 -Phive-2.3 -Phive -Phive-thriftserver -Dtest.exclude.tags=org.apache.spark.tags.ExtendedHiveTest,org.apache.spark.tags.ExtendedYarnTest hive/test repl/test catalyst/test hive-thriftserver/test examples/test mllib/test avro/test sql/test sql-kafka-0-10/test ; received return code 1
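To reproduce locally without the near-50-minute full run, the aborted suites can be re-run one at a time with sbt's testOnly, keeping the same profiles as the command above (an untested sketch of one such invocation):

    build/sbt -Phadoop-3.2 -Phive-2.3 -Phive -Phive-thriftserver "hive-thriftserver/testOnly org.apache.spark.sql.hive.thriftserver.HiveSessionImplSuite"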
Attempting to post to Github...
 > Post successful.
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Finished: FAILURE