Failed: Console Output

[Skipping 1,897 KB of earlier output; see the full log.]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: stages/stage/json->[{GzipHandler@437e951d{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@62417a16{/stages/stage/json,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: executors/threadDump/json->[{GzipHandler@2fb68ec6{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@43ed0ff3{/executors/threadDump/json,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: environment/json->[{GzipHandler@79c4715d{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@4816c290{/environment/json,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: jobs/job/json->[{GzipHandler@6a1ebcff{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@66971f6b{/jobs/job/json,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: jobs->[{GzipHandler@3fabf088{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@29cfd92b{/jobs,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: stages/json->[{GzipHandler@28fa700e{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@2c715e84{/stages/json,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: stages/stage->[{GzipHandler@3f3c966c{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@72b16078{/stages/stage,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: storage/json->[{GzipHandler@1bb9aa43{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@54107f42{/storage/json,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: stages/stage/kill->[{GzipHandler@54f5f647{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@9573b3b{/stages/stage/kill,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: jobs/job->[{GzipHandler@26f143ed{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@5471388b{/jobs/job,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: environment->[{GzipHandler@5fd9b663{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@5e77f0f4{/environment,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: stages->[{GzipHandler@3ef41c66{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@c9413d8{/stages,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: executors->[{GzipHandler@1556f2dd{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@4e4efc1b{/executors,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: jobs/job/kill->[{GzipHandler@5bbbdd4b{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@724bade8{/jobs/job/kill,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: metrics/json->[{GzipHandler@e042c99{STOPPED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@35f639fa{/metrics/json,null,STOPPED,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.ContextHandlerCollection: executors/threadDump->[{GzipHandler@71652c98{STARTED,min=32,inflate=-1},[o.s.j.s.ServletContextHandler@61d9efe0{/executors/threadDump,null,AVAILABLE,@Spark}]}]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.ContainerLifeCycle: ContextHandlerCollection@4263b080{STARTED} added {GzipHandler@e042c99{STOPPED,min=32,inflate=-1},UNMANAGED}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: starting o.s.j.s.ServletContextHandler@35f639fa{/metrics/json,null,STOPPED,@Spark}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.AbstractHandler: starting o.s.j.s.ServletContextHandler@35f639fa{/metrics/json,null,STARTING,@Spark}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: starting ServletHandler@5aaaa446{STOPPED}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHandler: Path=/[EMBEDDED:null] mapped to servlet=org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd[EMBEDDED:null]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.http.pathmap.PathMappings: Added MappedResource[pathSpec=ServletPathSpec@4e{/},resource=org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd==org.apache.spark.ui.JettyUtils$$anon$1@55f007d9{jsp=null,order=-1,inst=false,async=true,src=EMBEDDED:null}] to PathMappings[size=1]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHandler: filterNameMap={org.apache.spark.ui.HttpSecurityFilter-1a2909ae=org.apache.spark.ui.HttpSecurityFilter-1a2909ae==org.apache.spark.ui.HttpSecurityFilter@1a2909ae{inst=false,async=true,src=EMBEDDED:null}}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHandler: pathFilters=[[/*]/[]/[REQUEST, INCLUDE, ASYNC, ERROR, FORWARD]=>org.apache.spark.ui.HttpSecurityFilter-1a2909ae]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHandler: servletFilterMap={}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHandler: servletPathMap=PathMappings[size=1]
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHandler: servletNameMap={org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd=org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd==org.apache.spark.ui.JettyUtils$$anon$1@55f007d9{jsp=null,order=-1,inst=false,async=true,src=EMBEDDED:null}}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.AbstractHandler: starting ServletHandler@5aaaa446{STARTING}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: STARTED @6351ms ServletHandler@5aaaa446{STARTED}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: starting org.apache.spark.ui.HttpSecurityFilter-1a2909ae==org.apache.spark.ui.HttpSecurityFilter@1a2909ae{inst=false,async=true,src=EMBEDDED:null}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: STARTED @6352ms org.apache.spark.ui.HttpSecurityFilter-1a2909ae==org.apache.spark.ui.HttpSecurityFilter@1a2909ae{inst=false,async=true,src=EMBEDDED:null}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.FilterHolder: Filter.init org.apache.spark.ui.HttpSecurityFilter@42b6d0cc
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: starting org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd==org.apache.spark.ui.JettyUtils$$anon$1@55f007d9{jsp=null,order=-1,inst=false,async=true,src=EMBEDDED:null}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: STARTED @6352ms org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd==org.apache.spark.ui.JettyUtils$$anon$1@55f007d9{jsp=null,order=-1,inst=false,async=true,src=EMBEDDED:null}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.servlet.ServletHolder: Servlet.init null for org.apache.spark.ui.JettyUtils$$anon$1-6c6333cd
  21/02/06 12:39:24 INFO org.sparkproject.jetty.server.handler.ContextHandler: Started o.s.j.s.ServletContextHandler@35f639fa{/metrics/json,null,AVAILABLE,@Spark}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: STARTED @6352ms o.s.j.s.ServletContextHandler@35f639fa{/metrics/json,null,AVAILABLE,@Spark}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: starting GzipHandler@e042c99{STOPPED,min=32,inflate=-1}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.ContainerLifeCycle: GzipHandler@e042c99{STARTING,min=32,inflate=-1} added {DeflaterPool@1000d54d{STOPPED,size=0,capacity=UNLIMITED},AUTO}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.server.handler.AbstractHandler: starting GzipHandler@e042c99{STARTING,min=32,inflate=-1}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: starting DeflaterPool@1000d54d{STOPPED,size=0,capacity=UNLIMITED}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: STARTED @6353ms DeflaterPool@1000d54d{STARTED,size=0,capacity=UNLIMITED}
  21/02/06 12:39:24 DEBUG org.sparkproject.jetty.util.component.AbstractLifeCycle: STARTED @6353ms GzipHandler@e042c99{STARTED,min=32,inflate=-1}
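The sequence above shows the driver UI lazily mounting /metrics/json: the ServletContextHandler is started, wrapped in a GzipHandler, and added to the ContextHandlerCollection. A minimal sketch of reading that endpoint from outside, assuming the default driver UI address (localhost on spark.ui.port 4040; in this job the UI actually runs inside the driver pod):

    // Minimal sketch: fetch the metrics JSON served by the handler started above.
    // The URL is an assumption for illustration, not taken from this log.
    import scala.io.Source

    object MetricsProbe {
      def main(args: Array[String]): Unit = {
        val url = "http://localhost:4040/metrics/json" // assumed driver UI address
        println(Source.fromURL(url).mkString.take(500)) // print the start of the document
      }
    }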
  21/02/06 12:39:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Requested executor with id 1 from Kubernetes.
  21/02/06 12:39:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsWatchSnapshotSource: Received executor pod update for pod named spark-pi-c037ee77775a98e5-exec-1, action ADDED
  21/02/06 12:39:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsWatchSnapshotSource: Received executor pod update for pod named spark-pi-c037ee77775a98e5-exec-1, action MODIFIED
  21/02/06 12:39:25 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:39:25 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  [... the pod-allocation-status DEBUG line repeats once per second through 21/02/06 12:39:53 ...]
  21/02/06 12:39:53 INFO org.apache.spark.scheduler.cluster.k8s.KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
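The 30000000000(ns) above is spark.scheduler.maxRegisteredResourcesWaitingTime (30 seconds): after it elapses, the backend declares itself ready even though no executor has registered, which is why the job below gets submitted with zero executors available. A sketch of the two knobs involved; the key names are real Spark configs, the values here are illustrative, not this job's actual settings:

    import org.apache.spark.SparkConf

    // Illustrative values only; the keys are the real Spark config names.
    val conf = new SparkConf()
      .set("spark.scheduler.maxRegisteredResourcesWaitingTime", "30s") // the 30000000000 ns above
      .set("spark.scheduler.minRegisteredResourcesRatio", "0.8")       // fraction of executors to wait for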
  21/02/06 12:39:53 DEBUG org.apache.spark.SparkContext: Adding shutdown hook
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsPollingSnapshotSource: Resynchronizing full executor pod state from Kubernetes.
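Two snapshot sources feed the allocator: ExecutorPodsWatchSnapshotSource pushes watch events (the ADDED/MODIFIED lines above), while ExecutorPodsPollingSnapshotSource resynchronizes the full pod state on a fixed interval, visible in this log every 30 seconds. A sketch of the allocator settings behind these cadences; the key names are real Spark configs, the values are assumptions matching what the log shows:

    import org.apache.spark.SparkConf

    val conf = new SparkConf()
      .set("spark.executor.instances", "1")                       // the single pending pod
      .set("spark.kubernetes.allocation.batch.size", "5")         // pods requested per allocation round
      .set("spark.kubernetes.allocation.batch.delay", "1s")       // matches the once-per-second status lines
      .set("spark.kubernetes.executor.apiPollingInterval", "30s") // matches the resync cadence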
  21/02/06 12:39:54 DEBUG org.apache.spark.util.ClosureCleaner: Cleaning indylambda closure: $anonfun$main$1
  21/02/06 12:39:54 DEBUG org.apache.spark.util.ClosureCleaner:  +++ indylambda closure ($anonfun$main$1) is now cleaned +++
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:39:54 DEBUG org.apache.spark.util.ClosureCleaner: Cleaning indylambda closure: $anonfun$main$2
  21/02/06 12:39:54 DEBUG org.apache.spark.util.ClosureCleaner:  +++ indylambda closure ($anonfun$main$2) is now cleaned +++
  21/02/06 12:39:54 DEBUG org.apache.spark.util.ClosureCleaner: Cleaning indylambda closure: $anonfun$runJob$6
  21/02/06 12:39:54 DEBUG org.apache.spark.util.ClosureCleaner:  +++ indylambda closure ($anonfun$runJob$6) is now cleaned +++
  21/02/06 12:39:54 INFO org.apache.spark.SparkContext: Starting job: reduce at SparkPi.scala:38
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.DAGScheduler: Merging stage rdd profiles: Set()
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.DAGScheduler: Got job 0 (reduce at SparkPi.scala:38) with 2 output partitions
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.DAGScheduler: Final stage: ResultStage 0 (reduce at SparkPi.scala:38)
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.DAGScheduler: Parents of final stage: List()
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.DAGScheduler: Missing parents: List()
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.DAGScheduler: submitStage(ResultStage 0 (name=reduce at SparkPi.scala:38;jobs=0))
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.DAGScheduler: missing: List()
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[1] at map at SparkPi.scala:34), which has no missing parents
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.DAGScheduler: submitMissingTasks(ResultStage 0)
  21/02/06 12:39:54 INFO org.apache.spark.storage.memory.MemoryStore: Block broadcast_0 stored as values in memory (estimated size 3.1 KiB, free 593.9 MiB)
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManager: Put block broadcast_0 locally took 43 ms
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManager: Putting block broadcast_0 without replication took 46 ms
  21/02/06 12:39:54 INFO org.apache.spark.storage.memory.MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 1816.0 B, free 593.9 MiB)
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManagerMasterEndpoint: Updating block info on master broadcast_0_piece0 for BlockManagerId(driver, spark-test-app-90bf9e77775a7f95-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:39:54 INFO org.apache.spark.storage.BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-90bf9e77775a7f95-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 1816.0 B, free: 593.9 MiB)
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManagerMaster: Updated info of block broadcast_0_piece0
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManager: Told master about block broadcast_0_piece0
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManager: Put block broadcast_0_piece0 locally took 10 ms
  21/02/06 12:39:54 DEBUG org.apache.spark.storage.BlockManager: Putting block broadcast_0_piece0 without replication took 10 ms
  21/02/06 12:39:54 INFO org.apache.spark.SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
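broadcast_0 here is the serialized task binary for stage 0, stored once as deserialized values and once as a compressed piece (broadcast_0_piece0) for shipping to executors. User code goes through the same machinery; a minimal sketch, assuming a live SparkContext sc:

    // Minimal sketch: broadcasting a small lookup table; `sc` is assumed.
    val lookup = sc.broadcast(Map(0 -> "a", 1 -> "b"))
    println(lookup.value(0)) // readable on the driver and inside tasks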
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.DAGScheduler: Submitting 2 missing tasks from ResultStage 0 (MapPartitionsRDD[1] at map at SparkPi.scala:34) (first 15 tasks are for partitions Vector(0, 1))
  21/02/06 12:39:54 INFO org.apache.spark.scheduler.TaskSchedulerImpl: Adding task set 0.0 with 2 tasks resource profile 0
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.TaskSetManager: Epoch for TaskSet 0.0: 0
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.TaskSetManager: Adding pending tasks took 3 ms
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.TaskSetManager: Valid locality levels for TaskSet 0.0: NO_PREF, ANY
  21/02/06 12:39:54 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
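The scheduler lines above ("map at SparkPi.scala:34", "reduce at SparkPi.scala:38", 2 output partitions) correspond to the standard SparkPi example from the Spark distribution. A trimmed reconstruction of its core Monte Carlo estimate; this is the well-known example shape, not this build's exact source:

    import org.apache.spark.sql.SparkSession
    import scala.math.random

    object SparkPi {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder.appName("Spark Pi").getOrCreate()
        val slices = 2 // matches the 2 output partitions above
        val n = math.min(100000L * slices, Int.MaxValue).toInt
        val count = spark.sparkContext
          .parallelize(1 until n, slices)
          .map { _ =>                   // one random dart per element
            val x = random * 2 - 1
            val y = random * 2 - 1
            if (x * x + y * y <= 1) 1 else 0
          }
          .reduce(_ + _)                // the action that starts job 0
        println(s"Pi is roughly ${4.0 * count / (n - 1)}")
        spark.stop()
      }
    }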
  21/02/06 12:39:55 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:39:55 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:40:09 ...]
  21/02/06 12:40:09 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
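This WARN fires roughly every 15 seconds while TaskSet 0.0 has tasks but no executor has registered; here the cause is the executor pod stuck Pending in Kubernetes (insufficient cluster capacity, an unschedulable pod spec, or an image that never pulls are typical reasons). A small diagnostic sketch, assuming a live SparkContext sc on the driver:

    // While the WARN above is firing, only the driver's own block manager
    // appears here; executor entries show up once a pod reaches Running
    // and registers back with the driver.
    sc.getExecutorMemoryStatus.foreach { case (location, (maxMem, free)) =>
      println(s"$location: $free B free of $maxMem B")
    }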
  21/02/06 12:40:10 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:10 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:40:23 ...]
  21/02/06 12:40:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsPollingSnapshotSource: Resynchronizing full executor pod state from Kubernetes.
  21/02/06 12:40:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:40:24 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  21/02/06 12:40:24 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 12:40:25 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:25 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:40:39 ...]
  21/02/06 12:40:39 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 12:40:40 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:40 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:40:42 ...]
  21/02/06 12:40:43 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsWatchSnapshotSource: Received executor pod update for pod named spark-pi-c037ee77775a98e5-exec-1, action MODIFIED
  21/02/06 12:40:43 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:43 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:40:43 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  21/02/06 12:40:44 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:44 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:40:53 ...]
  21/02/06 12:40:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsPollingSnapshotSource: Resynchronizing full executor pod state from Kubernetes.
  21/02/06 12:40:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:40:54 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  21/02/06 12:40:54 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 12:40:55 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:40:55 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:41:09 ...]
  21/02/06 12:41:09 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 12:41:10 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:10 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:41:23 ...]
  21/02/06 12:41:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsPollingSnapshotSource: Resynchronizing full executor pod state from Kubernetes.
  21/02/06 12:41:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:41:24 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  21/02/06 12:41:24 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 12:41:25 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:25 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:41:39 ...]
  21/02/06 12:41:39 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 12:41:40 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:40 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:41:42 ...]
  21/02/06 12:41:43 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsWatchSnapshotSource: Received executor pod update for pod named spark-pi-c037ee77775a98e5-exec-1, action MODIFIED
  21/02/06 12:41:43 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:43 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:41:43 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  21/02/06 12:41:44 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:44 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  [... the two DEBUG lines above repeat once per second through 21/02/06 12:41:53 ...]
  21/02/06 12:41:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsPollingSnapshotSource: Resynchronizing full executor pod state from Kubernetes.
  21/02/06 12:41:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: ResourceProfile Id: 0 pod allocation status: 0 running, 1 pending. 0 unacknowledged.
  21/02/06 12:41:54 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsAllocator: Still waiting for 1 executors for ResourceProfile Id 0 before requesting more.
  21/02/06 12:41:54 DEBUG org.apache.spark.scheduler.TaskSchedulerImpl: parentName: , name: TaskSet_0.0, runningTasks: 0
  21/02/06 12:41:54 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  [... the same DEBUG pair repeats once per second, 12:41:55 through 12:42:09 ...]
  21/02/06 12:42:09 WARN org.apache.spark.scheduler.TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  [... the same DEBUG pair repeats once per second, 12:42:10 through 12:42:23 ...]
  21/02/06 12:42:24 DEBUG org.apache.spark.scheduler.cluster.k8s.ExecutorPodsPollingSnapshotSource: Resynchronizing full executor pod state from Kubernetes.
  " did not contain "Pi is roughly 3" The application did not complete, driver log did not contain str Pi is roughly 3. (KubernetesSuite.scala:405)
- Run SparkPi with env and mount secrets. *** FAILED ***
  The code passed to eventually never returned normally. Attempted 188 times over 3.003993833166667 minutes. Last failure message: pod spark-pi-50efbe77775de296-exec-1 does not have a host assigned. (SecretsTestsSuite.scala:96)
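This failure message means the executor pod was created but never scheduled: kube-scheduler never filled in spec.nodeName. A hedged fabric8 sketch of the underlying check (client construction and namespace are assumptions; the pod name is the one from the message):

    import io.fabric8.kubernetes.client.DefaultKubernetesClient

    val client = new DefaultKubernetesClient()
    // A pod "has a host assigned" once the scheduler sets spec.nodeName.
    val pod = client.pods()
      .inNamespace("e295d961e57d4a67964daf9b3a75fd4e")
      .withName("spark-pi-50efbe77775de296-exec-1")
      .get()
    val host = Option(pod.getSpec.getNodeName).filter(_.nonEmpty)
    assert(host.isDefined,
      s"pod ${pod.getMetadata.getName} does not have a host assigned")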
- Run PySpark on simple pi.py example *** FAILED ***
  The code passed to eventually never returned normally. Attempted 186 times over 3.0121293985833333 minutes. Last failure message: "++ id -u
  + myuid=185
  ++ id -g
  + mygid=0
  + set +e
  ++ getent passwd 185
  + uidentry=
  + set -e
  + '[' -z '' ']'
  + '[' -w /etc/passwd ']'
  + echo '185:x:185:0:anonymous uid:/opt/spark:/bin/false'
  + SPARK_CLASSPATH=':/opt/spark/jars/*'
  + env
  + grep SPARK_JAVA_OPT_
  + sort -t_ -k4 -n
  + sed 's/[^=]*=\(.*\)/\1/g'
  + readarray -t SPARK_EXECUTOR_JAVA_OPTS
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z ']'
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z x ']'
  + SPARK_CLASSPATH='/opt/spark/conf::/opt/spark/jars/*'
  + case "$1" in
  + shift 1
  + CMD=("$SPARK_HOME/bin/spark-submit" --conf "spark.driver.bindAddress=$SPARK_DRIVER_BIND_ADDRESS" --deploy-mode client "$@")
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.PythonRunner local:///opt/spark/examples/src/main/python/pi.py 5
  21/02/06 12:46:31 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 12:46:31 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 12:46:31 INFO ResourceUtils: ==============================================================
  21/02/06 12:46:31 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 12:46:31 INFO ResourceUtils: ==============================================================
  21/02/06 12:46:31 INFO SparkContext: Submitted application: PythonPi
  21/02/06 12:46:32 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 12:46:32 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 12:46:32 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 12:46:32 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 12:46:32 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 12:46:32 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 12:46:32 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 12:46:32 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 12:46:32 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 12:46:32 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 12:46:32 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 12:46:32 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 12:46:32 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 12:46:32 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 12:46:32 INFO DiskBlockManager: Created local directory at /var/data/spark-fb0ce654-f3dd-45e6-b887-a7a92d3f0f4b/blockmgr-79b77251-25f2-48ce-a292-079dd85af665
  21/02/06 12:46:32 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 12:46:32 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 12:46:33 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 12:46:33 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 12:46:33 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 12:46:35 INFO ExecutorPodsAllocator: Going to request 1 executors from Kubernetes for ResourceProfile Id: 0, target: 1 running: 0.
  21/02/06 12:46:35 INFO BasicExecutorFeatureStep: Decommissioning not enabled, skipping shutdown script
  21/02/06 12:46:35 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 12:46:35 INFO NettyBlockTransferService: Server created on spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 12:46:35 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 12:46:35 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:46:35 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:46:35 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:46:35 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:47:05 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 12:47:05 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 12:47:05 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  21/02/06 12:47:07 INFO SparkContext: Starting job: reduce at /opt/spark/examples/src/main/python/pi.py:42
  21/02/06 12:47:07 INFO DAGScheduler: Got job 0 (reduce at /opt/spark/examples/src/main/python/pi.py:42) with 5 output partitions
  21/02/06 12:47:07 INFO DAGScheduler: Final stage: ResultStage 0 (reduce at /opt/spark/examples/src/main/python/pi.py:42)
  21/02/06 12:47:07 INFO DAGScheduler: Parents of final stage: List()
  21/02/06 12:47:07 INFO DAGScheduler: Missing parents: List()
  21/02/06 12:47:07 INFO DAGScheduler: Submitting ResultStage 0 (PythonRDD[1] at reduce at /opt/spark/examples/src/main/python/pi.py:42), which has no missing parents
  21/02/06 12:47:08 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 10.4 KiB, free 593.9 MiB)
  21/02/06 12:47:08 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 7.9 KiB, free 593.9 MiB)
  21/02/06 12:47:08 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-b0ffe5777761126f-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 7.9 KiB, free: 593.9 MiB)
  21/02/06 12:47:08 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 12:47:08 INFO DAGScheduler: Submitting 5 missing tasks from ResultStage 0 (PythonRDD[1] at reduce at /opt/spark/examples/src/main/python/pi.py:42) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4))
  21/02/06 12:47:08 INFO TaskSchedulerImpl: Adding task set 0.0 with 5 tasks resource profile 0
  21/02/06 12:47:23 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  [... the same WARN repeats every 15 seconds through 12:49:23 ...]
  " did not contain "Pi is roughly 3" The application did not complete, driver log did not contain str Pi is roughly 3. (KubernetesSuite.scala:405)
- Run PySpark to test a pyfiles example *** FAILED ***
  The code passed to eventually never returned normally. Attempted 181 times over 3.007478728716667 minutes. Last failure message: "++ id -u
  [... container entrypoint trace identical to the one quoted in the first failure above ...]
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.PythonRunner local:///opt/spark/tests/pyfiles.py python3
  21/02/06 12:50:07 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 12:50:08 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 12:50:08 INFO ResourceUtils: ==============================================================
  21/02/06 12:50:08 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 12:50:08 INFO ResourceUtils: ==============================================================
  21/02/06 12:50:08 INFO SparkContext: Submitted application: PyFilesTest
  21/02/06 12:50:08 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 12:50:08 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 12:50:08 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 12:50:08 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 12:50:08 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 12:50:08 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 12:50:08 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 12:50:08 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 12:50:08 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 12:50:09 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 12:50:09 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 12:50:09 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 12:50:09 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 12:50:09 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 12:50:09 INFO DiskBlockManager: Created local directory at /var/data/spark-36c2f421-f467-435f-90f3-e9bcd11a6151/blockmgr-137734c5-9abd-4bec-b1b2-a62761d8b1aa
  21/02/06 12:50:09 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 12:50:09 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 12:50:09 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 12:50:09 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 12:50:09 WARN SparkContext: File with 'local' scheme local:///opt/spark/tests/py_container_checks.py is not supported to add to file server, since it is already available on every node.
  21/02/06 12:50:09 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 12:50:11 INFO ExecutorPodsAllocator: Going to request 1 executors from Kubernetes for ResourceProfile Id: 0, target: 1 running: 0.
  21/02/06 12:50:12 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 12:50:12 INFO NettyBlockTransferService: Server created on spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 12:50:12 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 12:50:12 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:50:12 INFO BasicExecutorFeatureStep: Decommissioning not enabled, skipping shutdown script
  21/02/06 12:50:12 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:50:12 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:50:12 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:50:41 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 12:50:42 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 12:50:42 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  Python runtime version check is: True
  Python environment version check is: True
  21/02/06 12:50:47 INFO CodeGenerator: Code generated in 381.21087 ms
  21/02/06 12:50:47 INFO CodeGenerator: Code generated in 24.962654 ms
  21/02/06 12:50:48 INFO SparkContext: Starting job: collect at /opt/spark/tests/pyfiles.py:39
  21/02/06 12:50:48 INFO DAGScheduler: Got job 0 (collect at /opt/spark/tests/pyfiles.py:39) with 1 output partitions
  21/02/06 12:50:48 INFO DAGScheduler: Final stage: ResultStage 0 (collect at /opt/spark/tests/pyfiles.py:39)
  21/02/06 12:50:48 INFO DAGScheduler: Parents of final stage: List()
  21/02/06 12:50:48 INFO DAGScheduler: Missing parents: List()
  21/02/06 12:50:48 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[5] at collect at /opt/spark/tests/pyfiles.py:39), which has no missing parents
  21/02/06 12:50:48 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 12.3 KiB, free 593.9 MiB)
  21/02/06 12:50:48 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 6.1 KiB, free 593.9 MiB)
  21/02/06 12:50:48 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-a3ffd077776460e8-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 6.1 KiB, free: 593.9 MiB)
  21/02/06 12:50:48 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 12:50:48 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 0 (MapPartitionsRDD[5] at collect at /opt/spark/tests/pyfiles.py:39) (first 15 tasks are for partitions Vector(0))
  21/02/06 12:50:48 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks resource profile 0
  21/02/06 12:51:03 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  [... the same WARN repeats every 15 seconds through 12:53:03 ...]
  " did not contain "Python runtime version check for executor is: True" The application did not complete, driver log did not contain str Python runtime version check for executor is: True. (KubernetesSuite.scala:405)
- Run PySpark with memory customization *** FAILED ***
  The code passed to eventually never returned normally. Attempted 186 times over 3.0111437903166665 minutes. Last failure message: "++ id -u
  [... container entrypoint trace identical to the one quoted in the first failure above ...]
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.PythonRunner local:///opt/spark/tests/worker_memory_check.py 209715200
  21/02/06 12:53:43 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 12:53:43 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 12:53:43 INFO ResourceUtils: ==============================================================
  21/02/06 12:53:43 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 12:53:43 INFO ResourceUtils: ==============================================================
  21/02/06 12:53:43 INFO SparkContext: Submitted application: PyMemoryTest
  21/02/06 12:53:44 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , pyspark.memory -> name: pyspark.memory, amount: 200, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 12:53:44 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 12:53:44 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 12:53:44 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 12:53:44 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 12:53:44 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 12:53:44 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 12:53:44 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 12:53:44 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 12:53:44 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 12:53:44 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 12:53:44 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 12:53:44 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 12:53:44 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 12:53:44 INFO DiskBlockManager: Created local directory at /var/data/spark-6abdebb3-54e9-4602-95aa-42fd9664b597/blockmgr-f98a6f3b-9ce8-480b-9f3b-09273526dcf7
  21/02/06 12:53:44 INFO MemoryStore: MemoryStore started with capacity 546.3 MiB
  21/02/06 12:53:44 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 12:53:45 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 12:53:45 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 12:53:45 WARN SparkContext: File with 'local' scheme local:///opt/spark/tests/py_container_checks.py is not supported to add to file server, since it is already available on every node.
  21/02/06 12:53:45 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 12:53:47 INFO ExecutorPodsAllocator: Going to request 1 executors from Kubernetes for ResourceProfile Id: 0, target: 1 running: 0.
  21/02/06 12:53:47 INFO BasicExecutorFeatureStep: Decommissioning not enabled, skipping shutdown script
  21/02/06 12:53:47 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 12:53:47 INFO NettyBlockTransferService: Server created on spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 12:53:47 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 12:53:47 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:53:47 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 546.3 MiB RAM, BlockManagerId(driver, spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:53:47 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:53:47 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:54:17 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 12:54:17 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 12:54:17 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  21/02/06 12:54:19 INFO SparkContext: Starting job: collect at /opt/spark/tests/worker_memory_check.py:41
  21/02/06 12:54:19 INFO DAGScheduler: Got job 0 (collect at /opt/spark/tests/worker_memory_check.py:41) with 2 output partitions
  21/02/06 12:54:19 INFO DAGScheduler: Final stage: ResultStage 0 (collect at /opt/spark/tests/worker_memory_check.py:41)
  21/02/06 12:54:19 INFO DAGScheduler: Parents of final stage: List()
  21/02/06 12:54:19 INFO DAGScheduler: Missing parents: List()
  21/02/06 12:54:19 INFO DAGScheduler: Submitting ResultStage 0 (PythonRDD[1] at collect at /opt/spark/tests/worker_memory_check.py:41), which has no missing parents
  21/02/06 12:54:19 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 4.9 KiB, free 546.3 MiB)
  21/02/06 12:54:19 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 3.2 KiB, free 546.3 MiB)
  21/02/06 12:54:19 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-584860777767ac9b-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 3.2 KiB, free: 546.3 MiB)
  21/02/06 12:54:19 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 12:54:19 INFO DAGScheduler: Submitting 2 missing tasks from ResultStage 0 (PythonRDD[1] at collect at /opt/spark/tests/worker_memory_check.py:41) (first 15 tasks are for partitions Vector(0, 1))
  21/02/06 12:54:19 INFO TaskSchedulerImpl: Adding task set 0.0 with 2 tasks resource profile 0
  21/02/06 12:54:34 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  [... the same WARN repeats every 15 seconds through 12:56:34 ...]
  " did not contain "PySpark Worker Memory Check is: True" The application did not complete, driver log did not contain str PySpark Worker Memory Check is: True. (KubernetesSuite.scala:405)
- Run in client mode.
- Start pod creation from template *** FAILED ***
  The code passed to eventually never returned normally. Attempted 186 times over 3.011695879216666 minutes. Last failure message: "++ id -u
  [... container entrypoint trace identical to the one quoted in the first failure above ...]
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.examples.SparkPi local:///opt/spark/examples/jars/spark-examples_2.12-3.2.0-SNAPSHOT.jar
  21/02/06 12:57:43 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 12:57:43 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 12:57:44 INFO ResourceUtils: ==============================================================
  21/02/06 12:57:44 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 12:57:44 INFO ResourceUtils: ==============================================================
  21/02/06 12:57:44 INFO SparkContext: Submitted application: Spark Pi
  21/02/06 12:57:44 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 12:57:44 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 12:57:44 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 12:57:44 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 12:57:44 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 12:57:44 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 12:57:44 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 12:57:44 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 12:57:44 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 12:57:45 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 12:57:45 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 12:57:45 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 12:57:45 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 12:57:45 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 12:57:45 INFO DiskBlockManager: Created local directory at /var/data/spark-777bafb0-cc80-4fd9-a7c4-f363e2bcbf01/blockmgr-a610b715-157d-48b9-91c8-676f69d8dc1a
  21/02/06 12:57:45 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 12:57:45 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 12:57:45 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 12:57:45 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 12:57:45 INFO SparkContext: Added JAR local:///opt/spark/examples/jars/spark-examples_2.12-3.2.0-SNAPSHOT.jar at file:/opt/spark/examples/jars/spark-examples_2.12-3.2.0-SNAPSHOT.jar with timestamp 1612616263900
  21/02/06 12:57:45 INFO SparkContext: The JAR local:///opt/spark/examples/jars/spark-examples_2.12-3.2.0-SNAPSHOT.jar at file:/opt/spark/examples/jars/spark-examples_2.12-3.2.0-SNAPSHOT.jar has been added already. Overwriting of added jar is not supported in the current version.
  21/02/06 12:57:45 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 12:57:48 INFO ExecutorPodsAllocator: Going to request 1 executors from Kubernetes for ResourceProfile Id: 0, target: 1 running: 0.
  21/02/06 12:57:48 INFO BasicExecutorFeatureStep: Decommissioning not enabled, skipping shutdown script
  21/02/06 12:57:48 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 12:57:48 INFO NettyBlockTransferService: Server created on spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 12:57:48 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 12:57:48 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:57:48 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:57:48 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:57:48 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 12:58:17 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 12:58:18 INFO SparkContext: Starting job: reduce at SparkPi.scala:38
  21/02/06 12:58:18 INFO DAGScheduler: Got job 0 (reduce at SparkPi.scala:38) with 2 output partitions
  21/02/06 12:58:18 INFO DAGScheduler: Final stage: ResultStage 0 (reduce at SparkPi.scala:38)
  21/02/06 12:58:18 INFO DAGScheduler: Parents of final stage: List()
  21/02/06 12:58:18 INFO DAGScheduler: Missing parents: List()
  21/02/06 12:58:18 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[1] at map at SparkPi.scala:34), which has no missing parents
  21/02/06 12:58:18 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 3.1 KiB, free 593.9 MiB)
  21/02/06 12:58:19 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 1816.0 B, free 593.9 MiB)
  21/02/06 12:58:19 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-29094677776b542e-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 1816.0 B, free: 593.9 MiB)
  21/02/06 12:58:19 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 12:58:19 INFO DAGScheduler: Submitting 2 missing tasks from ResultStage 0 (MapPartitionsRDD[1] at map at SparkPi.scala:34) (first 15 tasks are for partitions Vector(0, 1))
  21/02/06 12:58:19 INFO TaskSchedulerImpl: Adding task set 0.0 with 2 tasks resource profile 0
  21/02/06 12:58:34 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  [... the same WARN repeats every 15 seconds through 13:00:49 ...]
  " did not contain "Pi is roughly 3" The application did not complete, driver log did not contain str Pi is roughly 3. (KubernetesSuite.scala:405)
- PVs with local storage *** FAILED ***
  io.fabric8.kubernetes.client.KubernetesClientException: Failure executing: POST at: https://192.168.39.167:8443/api/v1/persistentvolumes. Message: object is being deleted: persistentvolumes "test-local-pv" already exists. Received status: Status(apiVersion=v1, code=409, details=StatusDetails(causes=[], group=null, kind=persistentvolumes, name=test-local-pv, retryAfterSeconds=null, uid=null, additionalProperties={}), kind=Status, message=object is being deleted: persistentvolumes "test-local-pv" already exists, metadata=ListMeta(_continue=null, remainingItemCount=null, resourceVersion=null, selfLink=null, additionalProperties={}), reason=AlreadyExists, status=Failure, additionalProperties={}).
  at io.fabric8.kubernetes.client.dsl.base.OperationSupport.requestFailure(OperationSupport.java:589)
  at io.fabric8.kubernetes.client.dsl.base.OperationSupport.assertResponseCode(OperationSupport.java:528)
  at io.fabric8.kubernetes.client.dsl.base.OperationSupport.handleResponse(OperationSupport.java:492)
  at io.fabric8.kubernetes.client.dsl.base.OperationSupport.handleResponse(OperationSupport.java:451)
  at io.fabric8.kubernetes.client.dsl.base.OperationSupport.handleCreate(OperationSupport.java:252)
  at io.fabric8.kubernetes.client.dsl.base.BaseOperation.handleCreate(BaseOperation.java:879)
  at io.fabric8.kubernetes.client.dsl.base.BaseOperation.create(BaseOperation.java:341)
  at io.fabric8.kubernetes.client.dsl.base.BaseOperation.create(BaseOperation.java:84)
  at org.apache.spark.deploy.k8s.integrationtest.PVTestsSuite.setupLocalStorage(PVTestsSuite.scala:87)
  at org.apache.spark.deploy.k8s.integrationtest.PVTestsSuite.$anonfun$$init$$1(PVTestsSuite.scala:137)
  ...
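This 409 is a test-isolation race: the previous run's test-local-pv is still terminating when PVTestsSuite POSTs a new one, so the API server answers AlreadyExists with "object is being deleted". One defensive pattern, sketched with fabric8 (not the suite's actual code), is to wait for the leftover object to disappear before re-creating it:

    import io.fabric8.kubernetes.api.model.PersistentVolumeBuilder
    import io.fabric8.kubernetes.client.DefaultKubernetesClient

    val client = new DefaultKubernetesClient()
    val pvName = "test-local-pv"

    // Delete any leftover PV and wait for its finalizers to release it, so
    // the create below cannot race a pending deletion and get a 409.
    client.persistentVolumes().withName(pvName).delete()
    while (client.persistentVolumes().withName(pvName).get() != null) {
      Thread.sleep(1000)
    }

    // Spec elided; the real test builds a local-storage volume here.
    val pv = new PersistentVolumeBuilder()
      .withNewMetadata().withName(pvName).endMetadata()
      .build()
    client.persistentVolumes().create(pv)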
- Launcher client dependencies *** FAILED ***
  The code passed to eventually never returned normally. Attempted 190 times over 3.001118395066667 minutes. Last failure message: false was not true. (KubernetesSuite.scala:388)
- SPARK-33615: Launcher client archives *** FAILED ***
  The code passed to eventually never returned normally. Attempted 190 times over 3.0010403063999997 minutes. Last failure message: false was not true. (KubernetesSuite.scala:388)
- SPARK-33748: Launcher python client respecting PYSPARK_PYTHON *** FAILED ***
  The code passed to eventually never returned normally. Attempted 190 times over 3.0010149961 minutes. Last failure message: false was not true. (KubernetesSuite.scala:388)
- SPARK-33748: Launcher python client respecting spark.pyspark.python and spark.pyspark.driver.python *** FAILED ***
  The code passed to eventually never returned normally. Attempted 190 times over 3.001022415483333 minutes. Last failure message: false was not true. (KubernetesSuite.scala:388)
- Launcher python client dependencies using a zip file *** FAILED ***
  The code passed to eventually never returned normally. Attempted 190 times over 3.000970205133333 minutes. Last failure message: false was not true. (KubernetesSuite.scala:388)
- Test basic decommissioning *** FAILED ***
  The code passed to eventually never returned normally. Attempted 186 times over 3.0105085664499995 minutes. Last failure message: "++ id -u
  [... container entrypoint trace identical to the one quoted in the first failure above ...]
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.PythonRunner local:///opt/spark/tests/decommissioning.py
  21/02/06 13:19:12 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Starting decom test
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 13:19:13 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 13:19:13 INFO ResourceUtils: ==============================================================
  21/02/06 13:19:13 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 13:19:13 INFO ResourceUtils: ==============================================================
  21/02/06 13:19:13 INFO SparkContext: Submitted application: DecomTest
  21/02/06 13:19:13 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 13:19:13 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 13:19:13 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 13:19:14 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 13:19:14 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 13:19:14 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 13:19:14 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 13:19:14 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 13:19:14 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 13:19:14 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 13:19:14 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 13:19:14 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 13:19:14 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 13:19:14 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 13:19:14 INFO DiskBlockManager: Created local directory at /var/data/spark-3450da23-934a-498d-b215-ecbc48bf3976/blockmgr-674d26cd-5816-40ee-9376-47df9c1f1fbf
  21/02/06 13:19:14 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 13:19:14 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 13:19:15 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 13:19:15 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 13:19:15 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 13:19:17 INFO ExecutorPodsAllocator: Going to request 3 executors from Kubernetes for ResourceProfile Id: 0, target: 3 running: 0.
  21/02/06 13:19:17 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:19:17 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 13:19:17 INFO NettyBlockTransferService: Server created on spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 13:19:17 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 13:19:17 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:19:17 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:19:17 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:19:17 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:19:17 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:19:17 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:19:47 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 13:19:47 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 13:19:47 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  21/02/06 13:19:49 INFO SparkContext: Starting job: collect at /opt/spark/tests/decommissioning.py:44
  21/02/06 13:19:49 INFO DAGScheduler: Registering RDD 2 (groupByKey at /opt/spark/tests/decommissioning.py:43) as input to shuffle 0
  21/02/06 13:19:49 INFO DAGScheduler: Got job 0 (collect at /opt/spark/tests/decommissioning.py:44) with 5 output partitions
  21/02/06 13:19:49 INFO DAGScheduler: Final stage: ResultStage 1 (collect at /opt/spark/tests/decommissioning.py:44)
  21/02/06 13:19:49 INFO DAGScheduler: Parents of final stage: List(ShuffleMapStage 0)
  21/02/06 13:19:49 INFO DAGScheduler: Missing parents: List(ShuffleMapStage 0)
  21/02/06 13:19:49 INFO DAGScheduler: Submitting ShuffleMapStage 0 (PairwiseRDD[2] at groupByKey at /opt/spark/tests/decommissioning.py:43), which has no missing parents
  21/02/06 13:19:49 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 10.7 KiB, free 593.9 MiB)
  21/02/06 13:19:49 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 6.5 KiB, free 593.9 MiB)
  21/02/06 13:19:49 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-d709aa77777f0171-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 6.5 KiB, free: 593.9 MiB)
  21/02/06 13:19:50 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 13:19:50 INFO DAGScheduler: Submitting 5 missing tasks from ShuffleMapStage 0 (PairwiseRDD[2] at groupByKey at /opt/spark/tests/decommissioning.py:43) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4))
  21/02/06 13:19:50 INFO TaskSchedulerImpl: Adding task set 0.0 with 5 tasks resource profile 0
  21/02/06 13:20:05 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:20:20 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:20:35 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:20:50 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:21:05 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:21:20 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:21:35 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:21:50 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:22:05 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  " did not contain "Finished waiting, stopping Spark" The application did not complete, driver log did not contain str Finished waiting, stopping Spark. (KubernetesSuite.scala:405)
- Test basic decommissioning with shuffle cleanup *** FAILED ***
  The code passed to eventually never returned normally. Attempted 186 times over 3.0102277 minutes. Last failure message: "++ id -u
  + myuid=185
  ++ id -g
  + mygid=0
  + set +e
  ++ getent passwd 185
  + uidentry=
  + set -e
  + '[' -z '' ']'
  + '[' -w /etc/passwd ']'
  + echo '185:x:185:0:anonymous uid:/opt/spark:/bin/false'
  + SPARK_CLASSPATH=':/opt/spark/jars/*'
  + env
  + grep SPARK_JAVA_OPT_
  + sort -t_ -k4 -n
  + sed 's/[^=]*=\(.*\)/\1/g'
  + readarray -t SPARK_EXECUTOR_JAVA_OPTS
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z ']'
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z x ']'
  + SPARK_CLASSPATH='/opt/spark/conf::/opt/spark/jars/*'
  + case "$1" in
  + shift 1
  + CMD=("$SPARK_HOME/bin/spark-submit" --conf "spark.driver.bindAddress=$SPARK_DRIVER_BIND_ADDRESS" --deploy-mode client "$@")
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.PythonRunner local:///opt/spark/tests/decommissioning_cleanup.py
  21/02/06 13:22:49 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Starting decom test
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 13:22:50 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 13:22:50 INFO ResourceUtils: ==============================================================
  21/02/06 13:22:50 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 13:22:50 INFO ResourceUtils: ==============================================================
  21/02/06 13:22:50 INFO SparkContext: Submitted application: DecomTest
  21/02/06 13:22:50 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 13:22:50 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 13:22:50 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 13:22:50 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 13:22:50 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 13:22:50 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 13:22:50 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 13:22:50 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 13:22:51 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 13:22:51 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 13:22:51 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 13:22:51 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 13:22:51 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 13:22:51 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 13:22:51 INFO DiskBlockManager: Created local directory at /var/data/spark-122c6151-6ec6-404a-af3c-f056234d8983/blockmgr-429ae44d-209c-4530-bca5-f92c4d1ff0b3
  21/02/06 13:22:51 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 13:22:51 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 13:22:51 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 13:22:51 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 13:22:52 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 13:22:54 INFO ExecutorPodsAllocator: Going to request 3 executors from Kubernetes for ResourceProfile Id: 0, target: 3 running: 0.
  21/02/06 13:22:54 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:22:54 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 13:22:54 INFO NettyBlockTransferService: Server created on spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 13:22:54 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 13:22:54 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:22:54 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:22:54 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:22:54 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:22:54 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:22:54 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:23:23 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 13:23:24 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 13:23:24 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  21/02/06 13:23:26 INFO SparkContext: Starting job: collect at /opt/spark/tests/decommissioning_cleanup.py:47
  21/02/06 13:23:26 INFO DAGScheduler: Registering RDD 7 (groupByKey at /opt/spark/tests/decommissioning_cleanup.py:46) as input to shuffle 0
  21/02/06 13:23:26 INFO DAGScheduler: Got job 0 (collect at /opt/spark/tests/decommissioning_cleanup.py:47) with 5 output partitions
  21/02/06 13:23:26 INFO DAGScheduler: Final stage: ResultStage 1 (collect at /opt/spark/tests/decommissioning_cleanup.py:47)
  21/02/06 13:23:26 INFO DAGScheduler: Parents of final stage: List(ShuffleMapStage 0)
  21/02/06 13:23:26 INFO DAGScheduler: Missing parents: List(ShuffleMapStage 0)
  21/02/06 13:23:26 INFO DAGScheduler: Submitting ShuffleMapStage 0 (PairwiseRDD[7] at groupByKey at /opt/spark/tests/decommissioning_cleanup.py:46), which has no missing parents
  21/02/06 13:23:26 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 10.2 KiB, free 593.9 MiB)
  21/02/06 13:23:27 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 6.3 KiB, free 593.9 MiB)
  21/02/06 13:23:27 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-e684f177778250e1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 6.3 KiB, free: 593.9 MiB)
  21/02/06 13:23:27 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 13:23:27 INFO DAGScheduler: Submitting 5 missing tasks from ShuffleMapStage 0 (PairwiseRDD[7] at groupByKey at /opt/spark/tests/decommissioning_cleanup.py:46) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4))
  21/02/06 13:23:27 INFO TaskSchedulerImpl: Adding task set 0.0 with 5 tasks resource profile 0
  21/02/06 13:23:42 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:23:57 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:24:12 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:24:27 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:24:42 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:24:57 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:25:12 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:25:27 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:25:42 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  " did not contain "Finished waiting, stopping Spark" The application did not complete, driver log did not contain str Finished waiting, stopping Spark. (KubernetesSuite.scala:405)
- Test decommissioning with dynamic allocation & shuffle cleanups *** FAILED ***
  The code passed to eventually never returned normally. Attempted 186 times over 3.0134878753333334 minutes. Last failure message: "++ id -u
  + myuid=185
  ++ id -g
  + mygid=0
  + set +e
  ++ getent passwd 185
  + uidentry=
  + set -e
  + '[' -z '' ']'
  + '[' -w /etc/passwd ']'
  + echo '185:x:185:0:anonymous uid:/opt/spark:/bin/false'
  + SPARK_CLASSPATH=':/opt/spark/jars/*'
  + env
  + grep SPARK_JAVA_OPT_
  + sort -t_ -k4 -n
  + sed 's/[^=]*=\(.*\)/\1/g'
  + readarray -t SPARK_EXECUTOR_JAVA_OPTS
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z ']'
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z x ']'
  + SPARK_CLASSPATH='/opt/spark/conf::/opt/spark/jars/*'
  + case "$1" in
  + shift 1
  + CMD=("$SPARK_HOME/bin/spark-submit" --conf "spark.driver.bindAddress=$SPARK_DRIVER_BIND_ADDRESS" --deploy-mode client "$@")
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.PythonRunner local:///opt/spark/tests/autoscale.py
  21/02/06 13:26:27 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  Starting autoscale test
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 13:26:27 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 13:26:27 INFO ResourceUtils: ==============================================================
  21/02/06 13:26:27 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 13:26:27 INFO ResourceUtils: ==============================================================
  21/02/06 13:26:27 INFO SparkContext: Submitted application: AutoScale
  21/02/06 13:26:28 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 13:26:28 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 13:26:28 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 13:26:28 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 13:26:28 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 13:26:28 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 13:26:28 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 13:26:28 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 13:26:28 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 13:26:28 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 13:26:28 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 13:26:28 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 13:26:28 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 13:26:28 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 13:26:28 INFO DiskBlockManager: Created local directory at /var/data/spark-c5b9c769-c1bf-4635-b403-26fb0c8e4c9b/blockmgr-082eca13-578e-4acf-91d8-32dd096fa5fa
  21/02/06 13:26:28 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 13:26:28 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 13:26:29 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 13:26:29 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 13:26:29 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 13:26:31 INFO Utils: Using initial executors = 2, max of spark.dynamicAllocation.initialExecutors, spark.dynamicAllocation.minExecutors and spark.executor.instances
  21/02/06 13:26:31 INFO ExecutorPodsAllocator: Going to request 2 executors from Kubernetes for ResourceProfile Id: 0, target: 2 running: 0.
  21/02/06 13:26:31 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:26:31 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 13:26:31 INFO NettyBlockTransferService: Server created on spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 13:26:31 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 13:26:31 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:26:31 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:26:31 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:26:31 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:26:31 INFO Utils: Using initial executors = 2, max of spark.dynamicAllocation.initialExecutors, spark.dynamicAllocation.minExecutors and spark.executor.instances
  21/02/06 13:26:31 WARN ExecutorAllocationManager: Dynamic allocation without a shuffle service is an experimental feature.
  21/02/06 13:26:31 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:27:01 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 13:27:01 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 13:27:01 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  21/02/06 13:27:03 INFO SparkContext: Starting job: collect at /opt/spark/tests/autoscale.py:38
  21/02/06 13:27:03 INFO DAGScheduler: Registering RDD 2 (groupByKey at /opt/spark/tests/autoscale.py:37) as input to shuffle 0
  21/02/06 13:27:03 INFO DAGScheduler: Got job 0 (collect at /opt/spark/tests/autoscale.py:38) with 5 output partitions
  21/02/06 13:27:03 INFO DAGScheduler: Final stage: ResultStage 1 (collect at /opt/spark/tests/autoscale.py:38)
  21/02/06 13:27:03 INFO DAGScheduler: Parents of final stage: List(ShuffleMapStage 0)
  21/02/06 13:27:03 INFO DAGScheduler: Missing parents: List(ShuffleMapStage 0)
  21/02/06 13:27:03 INFO DAGScheduler: Submitting ShuffleMapStage 0 (PairwiseRDD[2] at groupByKey at /opt/spark/tests/autoscale.py:37), which has no missing parents
  21/02/06 13:27:03 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 10.2 KiB, free 593.9 MiB)
  21/02/06 13:27:04 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 6.2 KiB, free 593.9 MiB)
  21/02/06 13:27:04 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-427966777785a077-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 6.2 KiB, free: 593.9 MiB)
  21/02/06 13:27:04 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:1387
  21/02/06 13:27:04 INFO DAGScheduler: Submitting 5 missing tasks from ShuffleMapStage 0 (PairwiseRDD[2] at groupByKey at /opt/spark/tests/autoscale.py:37) (first 15 tasks are for partitions Vector(0, 1, 2, 3, 4))
  21/02/06 13:27:04 INFO TaskSchedulerImpl: Adding task set 0.0 with 5 tasks resource profile 0
  21/02/06 13:27:04 INFO ExecutorAllocationManager: Requesting 1 new executor because tasks are backlogged (new desired total will be 3 for resource profile id: 0)
  21/02/06 13:27:04 INFO ExecutorPodsAllocator: Going to request 1 executors from Kubernetes for ResourceProfile Id: 0, target: 3 running: 2.
  21/02/06 13:27:05 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:27:05 INFO ExecutorAllocationManager: Requesting 2 new executors because tasks are backlogged (new desired total will be 5 for resource profile id: 0)
  21/02/06 13:27:06 INFO ExecutorPodsAllocator: Going to request 2 executors from Kubernetes for ResourceProfile Id: 0, target: 5 running: 3.
  21/02/06 13:27:06 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:27:06 INFO BasicExecutorFeatureStep: Adding decommission script to lifecycle
  21/02/06 13:27:19 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:27:34 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:27:49 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:28:04 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:28:19 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:28:34 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:28:49 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:29:04 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:29:19 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  " did not contain "Finished waiting, stopping Spark" The application did not complete, driver log did not contain str Finished waiting, stopping Spark. (KubernetesSuite.scala:405)
- Run SparkR on simple dataframe.R example *** FAILED ***
  The code passed to eventually never returned normally. Attempted 183 times over 3.006895033033333 minutes. Last failure message: "++ id -u
  + myuid=185
  ++ id -g
  + mygid=0
  + set +e
  ++ getent passwd 185
  + uidentry=
  + set -e
  + '[' -z '' ']'
  + '[' -w /etc/passwd ']'
  + echo '185:x:185:0:anonymous uid:/opt/spark:/bin/false'
  + SPARK_CLASSPATH=':/opt/spark/jars/*'
  + env
  + grep SPARK_JAVA_OPT_
  + sort -t_ -k4 -n
  + sed 's/[^=]*=\(.*\)/\1/g'
  + readarray -t SPARK_EXECUTOR_JAVA_OPTS
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z ']'
  + '[' -n '' ']'
  + '[' -z ']'
  + '[' -z x ']'
  + SPARK_CLASSPATH='/opt/spark/conf::/opt/spark/jars/*'
  + case "$1" in
  + shift 1
  + CMD=("$SPARK_HOME/bin/spark-submit" --conf "spark.driver.bindAddress=$SPARK_DRIVER_BIND_ADDRESS" --deploy-mode client "$@")
  + exec /usr/bin/tini -s -- /opt/spark/bin/spark-submit --conf spark.driver.bindAddress=172.17.0.8 --deploy-mode client --properties-file /opt/spark/conf/spark.properties --class org.apache.spark.deploy.RRunner local:///opt/spark/examples/src/main/r/dataframe.R
  21/02/06 13:30:03 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
  log4j:WARN No appenders could be found for logger (io.netty.util.internal.logging.InternalLoggerFactory).
  log4j:WARN Please initialize the log4j system properly.
  log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
  
  Attaching package: ‘SparkR’
  
  The following objects are masked from ‘package:stats’:
  
      cov, filter, lag, na.omit, predict, sd, var, window
  
  The following objects are masked from ‘package:base’:
  
      as.data.frame, colnames, colnames<-, drop, endsWith, intersect,
      rank, rbind, sample, startsWith, subset, summary, transform, union
  
  Spark package found in SPARK_HOME: /opt/spark
  Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
  21/02/06 13:30:05 INFO SparkContext: Running Spark version 3.2.0-SNAPSHOT
  21/02/06 13:30:05 INFO ResourceUtils: ==============================================================
  21/02/06 13:30:05 INFO ResourceUtils: No custom resources configured for spark.driver.
  21/02/06 13:30:05 INFO ResourceUtils: ==============================================================
  21/02/06 13:30:05 INFO SparkContext: Submitted application: SparkR-DataFrame-example
  21/02/06 13:30:05 INFO ResourceProfile: Default ResourceProfile created, executor resources: Map(cores -> name: cores, amount: 1, script: , vendor: , memory -> name: memory, amount: 1024, script: , vendor: , offHeap -> name: offHeap, amount: 0, script: , vendor: ), task resources: Map(cpus -> name: cpus, amount: 1.0)
  21/02/06 13:30:05 INFO ResourceProfile: Limiting resource is cpus at 1 tasks per executor
  21/02/06 13:30:05 INFO ResourceProfileManager: Added ResourceProfile id: 0
  21/02/06 13:30:05 INFO SecurityManager: Changing view acls to: 185,jenkins
  21/02/06 13:30:05 INFO SecurityManager: Changing modify acls to: 185,jenkins
  21/02/06 13:30:05 INFO SecurityManager: Changing view acls groups to: 
  21/02/06 13:30:05 INFO SecurityManager: Changing modify acls groups to: 
  21/02/06 13:30:05 INFO SecurityManager: SecurityManager: authentication enabled; ui acls disabled; users  with view permissions: Set(185, jenkins); groups with view permissions: Set(); users  with modify permissions: Set(185, jenkins); groups with modify permissions: Set()
  21/02/06 13:30:05 INFO Utils: Successfully started service 'sparkDriver' on port 7078.
  21/02/06 13:30:05 INFO SparkEnv: Registering MapOutputTracker
  21/02/06 13:30:05 INFO SparkEnv: Registering BlockManagerMaster
  21/02/06 13:30:05 INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
  21/02/06 13:30:05 INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
  21/02/06 13:30:05 INFO SparkEnv: Registering BlockManagerMasterHeartbeat
  21/02/06 13:30:05 INFO DiskBlockManager: Created local directory at /var/data/spark-3f5f6349-037d-4b84-b329-00f114f482e0/blockmgr-6c347396-fc49-4a3f-a2ee-8509ea420e03
  21/02/06 13:30:05 INFO MemoryStore: MemoryStore started with capacity 593.9 MiB
  21/02/06 13:30:05 INFO SparkEnv: Registering OutputCommitCoordinator
  21/02/06 13:30:06 INFO Utils: Successfully started service 'SparkUI' on port 4040.
  21/02/06 13:30:06 INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:4040
  21/02/06 13:30:06 WARN SparkContext: File with 'local' scheme local:///opt/spark/examples/src/main/r/dataframe.R is not supported to add to file server, since it is already available on every node.
  21/02/06 13:30:06 INFO SparkKubernetesClientFactory: Auto-configuring K8S client using current context from users K8S config file
  21/02/06 13:30:08 INFO ExecutorPodsAllocator: Going to request 1 executors from Kubernetes for ResourceProfile Id: 0, target: 1 running: 0.
  21/02/06 13:30:08 INFO BasicExecutorFeatureStep: Decommissioning not enabled, skipping shutdown script
  21/02/06 13:30:08 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 7079.
  21/02/06 13:30:08 INFO NettyBlockTransferService: Server created on spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079
  21/02/06 13:30:08 INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
  21/02/06 13:30:08 INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:30:08 INFO BlockManagerMasterEndpoint: Registering block manager spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 with 593.9 MiB RAM, BlockManagerId(driver, spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:30:08 INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:30:08 INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc, 7079, None)
  21/02/06 13:30:38 INFO KubernetesClusterSchedulerBackend: SchedulerBackend is ready for scheduling beginning after waiting maxRegisteredResourcesWaitingTime: 30000000000(ns)
  21/02/06 13:30:38 INFO HiveConf: Found configuration file null
  21/02/06 13:30:38 INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/opt/spark/work-dir/spark-warehouse').
  21/02/06 13:30:38 INFO SharedState: Warehouse path is 'file:/opt/spark/work-dir/spark-warehouse'.
  Java ref type org.apache.spark.sql.SparkSession id 1 
  root
   |-- name: string (nullable = true)
   |-- age: double (nullable = true)
  21/02/06 13:30:44 INFO InMemoryFileIndex: It took 63 ms to list leaf files for 1 paths.
  21/02/06 13:30:44 INFO InMemoryFileIndex: It took 2 ms to list leaf files for 1 paths.
  21/02/06 13:30:44 INFO FileSourceStrategy: Pushed Filters: 
  21/02/06 13:30:44 INFO FileSourceStrategy: Post-Scan Filters: 
  21/02/06 13:30:44 INFO FileSourceStrategy: Output Data Schema: struct<value: string>
  21/02/06 13:30:45 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 327.2 KiB, free 593.6 MiB)
  21/02/06 13:30:45 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 30.1 KiB, free 593.6 MiB)
  21/02/06 13:30:45 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 30.1 KiB, free: 593.9 MiB)
  21/02/06 13:30:45 INFO SparkContext: Created broadcast 0 from json at NativeMethodAccessorImpl.java:0
  21/02/06 13:30:45 INFO FileSourceScanExec: Planning scan with bin packing, max size: 4194304 bytes, open cost is considered as scanning 4194304 bytes.
  21/02/06 13:30:45 INFO SparkContext: Starting job: json at NativeMethodAccessorImpl.java:0
  21/02/06 13:30:45 INFO DAGScheduler: Got job 0 (json at NativeMethodAccessorImpl.java:0) with 1 output partitions
  21/02/06 13:30:45 INFO DAGScheduler: Final stage: ResultStage 0 (json at NativeMethodAccessorImpl.java:0)
  21/02/06 13:30:45 INFO DAGScheduler: Parents of final stage: List()
  21/02/06 13:30:45 INFO DAGScheduler: Missing parents: List()
  21/02/06 13:30:45 INFO DAGScheduler: Submitting ResultStage 0 (MapPartitionsRDD[7] at json at NativeMethodAccessorImpl.java:0), which has no missing parents
  21/02/06 13:30:45 INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 13.3 KiB, free 593.6 MiB)
  21/02/06 13:30:45 INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 7.2 KiB, free 593.6 MiB)
  21/02/06 13:30:45 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on spark-test-app-66d48e777788efa1-driver-svc.e295d961e57d4a67964daf9b3a75fd4e.svc:7079 (size: 7.2 KiB, free: 593.9 MiB)
  21/02/06 13:30:45 INFO SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:1387
  21/02/06 13:30:45 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 0 (MapPartitionsRDD[7] at json at NativeMethodAccessorImpl.java:0) (first 15 tasks are for partitions Vector(0))
  21/02/06 13:30:45 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks resource profile 0
  21/02/06 13:31:00 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:31:15 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:31:30 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:31:45 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:32:00 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:32:15 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:32:30 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:32:45 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  21/02/06 13:33:00 WARN TaskSchedulerImpl: Initial job has not accepted any resources; check your cluster UI to ensure that workers are registered and have sufficient resources
  " did not contain "1 Justin" The application did not complete, driver log did not contain str 1 Justin. (KubernetesSuite.scala:405)
Run completed in 1 hour, 23 minutes, 26 seconds.
Total number of tests run: 26
Suites: completed 2, aborted 0
Tests: succeeded 2, failed 24, canceled 0, ignored 0, pending 0
*** 24 TESTS FAILED ***
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary for Spark Project Parent POM 3.2.0-SNAPSHOT:
[INFO] 
[INFO] Spark Project Parent POM ........................... SUCCESS [  4.132 s]
[INFO] Spark Project Tags ................................. SUCCESS [  9.053 s]
[INFO] Spark Project Local DB ............................. SUCCESS [  4.346 s]
[INFO] Spark Project Networking ........................... SUCCESS [  6.433 s]
[INFO] Spark Project Shuffle Streaming Service ............ SUCCESS [  3.293 s]
[INFO] Spark Project Unsafe ............................... SUCCESS [ 11.544 s]
[INFO] Spark Project Launcher ............................. SUCCESS [  3.029 s]
[INFO] Spark Project Core ................................. SUCCESS [02:37 min]
[INFO] Spark Project Kubernetes Integration Tests ......... FAILURE [  01:27 h]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time:  01:30 h
[INFO] Finished at: 2021-02-06T05:33:34-08:00
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.scalatest:scalatest-maven-plugin:2.0.0:test (integration-test) on project spark-kubernetes-integration-tests_2.12: There are test failures -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <args> -rf :spark-kubernetes-integration-tests_2.12
+ retcode3=1
+ kill -9 177769
+ minikube stop
:   Stopping "minikube" in kvm2 ...
-   "minikube" stopped.
/tmp/jenkins2002647585585267185.sh: line 68: 177769 Killed                  minikube mount ${PVC_TESTS_HOST_PATH}:${PVC_TESTS_VM_PATH} --9p-version=9p2000.L --gid=0 --uid=185
+ rm -rf /tmp/tmp.pTCVbaOmbC
+ exit 1
Build step 'Execute shell' marked build as failure
Archiving artifacts
Finished: FAILURE