Failed
Console Output

Started by upstream project "avocado" build number 5204
originally caused by:
 Started by timer
[EnvInject] - Loading node environment variables.
Building remotely on amp-jenkins-staging-worker-02 (ubuntu ubuntu-gpu ubuntu-avx2 staging-02 staging) in workspace /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu
 > git rev-parse --is-inside-work-tree # timeout=10
Fetching changes from the remote Git repository
 > git config remote.origin.url https://github.com/bigdatagenomics/avocado.git # timeout=10
Fetching upstream changes from https://github.com/bigdatagenomics/avocado.git
 > git --version # timeout=10
 > git fetch --tags --progress https://github.com/bigdatagenomics/avocado.git +refs/heads/*:refs/remotes/origin/*
Checking out Revision 4cea7777bfa76a940ad875157b92f6de7cead980 (refs/remotes/origin/master)
 > git config core.sparsecheckout # timeout=10
 > git checkout -f 4cea7777bfa76a940ad875157b92f6de7cead980
 > git rev-list 4cea7777bfa76a940ad875157b92f6de7cead980 # timeout=10
[ubuntu] $ /bin/bash /tmp/hudson1697579640108314278.sh
+ set -e
+ export JAVA_HOME=/usr/lib/jvm/java-8-oracle
+ JAVA_HOME=/usr/lib/jvm/java-8-oracle
+ export PATH=/usr/lib/jvm/java-8-oracle/bin/::/home/jenkins/gems/bin:/usr/local/go/bin:/home/jenkins/go-projects/bin:/home/jenkins/anaconda2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
+ PATH=/usr/lib/jvm/java-8-oracle/bin/::/home/jenkins/gems/bin:/usr/local/go/bin:/home/jenkins/go-projects/bin:/home/jenkins/anaconda2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
+ set +x
+ /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/scripts/jenkins-test

# make a tempdir for writing maven cruft to
AVOCADO_MVN_TMP_DIR=$(mktemp -d -t avocadoTestMvnXXXXXXX)
mktemp -d -t avocadoTestMvnXXXXXXX
++ mktemp -d -t avocadoTestMvnXXXXXXX
+ AVOCADO_MVN_TMP_DIR=/tmp/avocadoTestMvn1W7ijjn

# add this tempdir to the poms...
find . -name pom.xml \
    -exec sed -i.bak \
    -e "s:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=${AVOCADO_MVN_TMP_DIR}:g" \
    {} \;
+ find . -name pom.xml -exec sed -i.bak -e 's:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=/tmp/avocadoTestMvn1W7ijjn:g' '{}' ';'
find . -name "*.bak" -exec rm -f {} \;
+ find . -name '*.bak' -exec rm -f '{}' ';'

# variable declarations
export PATH=${JAVA_HOME}/bin/:${PATH}
+ export PATH=/usr/lib/jvm/java-8-oracle/bin/:/usr/lib/jvm/java-8-oracle/bin/::/home/jenkins/gems/bin:/usr/local/go/bin:/home/jenkins/go-projects/bin:/home/jenkins/anaconda2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
+ PATH=/usr/lib/jvm/java-8-oracle/bin/:/usr/lib/jvm/java-8-oracle/bin/::/home/jenkins/gems/bin:/usr/local/go/bin:/home/jenkins/go-projects/bin:/home/jenkins/anaconda2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
export MAVEN_OPTS="-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8"
+ export 'MAVEN_OPTS=-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8'
+ MAVEN_OPTS='-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8'
DIR=$( cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
 cd $( dirname ${BASH_SOURCE[0]} ) && pwd 
 dirname ${BASH_SOURCE[0]} 
+++ dirname /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/scripts/jenkins-test
++ cd /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/scripts
++ pwd
+ DIR=/home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/scripts
PROJECT_ROOT=${DIR}/..
+ PROJECT_ROOT=/home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/scripts/..
VERSION=$(grep "<version>" ${PROJECT_ROOT}/pom.xml  | head -2 | tail -1 | sed 's/ *<version>//g' | sed 's/<\/version>//g')
grep "<version>" ${PROJECT_ROOT}/pom.xml  | head -2 | tail -1 | sed 's/ *<version>//g' | sed 's/<\/version>//g'
++ grep '<version>' /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/scripts/../pom.xml
++ head -2
++ sed 's/ *<version>//g'
++ sed 's/<\/version>//g'
++ tail -1
+ VERSION=0.1.1-SNAPSHOT

# is the hadoop version set?
if ! [[ ${HADOOP_VERSION} ]];
then
    echo "HADOOP_VERSION environment variable is not set."
    echo "Please set this variable before running."
    
    exit 1
fi
+ [[ -n 2.6.0 ]]

# is the spark version set?
if ! [[ ${SPARK_VERSION} ]];
then
    echo "SPARK_VERSION environment variable is not set."
    echo "Please set this variable before running."
    
    exit 1
fi
+ [[ -n 2.2.0 ]]

# print versions
echo "Testing AVOCADO version ${VERSION} on Spark ${SPARK_VERSION} and Hadoop ${HADOOP_VERSION}"
+ echo 'Testing AVOCADO version 0.1.1-SNAPSHOT on Spark 2.2.0 and Hadoop 2.6.0'
Testing AVOCADO version 0.1.1-SNAPSHOT on Spark 2.2.0 and Hadoop 2.6.0

# first, build the sources, run the unit tests, and generate a coverage report
mvn clean \
    -Dhadoop.version=${HADOOP_VERSION} \
    -Dspark.version=${SPARK_VERSION} 
+ mvn clean -Dhadoop.version=2.6.0 -Dspark.version=2.2.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1g; support was removed in 8.0
[INFO] Scanning for projects...
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-core_2.11:jar:0.1.1-SNAPSHOT
[WARNING] 'build.plugins.plugin.version' for org.apache.maven.plugins:maven-jar-plugin is missing. @ line 66, column 15
[WARNING] 
[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build.
[WARNING] 
[WARNING] For this reason, future Maven versions might no longer support building such malformed projects.
[WARNING] 
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed
[INFO] avocado-core: Core variant calling algorithms
[INFO] avocado-cli: Command line interface for a distributed variant caller
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado: A Variant Caller, Distributed 0.1.1-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ avocado-parent_2.11 ---
[INFO] Deleting /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/target
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado-core: Core variant calling algorithms 0.1.1-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ avocado-core_2.11 ---
[INFO] Deleting /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/target
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado-cli: Command line interface for a distributed variant caller 0.1.1-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ avocado-cli_2.11 ---
[INFO] Deleting /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-cli/target
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed ............. SUCCESS [  0.129 s]
[INFO] avocado-core: Core variant calling algorithms ...... SUCCESS [  0.077 s]
[INFO] avocado-cli: Command line interface for a distributed variant caller SUCCESS [  0.040 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 0.405 s
[INFO] Finished at: 2020-01-09T01:10:42-08:00
[INFO] Final Memory: 22M/1472M
[INFO] ------------------------------------------------------------------------

# if this is a pull request, we need to set the coveralls pr id
if [[ ! -z $ghprbPullId ]];
then
    COVERALLS_PRB_OPTION="-DpullRequest=${ghprbPullId}"
fi
+ [[ ! -z '' ]]

# coveralls token should not be visible
set +x +v
+ set +x +v
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1g; support was removed in 8.0
[INFO] Scanning for projects...
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-core_2.11:jar:0.1.1-SNAPSHOT
[WARNING] 'build.plugins.plugin.version' for org.apache.maven.plugins:maven-jar-plugin is missing. @ line 66, column 15
[WARNING] 
[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build.
[WARNING] 
[WARNING] For this reason, future Maven versions might no longer support building such malformed projects.
[WARNING] 
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed
[INFO] avocado-core: Core variant calling algorithms
[INFO] avocado-cli: Command line interface for a distributed variant caller
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado: A Variant Caller, Distributed 0.1.1-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ avocado-parent_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-source (add-source) @ avocado-parent_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ avocado-parent_2.11 ---
[INFO] Modified 0 of 58 .scala files
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ avocado-parent_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-test-source (add-test-source) @ avocado-parent_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/src/test/scala added.
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ avocado-parent_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] >>> scoverage-maven-plugin:1.1.1:report (default-cli) > [scoverage]test @ avocado-parent_2.11 >>>
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ avocado-parent_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-source (add-source) @ avocado-parent_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ avocado-parent_2.11 ---
[INFO] Modified 0 of 58 .scala files
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:pre-compile (default-cli) @ avocado-parent_2.11 ---
[INFO] Skipping SCoverage execution for project with packaging type 'pom'
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ avocado-parent_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:post-compile (default-cli) @ avocado-parent_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-test-source (add-test-source) @ avocado-parent_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/src/test/scala added.
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ avocado-parent_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] <<< scoverage-maven-plugin:1.1.1:report (default-cli) < [scoverage]test @ avocado-parent_2.11 <<<
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:report (default-cli) @ avocado-parent_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado-core: Core variant calling algorithms 0.1.1-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ avocado-core_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-source (add-source) @ avocado-core_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ avocado-core_2.11 ---
[INFO] Modified 0 of 49 .scala files
[INFO] 
[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ avocado-core_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ avocado-core_2.11 ---
[WARNING]  Expected all dependencies to require Scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-metrics-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-serialization-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-io-spark2_2.11:0.2.13 requires scala version: 2.11.8
[WARNING] Multiple versions of scala libraries detected!
[INFO] /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/src/main/scala:-1: info: compiling
[INFO] Compiling 26 source files to /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/target/scala-2.11.4/classes at 1578561051774
[WARNING] warning: there were three deprecation warnings; re-run with -deprecation for details
[WARNING] one warning found
[INFO] prepare-compile in 0 s
[INFO] compile in 10 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.5.1:compile (default-compile) @ avocado-core_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-test-source (add-test-source) @ avocado-core_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:2.6:testResources (default-testResources) @ avocado-core_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 26 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ avocado-core_2.11 ---
[WARNING]  Expected all dependencies to require Scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-metrics-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-serialization-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-io-spark2_2.11:0.2.13 requires scala version: 2.11.8
[WARNING] Multiple versions of scala libraries detected!
[INFO] /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/src/test/scala:-1: info: compiling
[INFO] Compiling 23 source files to /home/jenkins/workspace/avocado/HADOOP_VERSION/2.6.0/SCALAVER/2.11/SPARK_VERSION/2.2.0/label/ubuntu/avocado-core/target/scala-2.11.4/test-classes at 1578561062928
[INFO] prepare-compile in 0 s
[INFO] compile in 15 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.5.1:testCompile (default-testCompile) @ avocado-core_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- maven-surefire-plugin:2.7:test (default-test) @ avocado-core_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:1.0:test (test) @ avocado-core_2.11 ---
Discovery starting.
Discovery completed in 704 milliseconds.
Run starting. Expected test count is: 282
LogPhredSuite:
- convert log error probabilities to phred scores
TreeRegionJoinSuite:
- build a forest with a single item and retrieve data
- build a forest with data from a single contig and retrieve data
- build a forest with data from multiple contigs and retrieve data
2020-01-09 01:11:20 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2020-01-09 01:11:20 WARN  Utils:66 - Your hostname, amp-jenkins-staging-worker-02 resolves to a loopback address: 127.0.1.1; using 192.168.10.32 instead (on interface eno1)
2020-01-09 01:11:20 WARN  Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
- build a forest out of data on a single contig and retrieve data
- run a join between data on a single contig
HardLimiterSuite:
- add a read to an empty buffer
- add a read to a non-empty buffer, without moving forward
- add a read to a non-empty buffer, and move forward
- trying to add a read to a full buffer—without moving forward—drops the read
- add a read to a full buffer, while moving forward and keeping buffer full
- add a read to a full buffer, while moving forward and emptying buffer
- adding an out of order read should fire an assert
- adding a read that is on the wrong contig should fire an assert
- apply hard limiting to an iterator that is wholly under the coverage limit
- apply hard limiting to an iterator that is partially under the coverage limit
- apply hard limiting to an iterator that is wholly over the coverage limit
- apply hard limiting on a file that is wholly under the coverage limit
- apply hard limiting on a file with sections over the coverage limit
VariantSummarySuite:
- create from genotype without strand bias components
- create from genotype with strand bias components
- invalid strand bias causes exception
- merge two fully populated summaries
- merge two partially populated summaries
- populating an annotation should carry old fields
RewriteHetsSuite:
- should rewrite a bad het snp
- should not rewrite het snp if snp filtering is disabled
- should rewrite a bad het indel
- should not rewrite het indel if indel filtering is disabled
- don't rewrite good het calls
- don't rewrite homozygous calls
- rewrite a het call as a hom alt snp
- processing a valid call should not change the call
- if processing is disabled, don't rewrite bad calls
- process a bad het snp call
- process a bad het indel call
- disable processing for a whole rdd
- process a whole rdd
RealignerSuite:
- realignment candidate code needs at least one block
- read is not a realignment candidate if it is canonical
- read is not a realignment candidate if it is canonical and clipped
- read is a realignment candidate if there is at least one non-canonical block
- realign an indel that is not left normalized
- realign a mnp expressed as a complex indel
- realign two snps expressed as a complex indel
- align sequence with a complex deletion
- realign a read with a complex deletion
- realign a read with a snp and deletion separated by a flank
- realigning a repetative read will fire an assert
- realign a set of reads around an insert
- realign a set of reads around a deletion
2020-01-09 01:11:26 WARN  Realigner:101 - Realigning A_READ failed with exception java.lang.AssertionError: assertion failed: Input sequence contains a repeat..
- realigning a read with a repeat will return the original read
- one sample read should fail due to a repeat, all others should realign
HardFilterGenotypesSuite:
- filter out reference calls
- filter out low quality calls
- filter out genotypes for emission
- filter out genotypes with a low quality per depth
- filter out genotypes with a low depth
- filter out genotypes with a high depth
- filter out genotypes with a low RMS mapping quality
- filter out genotypes with a high strand bias
- update genotype where no filters were applied
- update genotype where filters were applied and passed
- update genotype where filters were applied and failed
- discard a ref genotype call
- keep a ref genotype call
- discard a genotype whose quality is too low
- build filters and apply to snp
- build filters and apply to indel
- test adding filters
- filter out genotypes with a low allelic fraction
- filter out genotypes with a high allelic fraction
TrioCallerSuite:
- cannot have a sample with no record groups
- cannot have a sample with discordant sample ids
- extract id from a single read group
- extract id from multiple read groups
- filter an empty site
- filter a site with only ref calls
- keep a site with a non-ref call
- fill in no-calls for site with missing parents
- pass through site with odd copy number
- confirm call at site where proband and parents are consistent and phase
- confirm call at site where proband and parents are consistent but cannot phase
- invalidate call at site where proband and parents are inconsistent
- end-to-end trio call test
BlockSuite:
- folding over a match block returns a match operator
- an unknown block must have mismatching input sequences
- folding over an unknown block returns our function's result
AlignerSuite:
- aligning a repetative sequence will fire an assert
- align a minimally flanked sequence with a snp
- align a minimally flanked sequence with a 3 bp mnp
- align a minimally flanked sequence with 2 snps separated by 1bp
- align a minimally flanked sequence with 2 snps separated by 3bp
- align a minimally flanked sequence with a simple insert
- align a minimally flanked sequence with a complex insert
- align a minimally flanked sequence with a simple deletion
- align a minimally flanked sequence that contains a discordant k-mer pair
- align a minimally flanked sequence with a complex deletion
- align a minimally flanked sequence with 2 snps separated by two matching k-mers
- align a minimally flanked sequence with a snp and an indel separated by one matching k-mer
- zip and trim short insert
- zip and trim short deletion
- cut up a sequence that is longer than the k-mer length
- cutting up a sequence that is shorter than the k-mer length yields an empty map
- cutting up a repeated sequence throws an assert
- get no indices if we have no intersection
- get correct index for a single intersection
- get correct indices for two k-mers in a row
- get correct indices for two k-mers separated by a snp
- get correct indices for two k-mers separated by an indel
- get correct indices for two k-mers whose positions are flipped
- fire assert when cutting up repeatitive reads
- fire assert when checking negative index pair
- a set of a single index pair is concordant
- a set with a pair of index pairs is concordant
- a set with multiple good index pairs is concordant
- a set with a pair of swapped index pairs is discordant
- a set with a pair of both con/discordant index pairs is discordant
- making blocks from no indices returns a single unknown block
- make blocks from a single match between two snps
- make blocks from three matches between two snps
- make blocks from three matches between two indels, opposite events
- make blocks from three matches between two indels, same events
- make blocks from matches between snp/indel/snp
BiallelicGenotyperSuite:
- properly handle haploid genotype state
- properly handle diploid genotype state with het call
- properly handle triploid genotype state with hom alt call
- scoring read that overlaps no variants should return empty observations in variant only mode
- scoring read that overlaps no variants should return empty observations
2020-01-09 01:11:31 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (4472 KB). The maximum recommended task size is 100 KB.
2020-01-09 01:11:32 WARN  Utils:66 - Truncated the string representation of a plan since it was too large. This behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.
- score snps in a read overlapping a copy number dup boundary
2020-01-09 01:11:49 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (4060 KB). The maximum recommended task size is 100 KB.
- score snps in a read overlapping a copy number del boundary
- score snp in a read with no evidence of the snp
- score snp in a read with evidence of the snp
- score snp in a read with evidence of the snp, and non-variant bases
- build genotype for het snp
- force call possible STR/indel !!! IGNORED !!!
- log space factorial
- fisher test for strand bias
2020-01-09 01:12:05 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:12:12 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- discover and call simple SNP
2020-01-09 01:13:22 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:13:29 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- discover and call simple SNP and score all sites
2020-01-09 01:14:42 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:14:50 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- discover and call short indel
2020-01-09 01:16:10 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:16:17 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
2020-01-09 01:16:30 ERROR BiallelicGenotyper:387 - Processing read H06JUADXX130110:1:1109:10925:52628 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2020-01-09 01:16:31 ERROR BiallelicGenotyper:387 - Processing read H06JUADXX130110:1:1116:7369:15293 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2020-01-09 01:16:31 ERROR BiallelicGenotyper:387 - Processing read H06HDADXX130110:2:1115:12347:40533 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2020-01-09 01:16:31 ERROR BiallelicGenotyper:387 - Processing read H06HDADXX130110:1:2110:7844:95190 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2020-01-09 01:16:31 ERROR BiallelicGenotyper:387 - Processing read H06HDADXX130110:1:2203:13041:33390 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
- discover and call het and hom snps
- score a single read covering a deletion
2020-01-09 01:17:47 WARN  TaskSetManager:66 - Stage 7 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- discover and force call hom alt deletion
2020-01-09 01:19:10 WARN  TaskSetManager:66 - Stage 7 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt AGCCAGTGGACGCCGACCT->A deletion at 1/875159
2020-01-09 01:20:28 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:20:35 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt TACACACACACACACACACACACACACACAC->T deletion at 1/1777263
2020-01-09 01:21:52 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:22:01 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt CAG->C deletion at 1/1067596
2020-01-09 01:23:23 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:23:30 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt C->G snp at 1/877715
2020-01-09 01:24:50 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:24:57 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt ACAG->A deletion at 1/886049
2020-01-09 01:26:15 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:26:22 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt GA->CC mnp at 1/889158–9
2020-01-09 01:27:41 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:27:49 WARN  TaskSetManager:66 - Stage 14 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call hom alt C->CCCCT insertion at 1/866511
2020-01-09 01:27:55 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:28:02 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call het ATG->A deletion at 1/905130
2020-01-09 01:29:21 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:29:29 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
- call het ATG->A deletion at 1/905130 while scoring all sites
2020-01-09 01:30:53 WARN  BiallelicGenotyper:170 - Input RDD is not persisted. Performance may be degraded.
2020-01-09 01:31:00 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10813 KB). The maximum recommended task size is 100 KB.
2020-01-09 01:31:39 ERROR Utils:91 - Uncaught exception in thread driver-heartbeater
java.lang.OutOfMemoryError: GC overhead limit exceeded
	at scala.collection.mutable.LinkedHashMap.foreach(LinkedHashMap.scala:139)
	at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
	at scala.collection.mutable.AbstractMap.$plus$plus$eq(Map.scala:80)
	at scala.collection.mutable.MapLike$class.clone(MapLike.scala:217)
	at scala.collection.mutable.AbstractMap.clone(Map.scala:80)
	at scala.collection.mutable.MapLike$class.$plus$plus(MapLike.scala:141)
	at scala.collection.mutable.AbstractMap.$plus$plus(Map.scala:80)
	at org.apache.spark.executor.TaskMetrics.nameToAccums$lzycompute(TaskMetrics.scala:228)
	at org.apache.spark.executor.TaskMetrics.nameToAccums(TaskMetrics.scala:228)
	at org.apache.spark.executor.TaskMetrics.internalAccums$lzycompute(TaskMetrics.scala:231)
	at org.apache.spark.executor.TaskMetrics.internalAccums(TaskMetrics.scala:230)
	at org.apache.spark.executor.TaskMetrics.accumulators(TaskMetrics.scala:252)
	at org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$reportHeartBeat$1.apply(Executor.scala:720)
	at org.apache.spark.executor.Executor$$anonfun$org$apache$spark$executor$Executor$$reportHeartBeat$1.apply(Executor.scala:716)
	at scala.collection.Iterator$class.foreach(Iterator.scala:743)
	at scala.collection.AbstractIterator.foreach(Iterator.scala:1195)
	at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
	at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
	at org.apache.spark.executor.Executor.org$apache$spark$executor$Executor$$reportHeartBeat(Executor.scala:716)
	at org.apache.spark.executor.Executor$$anon$2$$anonfun$run$1.apply$mcV$sp(Executor.scala:755)
	at org.apache.spark.executor.Executor$$anon$2$$anonfun$run$1.apply(Executor.scala:755)
	at org.apache.spark.executor.Executor$$anon$2$$anonfun$run$1.apply(Executor.scala:755)
	at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1954)
	at org.apache.spark.executor.Executor$$anon$2.run(Executor.scala:755)
	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
	at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
2020-01-09 01:31:39 ERROR Executor:91 - Exception in task 3.0 in stage 7.0 (TID 415)
java.lang.OutOfMemoryError: GC overhead limit exceeded
	at sun.reflect.GeneratedConstructorAccessor80.newInstance(Unknown Source)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.io.ObjectStreamClass.newInstance(ObjectStreamClass.java:1091)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2053)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.readArray(ObjectInputStream.java:1975)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1567)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
2020-01-09 01:31:39 ERROR Executor:91 - Exception in task 4.0 in stage 7.0 (TID 416)
java.lang.OutOfMemoryError: GC overhead limit exceeded
	at sun.reflect.GeneratedConstructorAccessor80.newInstance(Unknown Source)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.io.ObjectStreamClass.newInstance(ObjectStreamClass.java:1091)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2053)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.readArray(ObjectInputStream.java:1975)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1567)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
2020-01-09 01:31:39 ERROR Executor:91 - Exception in task 1.0 in stage 7.0 (TID 413)
java.lang.OutOfMemoryError: GC overhead limit exceeded
2020-01-09 01:31:39 ERROR Executor:91 - Exception in task 2.0 in stage 7.0 (TID 414)
java.lang.OutOfMemoryError: GC overhead limit exceeded
2020-01-09 01:31:39 ERROR SparkUncaughtExceptionHandler:91 - Uncaught exception in thread Thread[Executor task launch worker for task 415,5,main]
java.lang.OutOfMemoryError: GC overhead limit exceeded
	at sun.reflect.GeneratedConstructorAccessor80.newInstance(Unknown Source)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.io.ObjectStreamClass.newInstance(ObjectStreamClass.java:1091)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2053)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.readArray(ObjectInputStream.java:1975)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1567)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
2020-01-09 01:31:39 ERROR SparkUncaughtExceptionHandler:91 - Uncaught exception in thread Thread[Executor task launch worker for task 416,5,main]
java.lang.OutOfMemoryError: GC overhead limit exceeded
	at sun.reflect.GeneratedConstructorAccessor80.newInstance(Unknown Source)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
	at java.io.ObjectStreamClass.newInstance(ObjectStreamClass.java:1091)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2053)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.readArray(ObjectInputStream.java:1975)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1567)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
	at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2287)
	at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2211)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2069)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1573)
2020-01-09 01:31:39 ERROR SparkUncaughtExceptionHandler:91 - Uncaught exception in thread Thread[Executor task launch worker for task 413,5,main]
java.lang.OutOfMemoryError: GC overhead limit exceeded
2020-01-09 01:31:39 ERROR SparkUncaughtExceptionHandler:91 - Uncaught exception in thread Thread[Executor task launch worker for task 414,5,main]
java.lang.OutOfMemoryError: GC overhead limit exceeded
2020-01-09 01:31:39 WARN  TaskSetManager:66 - Lost task 2.0 in stage 7.0 (TID 414, localhost, executor driver): java.lang.OutOfMemoryError: GC overhead limit exceeded

2020-01-09 01:31:39 ERROR LiveListenerBus:70 - SparkListenerBus has already stopped! Dropping event SparkListenerStageCompleted(org.apache.spark.scheduler.StageInfo@27c3511d)
2020-01-09 01:31:39 ERROR TaskSetManager:70 - Task 2 in stage 7.0 failed 1 times; aborting job
2020-01-09 01:31:39 ERROR LiveListenerBus:70 - SparkListenerBus has already stopped! Dropping event SparkListenerJobEnd(3,1578562299669,JobFailed(org.apache.spark.SparkException: Job 3 cancelled because SparkContext was shut down))
2020-01-09 01:31:39 WARN  SparkContext:87 - Multiple running SparkContexts detected in the same JVM!
org.apache.spark.SparkException: Only one SparkContext may be running in this JVM (see SPARK-2243). To ignore this error, set spark.driver.allowMultipleContexts = true. The currently running SparkContext was created at:
org.apache.spark.SparkContext.<init>(SparkContext.scala:76)
org.bdgenomics.utils.misc.SparkFunSuite$class.setupSparkContext(SparkFunSuite.scala:56)
org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.setupSparkContext(BiallelicGenotyperSuite.scala:46)
org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply$mcV$sp(SparkFunSuite.scala:99)
org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply(SparkFunSuite.scala:98)
org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply(SparkFunSuite.scala:98)
org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
org.scalatest.Transformer.apply(Transformer.scala:22)
org.scalatest.Transformer.apply(Transformer.scala:20)
org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
org.scalatest.Suite$class.withFixture(Suite.scala:1122)
org.scalatest.FunSuite.withFixture(FunSuite.scala:1555)
org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.org$scalatest$BeforeAndAfter$$super$runTest(BiallelicGenotyperSuite.scala:46)
	at org.apache.spark.SparkContext$$anonfun$assertNoOtherContextIsRunning$2.apply(SparkContext.scala:2472)
	at org.apache.spark.SparkContext$$anonfun$assertNoOtherContextIsRunning$2.apply(SparkContext.scala:2468)
	at scala.Option.foreach(Option.scala:256)
	at org.apache.spark.SparkContext$.assertNoOtherContextIsRunning(SparkContext.scala:2468)
	at org.apache.spark.SparkContext$.markPartiallyConstructed(SparkContext.scala:2557)
	at org.apache.spark.SparkContext.<init>(SparkContext.scala:85)
	at org.bdgenomics.utils.misc.SparkFunSuite$class.setupSparkContext(SparkFunSuite.scala:56)
	at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.setupSparkContext(BiallelicGenotyperSuite.scala:46)
	at org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply$mcV$sp(SparkFunSuite.scala:99)
	at org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply(SparkFunSuite.scala:98)
	at org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply(SparkFunSuite.scala:98)
	at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
	at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
	at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
	at org.scalatest.Transformer.apply(Transformer.scala:22)
	at org.scalatest.Transformer.apply(Transformer.scala:20)
	at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
	at org.scalatest.Suite$class.withFixture(Suite.scala:1122)
	at org.scalatest.FunSuite.withFixture(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
	at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
	at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
	at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
	at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.org$scalatest$BeforeAndAfter$$super$runTest(BiallelicGenotyperSuite.scala:46)
	at org.scalatest.BeforeAndAfter$class.runTest(BeforeAndAfter.scala:200)
	at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.runTest(BiallelicGenotyperSuite.scala:46)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
	at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
	at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
	at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
	at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
	at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
	at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
	at org.scalatest.Suite$class.run(Suite.scala:1424)
	at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
	at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
	at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
	at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.org$scalatest$BeforeAndAfter$$super$run(BiallelicGenotyperSuite.scala:46)
	at org.scalatest.BeforeAndAfter$class.run(BeforeAndAfter.scala:241)
	at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.run(BiallelicGenotyperSuite.scala:46)
	at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
	at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
	at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
	at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
	at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
	at org.scalatest.Suite$class.run(Suite.scala:1421)
	at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
	at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
	at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
	at scala.collection.immutable.List.foreach(List.scala:381)
	at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
	at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
	at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
	at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
	at org.scalatest.tools.Runner$.main(Runner.scala:860)
	at org.scalatest.tools.Runner.main(Runner.scala)
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Skipping avocado: A Variant Caller, Distributed
[INFO] This project has been banned from the build due to previous failures.
[INFO] ------------------------------------------------------------------------
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed ............. SUCCESS [  4.000 s]
[INFO] avocado-core: Core variant calling algorithms ...... FAILURE [20:51 min]
[INFO] avocado-cli: Command line interface for a distributed variant caller SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 20:55 min
[INFO] Finished at: 2020-01-09T01:31:40-08:00
[INFO] Final Memory: 34M/1172M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.scalatest:scalatest-maven-plugin:1.0:test (test) on project avocado-core_2.11: There are test failures -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :avocado-core_2.11
Build step 'Execute shell' marked build as failure
Recording test results
ERROR: Step ‘Publish JUnit test result report’ failed: No test report files were found. Configuration error?
Sending e-mails to: fnothaft@berkeley.edu
Finished: FAILURE