Failed
Console Output

Started by upstream project "avocado-prb" build number 213
originally caused by:
 GitHub pull request #250 of commit 6234a49a85794509a5f250329c5172e320b790df automatically merged.
[EnvInject] - Loading node environment variables.
Building remotely on amp-jenkins-worker-03 (centos spark-test) in workspace /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos
 > git rev-parse --is-inside-work-tree # timeout=10
Fetching changes from the remote Git repository
 > git config remote.origin.url https://github.com/bigdatagenomics/avocado.git # timeout=10
Fetching upstream changes from https://github.com/bigdatagenomics/avocado.git
 > git --version # timeout=10
 > git fetch --tags --progress https://github.com/bigdatagenomics/avocado.git +refs/pull/*:refs/remotes/origin/pr/*
Checking out Revision 417f88eac88514c4864bac92b8aad89368784402 (origin/pr/250/merge)
 > git config core.sparsecheckout # timeout=10
 > git checkout -f 417f88eac88514c4864bac92b8aad89368784402
 > git rev-list f07e918345ca6edd9088dd5e8c6e8c859cd0c18e # timeout=10
First time build. Skipping changelog.
[centos] $ /bin/bash /tmp/hudson4281389885706038183.sh
+ set -e
+ export JAVA_HOME=/usr/java/jdk1.8.0_60
+ JAVA_HOME=/usr/java/jdk1.8.0_60
+ export CONDA_BIN=/home/anaconda/bin/
+ CONDA_BIN=/home/anaconda/bin/
+ export MVN_BIN=/home/jenkins/tools/hudson.tasks.Maven_MavenInstallation/Maven_3.1.1/bin/
+ MVN_BIN=/home/jenkins/tools/hudson.tasks.Maven_MavenInstallation/Maven_3.1.1/bin/
+ export PATH=/usr/java/jdk1.8.0_60/bin/:/home/jenkins/tools/hudson.tasks.Maven_MavenInstallation/Maven_3.1.1/bin/:/home/anaconda/bin/:/usr/local/bin:/bin:/usr/bin
+ PATH=/usr/java/jdk1.8.0_60/bin/:/home/jenkins/tools/hudson.tasks.Maven_MavenInstallation/Maven_3.1.1/bin/:/home/anaconda/bin/:/usr/local/bin:/bin:/usr/bin
+ set +x
+ /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/scripts/jenkins-test

# make a tempdir for writing maven cruft to
AVOCADO_MVN_TMP_DIR=$(mktemp -d -t avocadoTestMvnXXXXXXX)
++ mktemp -d -t avocadoTestMvnXXXXXXX
+ AVOCADO_MVN_TMP_DIR=/tmp/avocadoTestMvnwqvVvTr

# add this tempdir to the poms...
find . -name pom.xml \
    -exec sed -i.bak \
    -e "s:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=${AVOCADO_MVN_TMP_DIR}:g" \
    {} \;
+ find . -name pom.xml -exec sed -i.bak -e 's:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=/tmp/avocadoTestMvnwqvVvTr:g' '{}' ';'
find . -name "*.bak" -exec rm -f {} \;
+ find . -name '*.bak' -exec rm -f '{}' ';'
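
# (For reference: the same rewrite step as a standalone sketch, assuming GNU
# find/sed; the trap removes the tempdir on exit, and -delete replaces the
# second find/rm pass.)
AVOCADO_MVN_TMP_DIR=$(mktemp -d -t avocadoTestMvnXXXXXXX)
trap 'rm -rf "${AVOCADO_MVN_TMP_DIR}"' EXIT
find . -name pom.xml -exec sed -i.bak \
    -e "s:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=${AVOCADO_MVN_TMP_DIR}:g" \
    {} \;
find . -name '*.bak' -delete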

# variable declarations
export PATH=${JAVA_HOME}/bin/:${PATH}
+ export PATH=/usr/java/jdk1.8.0_60/bin/:/usr/java/jdk1.8.0_60/bin/:/home/jenkins/tools/hudson.tasks.Maven_MavenInstallation/Maven_3.1.1/bin/:/home/anaconda/bin/:/usr/local/bin:/bin:/usr/bin
+ PATH=/usr/java/jdk1.8.0_60/bin/:/usr/java/jdk1.8.0_60/bin/:/home/jenkins/tools/hudson.tasks.Maven_MavenInstallation/Maven_3.1.1/bin/:/home/anaconda/bin/:/usr/local/bin:/bin:/usr/bin
export MAVEN_OPTS="-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8"
+ export 'MAVEN_OPTS=-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8'
+ MAVEN_OPTS='-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8'
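
# (Note: on the JDK 8 used here, MaxPermSize is ignored because PermGen was
# removed; hence the "ignoring option MaxPermSize=1g" warnings later in this
# log. A sketch of the JDK 8 equivalent, assuming Metaspace needs a cap at all:)
export MAVEN_OPTS="-Xmx1536m -XX:MaxMetaspaceSize=1g -Dfile.encoding=utf-8"
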
DIR=$( cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
+++ dirname /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/scripts/jenkins-test
++ cd /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/scripts
++ pwd
+ DIR=/home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/scripts
PROJECT_ROOT=${DIR}/..
+ PROJECT_ROOT=/home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/scripts/..
VERSION=$(grep "<version>" ${PROJECT_ROOT}/pom.xml  | head -2 | tail -1 | sed 's/ *<version>//g' | sed 's/<\/version>//g')
++ grep '<version>' /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/scripts/../pom.xml
++ head -2
++ tail -1
++ sed 's/ *<version>//g'
++ sed 's/<\/version>//g'
+ VERSION=0.0.3-SNAPSHOT
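
# (The pipeline above assumes the project version is the second <version> tag
# in the POM. A more robust sketch, assuming maven-help-plugin 3.1.0+ is
# available:)
VERSION=$(mvn -q help:evaluate -Dexpression=project.version -DforceStdout)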

# is the hadoop version set?
if ! [[ ${HADOOP_VERSION} ]];
then
    echo "HADOOP_VERSION environment variable is not set."
    echo "Please set this variable before running."
    
    exit 1
fi
+ [[ -n 2.3.0 ]]

# is the spark version set?
if ! [[ ${SPARK_VERSION} ]];
then
    echo "SPARK_VERSION environment variable is not set."
    echo "Please set this variable before running."
    
    exit 1
fi
+ [[ -n 2.0.0 ]]
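
# (The two guards above share one pattern; a hypothetical helper using bash
# indirect expansion could factor it out:)
require_env () {
    if ! [[ ${!1} ]]; then
        echo "$1 environment variable is not set."
        echo "Please set this variable before running."
        exit 1
    fi
}
require_env HADOOP_VERSION
require_env SPARK_VERSION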

# the next few commands are supposed to fail
set +e
+ set +e

echo "Rewriting POM.xml files to Scala 2.10 and Spark 1 should error..."
+ echo 'Rewriting POM.xml files to Scala 2.10 and Spark 1 should error...'
Rewriting POM.xml files to Scala 2.10 and Spark 1 should error...
./scripts/move_to_spark_1.sh
+ ./scripts/move_to_spark_1.sh
POM is already set up for Spark 1 (Spark 1/2 artifacts are missing -spark2 suffix in artifact names).
Cowardly refusing to move to Spark 1 a second time...
if [[ $? == 0 ]];
then
    echo "Running move_to_spark_1.sh when POMs are set up for Spark 1 should fail, but error code was 0 (success)."
    exit 1
fi
+ [[ 1 == 0 ]]

./scripts/move_to_scala_2.10.sh
+ ./scripts/move_to_scala_2.10.sh
Scala version is already set to 2.10 (Scala artifacts have _2.10 version suffix in artifact name).
Cowardly refusing to move to Scala 2.10 a second time...
if [[ $? == 0 ]];
then
    echo "Running move_to_scala_2.10.sh when POMs are set up for Scala 2.10 should fail, but error code was 0 (success)."
    exit 1
fi
+ [[ 1 == 0 ]]

set -e
+ set -e
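
# (An equivalent expected-failure check that avoids toggling errexit: commands
# tested by an if do not trip set -e, so the zero-exit case can be caught
# directly. A sketch, not the project's script:)
if ./scripts/move_to_spark_1.sh; then
    echo "move_to_spark_1.sh should have failed, but exited 0."
    exit 1
fi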

# are we testing for spark 2.0.0? if so, we need to rewrite our poms first
if [ ${SPARK_VERSION} == 2.0.0 ];
then
    
    echo "Rewriting POM.xml files for Spark 2."
    ./scripts/move_to_spark_2.sh

    # shouldn't be able to move to spark 2 twice
    set +e
    ./scripts/move_to_spark_2.sh
    if [[ $? == 0 ]];
    then
        echo "We have already moved to Spark 2, so running move_to_spark_2.sh a second time should fail, but error code was 0 (success)."
        exit 1
    fi
    set -e
fi
+ '[' 2.0.0 == 2.0.0 ']'
+ echo 'Rewriting POM.xml files for Spark 2.'
Rewriting POM.xml files for Spark 2.
+ ./scripts/move_to_spark_2.sh
+ set +e
+ ./scripts/move_to_spark_2.sh
POM is already set up for Spark 2 (Spark 1/2 artifacts have -spark2 suffix in artifact names).
Cowardly refusing to move to Spark 2 a second time...
+ [[ 1 == 0 ]]
+ set -e

# are we testing for scala 2.11? if so, we need to rewrite our poms to 2.11 first
if [ ${SCALAVER} == 2.11 ];
then
    echo "Rewriting POM.xml files for Scala 2.11."
    ./scripts/move_to_scala_2.11.sh

    # shouldn't be able to move to scala 2.11 twice
    set +e
    ./scripts/move_to_scala_2.11.sh
    if [[ $? == 0 ]];
    then
        echo "We have already moved to Scala 2.11, so running move_to_scala_2.11.sh a second time should fail, but error code was 0 (success)."
        exit 1
    fi
    set -e
fi
+ '[' 2.11 == 2.11 ']'
+ echo 'Rewriting POM.xml files for Scala 2.11.'
Rewriting POM.xml files for Scala 2.11.
+ ./scripts/move_to_scala_2.11.sh
+ set +e
+ ./scripts/move_to_scala_2.11.sh
Scala version is already set to 2.11 (Scala artifacts have _2.11 version suffix in artifact name).
Cowardly refusing to move to Scala 2.11 a second time...
+ [[ 1 == 0 ]]
+ set -e

# print versions
echo "Testing AVOCADO version ${VERSION} on Spark ${SPARK_VERSION} and Hadoop ${HADOOP_VERSION}"
+ echo 'Testing AVOCADO version 0.0.3-SNAPSHOT on Spark 2.0.0 and Hadoop 2.3.0'
Testing AVOCADO version 0.0.3-SNAPSHOT on Spark 2.0.0 and Hadoop 2.3.0

# first, build the sources, run the unit tests, and generate a coverage report
mvn clean \
    -Dhadoop.version=${HADOOP_VERSION} \
    -Dspark.version=${SPARK_VERSION} 
+ mvn clean -Dhadoop.version=2.3.0 -Dspark.version=2.0.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1g; support was removed in 8.0
[INFO] Scanning for projects...
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-core-spark2_2.11:jar:0.0.3-SNAPSHOT
[WARNING] 'build.plugins.plugin.version' for org.apache.maven.plugins:maven-jar-plugin is missing. @ line 66, column 15
[WARNING] 'dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 134, column 23
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 336, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 340, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 344, column 25
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-cli-spark2_2.11:jar:0.0.3-SNAPSHOT
[WARNING] 'dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 176, column 23
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 336, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 340, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 344, column 25
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-parent-spark2_2.11:pom:0.0.3-SNAPSHOT
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 336, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 340, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 344, column 25
[WARNING] 
[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build.
[WARNING] 
[WARNING] For this reason, future Maven versions might no longer support building such malformed projects.
[WARNING] 
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed
[INFO] avocado-core: Core variant calling algorithms
[INFO] avocado-cli: Command line interface for a distributed variant caller
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado: A Variant Caller, Distributed 0.0.3-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ avocado-parent-spark2_2.11 ---
[INFO] Deleting /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/target
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado-core: Core variant calling algorithms 0.0.3-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ avocado-core-spark2_2.11 ---
[INFO] Deleting /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/target
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado-cli: Command line interface for a distributed variant caller 0.0.3-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ avocado-cli-spark2_2.11 ---
[INFO] Deleting /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-cli/target
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed ............ SUCCESS [0.123s]
[INFO] avocado-core: Core variant calling algorithms ..... SUCCESS [0.095s]
[INFO] avocado-cli: Command line interface for a distributed variant caller  SUCCESS [0.652s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 0.999s
[INFO] Finished at: Thu Oct 12 09:07:35 PDT 2017
[INFO] Final Memory: 19M/1472M
[INFO] ------------------------------------------------------------------------

# if this is a pull request, we need to set the coveralls pr id
if [[ ! -z $ghprbPullId ]];
then
    COVERALLS_PRB_OPTION="-DpullRequest=${ghprbPullId}"
fi
+ [[ ! -z 250 ]]
+ COVERALLS_PRB_OPTION=-DpullRequest=250

# coveralls token should not be visible
set +x +v
+ set +x +v
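
# (The upload itself runs with tracing disabled so the repo token never appears
# in this log. A hypothetical reconstruction, assuming the scoverage and
# coveralls Maven plugins seen below; <token> is a placeholder:)
# mvn -Dhadoop.version=${HADOOP_VERSION} -Dspark.version=${SPARK_VERSION} \
#     scoverage:report coveralls:report \
#     -DrepoToken=<token> ${COVERALLS_PRB_OPTION}
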
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1g; support was removed in 8.0
[INFO] Scanning for projects...
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-core-spark2_2.11:jar:0.0.3-SNAPSHOT
[WARNING] 'build.plugins.plugin.version' for org.apache.maven.plugins:maven-jar-plugin is missing. @ line 66, column 15
[WARNING] 'dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 134, column 23
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 336, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 340, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 344, column 25
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-cli-spark2_2.11:jar:0.0.3-SNAPSHOT
[WARNING] 'dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 176, column 23
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 336, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 340, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ org.bdgenomics.avocado:avocado-parent-spark2_2.11:0.0.3-SNAPSHOT, /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/pom.xml, line 344, column 25
[WARNING] 
[WARNING] Some problems were encountered while building the effective model for org.bdgenomics.avocado:avocado-parent-spark2_2.11:pom:0.0.3-SNAPSHOT
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 336, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 340, column 25
[WARNING] 'dependencyManagement.dependencies.dependency.exclusions.exclusion.artifactId' for org.apache.hadoop:hadoop-client:jar with value '*' does not match a valid id pattern. @ line 344, column 25
[WARNING] 
[WARNING] It is highly recommended to fix these problems because they threaten the stability of your build.
[WARNING] 
[WARNING] For this reason, future Maven versions might no longer support building such malformed projects.
[WARNING] 
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed
[INFO] avocado-core: Core variant calling algorithms
[INFO] avocado-cli: Command line interface for a distributed variant caller
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado: A Variant Caller, Distributed 0.0.3-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ avocado-parent-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-source (add-source) @ avocado-parent-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ avocado-parent-spark2_2.11 ---
[INFO] Modified 0 of 54 .scala files
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ avocado-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-test-source (add-test-source) @ avocado-parent-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/src/test/scala added.
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ avocado-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] >>> scoverage-maven-plugin:1.1.1:report (default-cli) @ avocado-parent-spark2_2.11 >>>
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ avocado-parent-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-source (add-source) @ avocado-parent-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ avocado-parent-spark2_2.11 ---
[INFO] Modified 0 of 54 .scala files
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:pre-compile (default-cli) @ avocado-parent-spark2_2.11 ---
[INFO] Skipping SCoverage execution for project with packaging type 'pom'
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ avocado-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:post-compile (default-cli) @ avocado-parent-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-test-source (add-test-source) @ avocado-parent-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/src/test/scala added.
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ avocado-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] <<< scoverage-maven-plugin:1.1.1:report (default-cli) @ avocado-parent-spark2_2.11 <<<
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:report (default-cli) @ avocado-parent-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building avocado-core: Core variant calling algorithms 0.0.3-SNAPSHOT
[INFO] ------------------------------------------------------------------------
Downloading: http://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-core-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-core-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloading: http://people.apache.org/repo/m2-snapshot-repository/org/bdgenomics/adam/adam-core-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml

Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-core-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml (2 KB at 1.9 KB/sec)
Downloaded: http://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-core-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml (2 KB at 1.8 KB/sec)
Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-parent-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloading: http://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-parent-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloading: http://people.apache.org/repo/m2-snapshot-repository/org/bdgenomics/adam/adam-parent-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-parent-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml (625 B at 3.4 KB/sec)
Downloaded: http://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-parent-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml (625 B at 1.7 KB/sec)
Downloading: http://people.apache.org/repo/m2-snapshot-repository/org/bdgenomics/adam/adam-codegen-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloading: http://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-codegen-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-codegen-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml
Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-codegen-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml (2 KB at 7.3 KB/sec)
Downloaded: http://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/adam/adam-codegen-spark2_2.11/0.23.0-SNAPSHOT/maven-metadata.xml (2 KB at 3.8 KB/sec)
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ avocado-core-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-source (add-source) @ avocado-core-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ avocado-core-spark2_2.11 ---
[INFO] Modified 0 of 45 .scala files
[INFO] 
[INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ avocado-core-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ avocado-core-spark2_2.11 ---
[WARNING]  Expected all dependencies to require Scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-metrics-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-serialization-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-io-spark2_2.11:0.2.13 requires scala version: 2.11.8
[WARNING] Multiple versions of scala libraries detected!
[INFO] /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/src/main/scala:-1: info: compiling
[INFO] Compiling 24 source files to /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/target/scala-2.11.4/classes at 1507824464254
[WARNING] warning: there were three deprecation warnings; re-run with -deprecation for details
[WARNING] one warning found
[INFO] prepare-compile in 0 s
[INFO] compile in 10 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.5.1:compile (default-compile) @ avocado-core-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- build-helper-maven-plugin:1.10:add-test-source (add-test-source) @ avocado-core-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:2.6:testResources (default-testResources) @ avocado-core-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 25 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ avocado-core-spark2_2.11 ---
[WARNING]  Expected all dependencies to require Scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-metrics-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-serialization-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-misc-spark2_2.11:0.2.11 requires scala version: 2.11.4
[WARNING]  org.bdgenomics.utils:utils-io-spark2_2.11:0.2.13 requires scala version: 2.11.8
[WARNING] Multiple versions of scala libraries detected!
[INFO] /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/src/test/scala:-1: info: compiling
[INFO] Compiling 21 source files to /home/jenkins/workspace/avocado-prb/HADOOP_VERSION/2.3.0/SCALAVER/2.11/SPARK_VERSION/2.0.0/label/centos/avocado-core/target/scala-2.11.4/test-classes at 1507824475577
[INFO] prepare-compile in 0 s
[INFO] compile in 16 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.5.1:testCompile (default-testCompile) @ avocado-core-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- maven-surefire-plugin:2.7:test (default-test) @ avocado-core-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:1.0:test (test) @ avocado-core-spark2_2.11 ---
Discovery starting.
Discovery completed in 659 milliseconds.
Run starting. Expected test count is: 267
LogPhredSuite:
- convert log error probabilities to phred scores
TreeRegionJoinSuite:
- build a forest with a single item and retrieve data
- build a forest with data from a single contig and retrieve data
- build a forest with data from multiple contigs and retrieve data
2017-10-12 09:08:14 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
- build a forest out of data on a single contig and retrieve data
- run a join between data on a single contig
HardLimiterSuite:
- add a read to an empty buffer
- add a read to a non-empty buffer, without moving forward
- add a read to a non-empty buffer, and move forward
- trying to add a read to a full buffer, without moving forward, drops the read
- add a read to a full buffer, while moving forward and keeping buffer full
- add a read to a full buffer, while moving forward and emptying buffer
- adding an out of order read should fire an assert
- adding a read that is on the wrong contig should fire an assert
- apply hard limiting to an iterator that is wholly under the coverage limit
- apply hard limiting to an iterator that is partially under the coverage limit
- apply hard limiting to an iterator that is wholly over the coverage limit
- apply hard limiting on a file that is wholly under the coverage limit
- apply hard limiting on a file with sections over the coverage limit
VariantSummarySuite:
- create from genotype without strand bias components
- create from genotype with strand bias components
- invalid strand bias causes exception
- merge two fully populated summaries
- merge two partially populated summaries
- populating an annotation should carry old fields
RewriteHetsSuite:
- should rewrite a bad het snp
- should not rewrite het snp if snp filtering is disabled
- should rewrite a bad het indel
- should not rewrite het indel if indel filtering is disabled
- don't rewrite good het calls
- don't rewrite homozygous calls
- rewrite a het call as a hom alt snp
- processing a valid call should not change the call
- if processing is disabled, don't rewrite bad calls
- process a bad het snp call
- process a bad het indel call
- disable processing for a whole rdd
- process a whole rdd
RealignerSuite:
- realignment candidate code needs at least one block
- read is not a realignment candidate if it is canonical
- read is not a realignment candidate if it is canonical and clipped
- read is a realignment candidate if there is at least one non-canonical block
- realign an indel that is not left normalized
- realign a mnp expressed as a complex indel
- realign two snps expressed as a complex indel
- align sequence with a complex deletion
- realign a read with a complex deletion
- realign a read with a snp and deletion separated by a flank
- realigning a repetitive read will fire an assert
- realign a set of reads around an insert
- realign a set of reads around a deletion
2017-10-12 09:08:19 WARN  Realigner:101 - Realigning A_READ failed with exception java.lang.AssertionError: assertion failed: Input sequence contains a repeat..
- realigning a read with a repeat will return the original read
- one sample read should fail due to a repeat, all others should realign
HardFilterGenotypesSuite:
- filter out reference calls
- filter out low quality calls
- filter out genotypes for emission
- filter out genotypes with a low quality per depth
- filter out genotypes with a low depth
- filter out genotypes with a high depth
- filter out genotypes with a low RMS mapping quality
- filter out genotypes with a high strand bias
- update genotype where no filters were applied
- update genotype where filters were applied and passed
- update genotype where filters were applied and failed
- discard a ref genotype call
- keep a ref genotype call
- discard a genotype whose quality is too low *** FAILED ***
  java.lang.NullPointerException:
  at org.bdgenomics.avocado.util.HardFilterGenotypes$.filterGenotype(HardFilterGenotypes.scala:642)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite$$anonfun$14.apply$mcV$sp(HardFilterGenotypesSuite.scala:289)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite$$anonfun$14.apply(HardFilterGenotypesSuite.scala:287)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite$$anonfun$14.apply(HardFilterGenotypesSuite.scala:287)
  at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
  at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
  at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
  at org.scalatest.Transformer.apply(Transformer.scala:22)
  at org.scalatest.Transformer.apply(Transformer.scala:20)
  at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
  at org.scalatest.Suite$class.withFixture(Suite.scala:1122)
  at org.scalatest.FunSuite.withFixture(FunSuite.scala:1555)
  at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
  at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
  at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
  at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
  at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite.org$scalatest$BeforeAndAfter$$super$runTest(HardFilterGenotypesSuite.scala:52)
  at org.scalatest.BeforeAndAfter$class.runTest(BeforeAndAfter.scala:200)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite.runTest(HardFilterGenotypesSuite.scala:52)
  at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
  at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
  at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
  at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
  at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
  at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
  at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
  at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
  at org.scalatest.Suite$class.run(Suite.scala:1424)
  at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
  at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
  at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
  at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
  at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite.org$scalatest$BeforeAndAfter$$super$run(HardFilterGenotypesSuite.scala:52)
  at org.scalatest.BeforeAndAfter$class.run(BeforeAndAfter.scala:241)
  at org.bdgenomics.avocado.util.HardFilterGenotypesSuite.run(HardFilterGenotypesSuite.scala:52)
  at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
  at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
  at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
  at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
  at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
  at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
  at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
  at org.scalatest.Suite$class.run(Suite.scala:1421)
  at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
  at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
  at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
  at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
  at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
  at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
  at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
  at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
  at org.scalatest.tools.Runner$.main(Runner.scala:860)
  at org.scalatest.tools.Runner.main(Runner.scala)
- build filters and apply to snp
- build filters and apply to indel
- test adding filters
- filter out genotypes with a low allelic fraction
- filter out genotypes with a high allelic fraction
TrioCallerSuite:
- cannot have a sample with no record groups
- cannot have a sample with discordant sample ids
- extract id from a single read group
- extract id from multiple read groups
- filter an empty site
- filter a site with only ref calls
- keep a site with a non-ref call
- fill in no-calls for site with missing parents
- pass through site with odd copy number
- confirm call at site where proband and parents are consistent and phase
- confirm call at site where proband and parents are consistent but cannot phase
- invalidate call at site where proband and parents are inconsistent
- end-to-end trio call test
BlockSuite:
- folding over a match block returns a match operator
- an unknown block must have mismatching input sequences
- folding over an unknown block returns our function's result
AlignerSuite:
- aligning a repetitive sequence will fire an assert
- align a minimally flanked sequence with a snp
- align a minimally flanked sequence with a 3 bp mnp
- align a minimally flanked sequence with 2 snps separated by 1bp
- align a minimally flanked sequence with 2 snps separated by 3bp
- align a minimally flanked sequence with a simple insert
- align a minimally flanked sequence with a complex insert
- align a minimally flanked sequence with a simple deletion
- align a minimally flanked sequence that contains a discordant k-mer pair
- align a minimally flanked sequence with a complex deletion
- align a minimally flanked sequence with 2 snps separated by two matching k-mers
- align a minimally flanked sequence with a snp and an indel separated by one matching k-mer
- zip and trim short insert
- zip and trim short deletion
- cut up a sequence that is longer than the k-mer length
- cutting up a sequence that is shorter than the k-mer length yields an empty map
- cutting up a repeated sequence throws an assert
- get no indices if we have no intersection
- get correct index for a single intersection
- get correct indices for two k-mers in a row
- get correct indices for two k-mers separated by a snp
- get correct indices for two k-mers separated by an indel
- get correct indices for two k-mers whose positions are flipped
- fire assert when cutting up repetitive reads
- fire assert when checking negative index pair
- a set of a single index pair is concordant
- a set with a pair of index pairs is concordant
- a set with multiple good index pairs is concordant
- a set with a pair of swapped index pairs is discordant
- a set with a pair of both con/discordant index pairs is discordant
- making blocks from no indices returns a single unknown block
- make blocks from a single match between two snps
- make blocks from three matches between two snps
- make blocks from three matches between two indels, opposite events
- make blocks from three matches between two indels, same events
- make blocks from matches between snp/indel/snp
BiallelicGenotyperSuite:
- properly handle haploid genotype state
- properly handle diploid genotype state with het call
- properly handle triploid genotype state with hom alt call
- scoring read that overlaps no variants should return empty observations in variant only mode
- score snp in a read with no evidence of the snp
- score snp in a read with evidence of the snp
- score snp in a read with evidence of the snp, and non-variant bases *** FAILED ***
  org.bdgenomics.utils.misc.MathUtils.fpEquals(nonRefObservation.alleleLogLikelihoods.apply(0), BiallelicGenotyperSuite.this.os.logL(2, 2, 0.9999, 0.9999), org.bdgenomics.utils.misc.MathUtils.fpEquals$default$3) was false (BiallelicGenotyperSuite.scala:182)
  org.scalatest.exceptions.TestFailedException:
  at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:500)
  at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1555)
  at org.scalatest.Assertions$AssertionsHelper.macroAssert(Assertions.scala:466)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite$$anonfun$7$$anonfun$apply$mcV$sp$1.apply(BiallelicGenotyperSuite.scala:182)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite$$anonfun$7$$anonfun$apply$mcV$sp$1.apply(BiallelicGenotyperSuite.scala:171)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite$$anonfun$7.apply$mcV$sp(BiallelicGenotyperSuite.scala:171)
  at org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply$mcV$sp(SparkFunSuite.scala:102)
  at org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply(SparkFunSuite.scala:98)
  at org.bdgenomics.utils.misc.SparkFunSuite$$anonfun$sparkTest$1.apply(SparkFunSuite.scala:98)
  at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
  at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
  at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
  at org.scalatest.Transformer.apply(Transformer.scala:22)
  at org.scalatest.Transformer.apply(Transformer.scala:20)
  at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
  at org.scalatest.Suite$class.withFixture(Suite.scala:1122)
  at org.scalatest.FunSuite.withFixture(FunSuite.scala:1555)
  at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
  at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
  at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
  at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
  at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.org$scalatest$BeforeAndAfter$$super$runTest(BiallelicGenotyperSuite.scala:43)
  at org.scalatest.BeforeAndAfter$class.runTest(BeforeAndAfter.scala:200)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.runTest(BiallelicGenotyperSuite.scala:43)
  at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
  at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
  at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
  at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
  at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
  at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
  at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
  at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
  at org.scalatest.Suite$class.run(Suite.scala:1424)
  at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
  at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
  at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
  at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
  at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.org$scalatest$BeforeAndAfter$$super$run(BiallelicGenotyperSuite.scala:43)
  at org.scalatest.BeforeAndAfter$class.run(BeforeAndAfter.scala:241)
  at org.bdgenomics.avocado.genotyping.BiallelicGenotyperSuite.run(BiallelicGenotyperSuite.scala:43)
  at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
  at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
  at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
  at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
  at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
  at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
  at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
  at org.scalatest.Suite$class.run(Suite.scala:1421)
  at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
  at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
  at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
  at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
  at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
  at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
  at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
  at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
  at org.scalatest.tools.Runner$.main(Runner.scala:860)
  at org.scalatest.tools.Runner.main(Runner.scala)
- build genotype for het snp
- force call possible STR/indel !!! IGNORED !!!
- log space factorial
- fisher test for strand bias
2017-10-12 09:08:24 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:08:31 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- discover and call simple SNP
2017-10-12 09:09:37 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:09:44 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- discover and call simple SNP and score all sites
2017-10-12 09:10:53 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:11:00 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- discover and call short indel
2017-10-12 09:12:10 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:12:18 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
2017-10-12 09:12:30 ERROR BiallelicGenotyper:358 - Processing read H06JUADXX130110:1:1109:10925:52628 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2017-10-12 09:12:30 ERROR BiallelicGenotyper:358 - Processing read H06JUADXX130110:1:1116:7369:15293 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2017-10-12 09:12:30 ERROR BiallelicGenotyper:358 - Processing read H06HDADXX130110:2:1115:12347:40533 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2017-10-12 09:12:30 ERROR BiallelicGenotyper:358 - Processing read H06HDADXX130110:1:2110:7844:95190 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
2017-10-12 09:12:30 ERROR BiallelicGenotyper:358 - Processing read H06HDADXX130110:1:2203:13041:33390 failed with exception java.lang.StringIndexOutOfBoundsException: String index out of range: 0. Skipping...
- discover and call het and hom snps
2017-10-12 09:13:30 WARN  Executor:66 - 1 block locks were not released by TID = 202:
[rdd_2_0]
- score a single read covering a deletion
2017-10-12 09:13:37 WARN  TaskSetManager:66 - Stage 7 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- discover and force call hom alt deletion
2017-10-12 09:14:54 WARN  TaskSetManager:66 - Stage 7 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt AGCCAGTGGACGCCGACCT->A deletion at 1/875159
2017-10-12 09:16:02 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:16:08 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt TACACACACACACACACACACACACACACAC->T deletion at 1/1777263
2017-10-12 09:17:18 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:17:26 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt CAG->C deletion at 1/1067596
2017-10-12 09:18:38 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:18:44 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt C->G snp at 1/877715
2017-10-12 09:19:55 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:20:01 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt ACAG->A deletion at 1/886049
2017-10-12 09:21:12 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:21:19 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt GA->CC mnp at 1/889158-9
2017-10-12 09:22:29 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:22:35 WARN  TaskSetManager:66 - Stage 14 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt C->CCCCT insertion at 1/866511
2017-10-12 09:22:40 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:22:46 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call het ATG->A deletion at 1/905130
2017-10-12 09:23:56 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:24:03 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call het ATG->A deletion at 1/905130 while scoring all sites
2017-10-12 09:25:16 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:25:22 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call het AG->A deletion at 1/907170
2017-10-12 09:26:33 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:26:39 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call het T->G snp at 1/240898
2017-10-12 09:27:50 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:27:57 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- make het alt calls at biallelic snp locus
2017-10-12 09:29:08 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:29:15 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
- call hom alt T->TAAA insertion at 1/4120185
2017-10-12 09:30:24 WARN  BiallelicGenotyper:157 - Input RDD is not persisted. Performance may be degraded.
2017-10-12 09:30:31 WARN  TaskSetManager:66 - Stage 5 contains a task of very large size (10540 KB). The maximum recommended task size is 100 KB.
{"variant": {"contigName": "1", "start": 5274546, "end": 5274551, "names": [], "splitFromMultiAllelic": false, "referenceAllele": "TTATA", "alternateAllele": "T", "quality": null, "filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "annotation": null}, "contigName": "1", "start": 5274546, "end": 5274551, "variantCallingAnnotations": {"filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "downsampled": null, "baseQRankSum": null, "fisherStrandBiasPValue": -0.0, "rmsMapQ": 97.46794, "mapq0Reads": null, "mqRankSum": null, "readPositionRankSum": null, "genotypePriors": [], "genotypePosteriors": [], "vqslod": null, "culprit": null, "attributes": {}}, "sampleId": "NA12878", "sampleDescription": null, "processingDescription": null, "alleles": ["ALT", "OTHER_ALT"], "expectedAlleleDosage": null, "referenceReadDepth": 0, "alternateReadDepth": 17, "readDepth": 38, "minReadDepth": null, "genotypeQuality": 249, "genotypeLikelihoods": [-70.87324901033372, 0.0, 9.157569518503912], "nonReferenceLikelihoods": [0.46526149370831527, 0.0, -0.6073937268968032], "strandBiasComponents": [0, 0, 8, 9], "splitFromMultiAllelic": false, "phased": false, "phaseSetId": null, "phaseQuality": null}
{"variant": {"contigName": "1", "start": 5274546, "end": 5274549, "names": [], "splitFromMultiAllelic": false, "referenceAllele": "TTA", "alternateAllele": "T", "quality": null, "filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "annotation": null}, "contigName": "1", "start": 5274546, "end": 5274549, "variantCallingAnnotations": {"filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "downsampled": null, "baseQRankSum": null, "fisherStrandBiasPValue": -0.0, "rmsMapQ": 83.795815, "mapq0Reads": null, "mqRankSum": null, "readPositionRankSum": null, "genotypePriors": [], "genotypePosteriors": [], "vqslod": null, "culprit": null, "attributes": {}}, "sampleId": "NA12878", "sampleDescription": null, "processingDescription": null, "alleles": ["ALT", "OTHER_ALT"], "expectedAlleleDosage": null, "referenceReadDepth": 0, "alternateReadDepth": 23, "readDepth": 38, "minReadDepth": null, "genotypeQuality": 249, "genotypeLikelihoods": [-126.58664682399018, 0.0, 13.440146510587008], "nonReferenceLikelihoods": [0.46526149370831527, 0.0, -0.6073937268968032], "strandBiasComponents": [0, 0, 11, 12], "splitFromMultiAllelic": false, "phased": false, "phaseSetId": null, "phaseQuality": null}
- call het alt TTATA,TTA->T insertion at 1/5274547
ObserverSuite:
- a fully clipped read will not generate any observations
- generate observations for a sequence match under diploid model
- generate observations for a read with an insertion under diploid model
- generate observations for a read with a deletion under diploid model
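[Editor's note] For the diploid-model observation tests above, a textbook formulation may help; this is a generic sketch, not necessarily avocado's exact model. With ploidy m = 2 and per-observation error rate epsilon, an alt-supporting observation has likelihood (g/m)(1 - epsilon) + ((m - g)/m) * epsilon under the genotype carrying g alt alleles:

    // Generic diploid observation likelihoods for g = 0, 1, 2 alt alleles.
    // epsilon is the per-observation error rate (assumed, for illustration).
    def altObservationLikelihoods(epsilon: Double, ploidy: Int = 2): Seq[Double] =
      (0 to ploidy).map { g =>
        (g.toDouble / ploidy) * (1.0 - epsilon) +
          ((ploidy - g).toDouble / ploidy) * epsilon
      }

    // altObservationLikelihoods(0.01) == Vector(0.01, 0.5, 0.99)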
DiscoveredVariantSuite:
- round trip conversion to/from variant
DiscoverVariantsSuite:
- no variants in unaligned read
- no variants in rdd with unaligned read
- no variants in read that is a perfect sequence match
- no variants in rdd with sequence match reads
- find snp in read with a 1bp sequence mismatch
- find one snp in reads with 1bp sequence mismatch
- find insertion in read
- find insertion in reads
- find deletion in read
- find deletion in reads
- find variants in alignment record rdd
- break TT->CA mnp into two snps *** FAILED ***
  optC.isDefined was false (DiscoverVariantsSuite.scala:256)
  org.scalatest.exceptions.TestFailedException:
  at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:500)
  at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1555)
  at org.scalatest.Assertions$AssertionsHelper.macroAssert(Assertions.scala:466)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite$$anonfun$12.apply$mcV$sp(DiscoverVariantsSuite.scala:256)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite$$anonfun$12.apply(DiscoverVariantsSuite.scala:250)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite$$anonfun$12.apply(DiscoverVariantsSuite.scala:250)
  at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
  at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
  at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
  at org.scalatest.Transformer.apply(Transformer.scala:22)
  at org.scalatest.Transformer.apply(Transformer.scala:20)
  at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
  at org.scalatest.Suite$class.withFixture(Suite.scala:1122)
  at org.scalatest.FunSuite.withFixture(FunSuite.scala:1555)
  at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
  at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
  at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
  at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
  at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite.org$scalatest$BeforeAndAfter$$super$runTest(DiscoverVariantsSuite.scala:29)
  at org.scalatest.BeforeAndAfter$class.runTest(BeforeAndAfter.scala:200)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite.runTest(DiscoverVariantsSuite.scala:29)
  at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
  at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
  at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
  at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
  at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
  at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
  at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
  at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
  at org.scalatest.Suite$class.run(Suite.scala:1424)
  at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
  at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
  at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
  at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
  at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite.org$scalatest$BeforeAndAfter$$super$run(DiscoverVariantsSuite.scala:29)
  at org.scalatest.BeforeAndAfter$class.run(BeforeAndAfter.scala:241)
  at org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite.run(DiscoverVariantsSuite.scala:29)
  at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
  at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
  at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
  at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
  at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
  at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
  at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
  at org.scalatest.Suite$class.run(Suite.scala:1421)
  at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
  at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
  at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
  at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
  at scala.collection.immutable.List.foreach(List.scala:381)
  at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
  at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
  at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
  at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
  at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
  at org.scalatest.tools.Runner$.main(Runner.scala:860)
  at org.scalatest.tools.Runner.main(Runner.scala)
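[Editor's note] On the failure above: the test expects an MNP such as TT->CA to be split into its component SNPs, and the assertion at DiscoverVariantsSuite.scala:256 found that one of the two expected SNPs (optC) was never discovered. A minimal sketch of the decomposition the test name describes, assuming equal-length alleles (illustrative, not avocado's implementation):

    case class Snp(pos: Long, ref: Char, alt: Char)

    // Split a multi-nucleotide polymorphism into one SNP per mismatching base.
    def decomposeMnp(start: Long, ref: String, alt: String): Seq[Snp] = {
      require(ref.length == alt.length, "MNP alleles must have equal length")
      ref.zip(alt).zipWithIndex.collect {
        case ((r, a), i) if r != a => Snp(start + i, r, a)
      }
    }

    // decomposeMnp(100L, "TT", "CA") == Vector(Snp(100,'T','C'), Snp(101,'T','A'))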
ObservationOperatorSuite:
- zero operators are empty
- non-zero operators are non-empty
- cannot build mismatch with wrong ref length
- collapsing a non repeated set of operators should eliminate 0 ops
- collapsing a repeated set of operators with mixed match/mismatch
- collapse a set of operators with repeats
- collapse a set of operators with repeats and clips
- make a cigar and md tag from a single sequence match
- make a cigar and md tag from a single sequence mismatch
- make a cigar and md tag from a single multi-base sequence match
- make a cigar and md tag from a single deletion
- make a cigar and md tag from a single insertion
- make a cigar for a match followed by a deletion
- make a cigar for an insertion flanked by matches
- make a cigar for a match followed by a mismatch
- make a cigar for a multi-base mismatch flanked by matches
- make a cigar for a match after a clip
- make a cigar for a mismatch after a clip
- extract reference from a single snp
- extract reference from a single deletion
- extract reference from a single insertion
- extract reference from a soft clipped sequence
- extract reference from a hard clipped sequence
- extract reference from a match flanked deletion
- extract reference from a match flanked insertion
- read must be mapped to extract alignment operators
- extracting alignment operators will fail if cigar is unset
- extracting alignment operators will fail if cigar is *
- extracting alignment operators will fail if MD tag is unset
- extract alignment operators from a perfect read
- extract alignment operators from a read with a single mismatch
- extract alignment operators from a read with a single deletion
- extract alignment operators from a read with a single insertion
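[Editor's note] As background for the CIGAR/MD cases above: in the SAM convention, a substitution is still an alignment match operator (M), and the MD tag records the reference bases at each mismatch. A small worked example with assumed values (not drawn from the suite itself):

    // An 8 bp read whose base at offset 3 differs from the reference (C):
    val read  = "ACGTACGT" // the aligned reference would read "ACGCACGT"
    val cigar = "8M"       // M covers sequence matches and mismatches alike
    val mdTag = "3C4"      // 3 matches, mismatched reference base C, 4 matches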
LogUtilsSuite:
- test our nifty log summer
- can we compute the sum of logs correctly?
- can we compute the additive inverse of logs correctly?
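[Editor's note] The LogUtilsSuite names above refer to log-space arithmetic. The standard formulation, shown here as a sketch rather than avocado's code, computes log(exp(a) + exp(b)) without underflow by factoring out the larger term:

    // log(exp(a) + exp(b)), stable even for very negative log likelihoods.
    def logSum(a: Double, b: Double): Double = {
      val (hi, lo) = if (a >= b) (a, b) else (b, a)
      hi + math.log1p(math.exp(lo - hi))
    }

    // "Additive inverse" in log space: log(exp(a) - exp(b)), defined for a > b.
    def logSubtract(a: Double, b: Double): Double = {
      require(a > b, "log-space difference requires a > b")
      a + math.log1p(-math.exp(b - a))
    }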
ObservationSuite:
- cannot create an observation with empty likelihoods
- cannot create an observation with 1-length likelihoods
- cannot create an observation with mismatching likelihood lengths
- forward strand must be >= 0
- forward strand cannot exceed coverage
- square map-q must be >= 0
- coverage is strictly positive
- invert an observation
- null an observation
RealignmentBlockSuite:
- folding over a clip returns the clip operator, soft clip
- folding over a clip returns the clip operator, hard clip
- folding over a canonical block returns the original alignment
- violate an invariant of the fold function, part 1
- violate an invariant of the fold function, part 2
- apply the fold function on a realignable block
- having a clip in the middle of a read is illegal
- can't have two soft clips back to back
- a read that is an exact sequence match is canonical
- hard clip before soft clip is ok at start of read
- hard clip after soft clip is ok at end of read
- a read with a single snp is canonical
- a read containing an indel with exact flanks is wholly realignable
- a read containing an indel with exact flanks is wholly realignable, with soft clipped bases
- a read containing an indel with longer flanks can be split into multiple blocks
- a read containing an indel with longer flanks on both sides can be split into multiple blocks
- properly handle a read that starts with a long soft clip
JointAnnotatorCallerSuite:
- discard reference site
- calculate MAF for all called genotypes
- calculate MAF ignoring uncalled genotypes
- roll up variant annotations from a single genotype
- roll up variant annotations across multiple genotypes
- recalling genotypes is a no-op for no calls and complex hets
- recall a genotype so that the state changes
- allele frequency being outside of (0.0, 1.0) just computes posteriors
- compute variant quality from a single genotype
- compute variant quality from multiple genotypes
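[Editor's note] On the MAF tests above: the names imply an allele-frequency pass that counts only called alleles. A sketch under assumed semantics (allele index 0 is the reference, None is a no-call; this is not avocado's implementation):

    // Genotypes as per-sample allele indices; None marks an uncalled allele.
    def altAlleleFrequency(genotypes: Seq[Seq[Option[Int]]]): Double = {
      val called = genotypes.flatten.flatten // drops all no-calls
      if (called.isEmpty) 0.0
      else called.count(_ != 0).toDouble / called.size
    }

    // One het call plus one fully uncalled sample: 1 alt / 2 called alleles.
    // altAlleleFrequency(Seq(Seq(Some(0), Some(1)), Seq(None, None))) == 0.5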
PrefilterReadsSuite:
- filter on read uniqueness
- filter unmapped reads
- filter autosomal chromosomes with grc names
- filter sex chromosomes with grc names
- filter mitochondrial chromosome with a grc names
- filter autosomal chromosomes with hg names
- filter sex chromosomes with hg names
- filter mitochondrial chromosome with a hg names
- filter autosomal chromosomes from generator
- filter autosomal + sex chromosomes from generator
- filter all chromosomes from generator
- update a read whose mate is mapped to a filtered contig
- filter reads mapped to autosomal chromosomes from generator
- filter reads mapped to autosomal + sex chromosomes from generator
- filter reads mapped to all chromosomes from generator
- filter reads uniquely mapped to autosomal chromosomes from generator
- filter reads uniquely mapped to autosomal + sex chromosomes from generator
- filter reads uniquely mapped to all chromosomes from generator
- filter rdd of reads mapped to autosomal chromosomes from generator
- filter rdd of reads mapped to autosomal + sex chromosomes from generator
- filter rdd of reads mapped to all chromosomes from generator
- filter rdd of reads uniquely mapped to autosomal chromosomes from generator
- filter rdd of reads uniquely mapped to autosomal + sex chromosomes from generator
- filter rdd of reads uniquely mapped to all chromosomes from generator
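[Editor's note] The PrefilterReads cases above exercise both GRC-style contig names ("1".."22", "X", "Y", "MT") and hg-style names ("chr1".., "chrX", "chrY", "chrM"). A sketch of that kind of name predicate, with semantics assumed from the test names rather than taken from avocado's code:

    // Accept both GRC ("1".."22") and hg ("chr1".."chr22") autosome names.
    def isAutosome(contig: String): Boolean = {
      val name = contig.stripPrefix("chr")
      name.nonEmpty && name.length <= 2 &&
        name.forall(_.isDigit) && (1 to 22).contains(name.toInt)
    }

    def isSexChromosome(contig: String): Boolean =
      Set("X", "Y", "chrX", "chrY").contains(contig)

    def isMitochondrial(contig: String): Boolean =
      Set("MT", "chrM").contains(contig)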
Run completed in 23 minutes, 35 seconds.
Total number of tests run: 267
Suites: completed 21, aborted 0
Tests: succeeded 264, failed 3, canceled 0, ignored 1, pending 0
*** 3 TESTS FAILED ***
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Skipping avocado: A Variant Caller, Distributed
[INFO] This project has been banned from the build due to previous failures.
[INFO] ------------------------------------------------------------------------
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] avocado: A Variant Caller, Distributed ............ SUCCESS [3.686s]
[INFO] avocado-core: Core variant calling algorithms ..... FAILURE [24:08.415s]
[INFO] avocado-cli: Command line interface for a distributed variant caller  SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 24:12.351s
[INFO] Finished at: Thu Oct 12 09:31:48 PDT 2017
[INFO] Final Memory: 34M/1092M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.scalatest:scalatest-maven-plugin:1.0:test (test) on project avocado-core-spark2_2.11: There are test failures -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :avocado-core-spark2_2.11
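[Editor's note] To reproduce this locally, the failing suite can usually be run on its own; the module name comes from the reactor summary above, and -Dsuites is the scalatest-maven-plugin's suite selector (shown as a sketch; flags may vary by plugin version):

    # Re-run only the failing suite in the failing module.
    mvn test -pl :avocado-core-spark2_2.11 \
        -Dsuites='org.bdgenomics.avocado.genotyping.DiscoverVariantsSuite'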
Build step 'Execute shell' marked build as failure
Recording test results
Publishing Scoverage XML and HTML report...
Setting commit status on GitHub for https://github.com/bigdatagenomics/avocado/commit/417f88eac88514c4864bac92b8aad89368784402
Finished: FAILURE