Console Output (build result: Failed)

Started by upstream project "ADAM-prb" build number 3081
originally caused by:
 GitHub pull request #2245 of commit 16b9475899870a1c380e582e2e66e0d8b1f0ac2d.
[EnvInject] - Loading node environment variables.
Building remotely on research-jenkins-worker-08 (ubuntu research-08) in workspace /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu
Wiping out workspace first.
Cloning the remote Git repository
Cloning repository https://github.com/bigdatagenomics/adam.git
 > git init /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu # timeout=10
Fetching upstream changes from https://github.com/bigdatagenomics/adam.git
 > git --version # timeout=10
 > git fetch --tags --progress https://github.com/bigdatagenomics/adam.git +refs/heads/*:refs/remotes/origin/* # timeout=15
 > git config remote.origin.url https://github.com/bigdatagenomics/adam.git # timeout=10
 > git config --add remote.origin.fetch +refs/heads/*:refs/remotes/origin/* # timeout=10
 > git config remote.origin.url https://github.com/bigdatagenomics/adam.git # timeout=10
Fetching upstream changes from https://github.com/bigdatagenomics/adam.git
 > git fetch --tags --progress https://github.com/bigdatagenomics/adam.git +refs/pull/*:refs/remotes/origin/pr/* # timeout=15
Checking out Revision 9a16079f5a139ff66e00e34273154a8fc202520b (origin/pr/2245/head)
 > git config core.sparsecheckout # timeout=10
 > git checkout -f 9a16079f5a139ff66e00e34273154a8fc202520b
 > git rev-list 07e6b1a39af5863a6c67f63690416da161096882 # timeout=10
[ubuntu] $ /bin/bash /tmp/hudson6898500247735869411.sh
+ set -e
+ unset SPARK_TESTING
+ export JAVA_HOME=/usr/lib/jvm/java-8-oracle
+ JAVA_HOME=/usr/lib/jvm/java-8-oracle
+ export CONDA_BIN=/home/anaconda/bin/
+ CONDA_BIN=/home/anaconda/bin/
+ export PATH=/usr/lib/jvm/java-8-oracle/bin/:/home/anaconda/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
+ PATH=/usr/lib/jvm/java-8-oracle/bin/:/home/anaconda/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
+ set +x
+ /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/scripts/jenkins-test

# make a tempdir for writing maven cruft to
ADAM_MVN_TMP_DIR=$(mktemp -d -t adamTestMvnXXXXXXX)
mktemp -d -t adamTestMvnXXXXXXX
++ mktemp -d -t adamTestMvnXXXXXXX
+ ADAM_MVN_TMP_DIR=/tmp/adamTestMvnPRoviTH

# add this tempdir to the poms...
find . -name pom.xml \
    -exec sed -i.bak \
    -e "s:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=${ADAM_MVN_TMP_DIR}:g" \
    {} \;
+ find . -name pom.xml -exec sed -i.bak -e 's:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=/tmp/adamTestMvnPRoviTH:g' '{}' ';'
find . -name "*.bak" -exec rm -f {} \;
+ find . -name '*.bak' -exec rm -f '{}' ';'
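To make the substitution concrete, a minimal sketch that runs the same sed expression over a hypothetical <argLine> fragment (the wrapper element is an assumption about where the flag sits in the poms):

sample='<argLine>-Dsun.io.serialization.extendedDebugInfo=true</argLine>'   # hypothetical pom fragment
echo "${sample}" | sed -e "s:sun.io.serialization.extendedDebugInfo=true:sun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=${ADAM_MVN_TMP_DIR}:g"
# prints: <argLine>-Dsun.io.serialization.extendedDebugInfo=true -Djava.io.tmpdir=/tmp/adamTestMvnPRoviTH</argLine>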

# variable declarations
export PATH=${JAVA_HOME}/bin/:${PATH}
+ export PATH=/usr/lib/jvm/java-8-oracle/bin/:/usr/lib/jvm/java-8-oracle/bin/:/home/anaconda/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
+ PATH=/usr/lib/jvm/java-8-oracle/bin/:/usr/lib/jvm/java-8-oracle/bin/:/home/anaconda/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
export MAVEN_OPTS="-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8"
+ export 'MAVEN_OPTS=-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8'
+ MAVEN_OPTS='-Xmx1536m -XX:MaxPermSize=1g -Dfile.encoding=utf-8'
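The -XX:MaxPermSize option is ignored on Java 8 (note the HotSpot warnings before each mvn run below); a sketch of a Java 8 equivalent, should one want to silence them, would swap in the Metaspace flag:

# sketch only, not what this script sets: Java 8 replaced PermGen with Metaspace
export MAVEN_OPTS="-Xmx1536m -XX:MaxMetaspaceSize=1g -Dfile.encoding=utf-8"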
DIR=$( cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
 cd $( dirname ${BASH_SOURCE[0]} ) && pwd 
 dirname ${BASH_SOURCE[0]} 
+++ dirname /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/scripts/jenkins-test
++ cd /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/scripts
++ pwd
+ DIR=/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/scripts
PROJECT_ROOT=${DIR}/..
+ PROJECT_ROOT=/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/scripts/..
VERSION=$(grep "<version>" ${PROJECT_ROOT}/pom.xml  | head -2 | tail -1 | sed 's/ *<version>//g' | sed 's/<\/version>//g')
grep "<version>" ${PROJECT_ROOT}/pom.xml  | head -2 | tail -1 | sed 's/ *<version>//g' | sed 's/<\/version>//g'
++ grep '<version>' /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/scripts/../pom.xml
++ head -2
++ tail -1
++ sed 's/ *<version>//g'
++ sed 's/<\/version>//g'
+ VERSION=0.30.0-SNAPSHOT
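As an illustration of that pipeline, a minimal sketch against a hypothetical two-line pom fragment; the second <version> element is kept and its tags stripped:

printf '  <version>30</version>\n  <version>0.30.0-SNAPSHOT</version>\n' \
    | grep "<version>" | head -2 | tail -1 \
    | sed 's/ *<version>//g' | sed 's/<\/version>//g'
# prints: 0.30.0-SNAPSHOT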

# is the hadoop version set?
if ! [[ ${HADOOP_VERSION} ]];
then
    echo "HADOOP_VERSION environment variable is not set."
    echo "Please set this variable before running."
    
    exit 1
fi
+ [[ -n 2.7.5 ]]

# is the spark version set?
if ! [[ ${SPARK_VERSION} ]];
then
    echo "SPARK_VERSION environment variable is not set."
    echo "Please set this variable before running."
    
    exit 1
fi
+ [[ -n 2.4.4 ]]

set -e
+ set -e

# build defaults to Scala 2.11
if [ ${SCALAVER} == 2.11 ];
then
    # shouldn't be able to move to scala 2.11 twice
    set +e
    ./scripts/move_to_scala_2.11.sh
    if [[ $? == 0 ]];
    then
        echo "We have already moved to Scala 2.11, so running move_to_scala_2.11.sh a second time should fail, but error code was 0 (success)."
        exit 1
    fi
    set -e
fi
+ '[' 2.11 == 2.11 ']'
+ set +e
+ ./scripts/move_to_scala_2.11.sh
Scala version is already set to 2.11 (Scala artifacts have _2.11 version suffix in artifact name).
Cowardly refusing to move to Scala 2.11 a second time...
+ [[ 1 == 0 ]]
+ set -e
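The guard above toggles errexit around an expected failure; an equivalent sketch without the set +e / set -e dance (a command tested directly in an if condition does not trip set -e) would be:

if ./scripts/move_to_scala_2.11.sh; then
    echo "move_to_scala_2.11.sh unexpectedly succeeded on a tree already using Scala 2.11" >&2
    exit 1
fi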

# move to Scala 2.12 if requested
if [ ${SCALAVER} == 2.12 ];
then
    set +e
    ./scripts/move_to_scala_2.12.sh
    set -e
fi
+ '[' 2.11 == 2.12 ']'

# print versions
echo "Testing ADAM version ${VERSION} on Spark ${SPARK_VERSION} and Hadoop ${HADOOP_VERSION}"
+ echo 'Testing ADAM version 0.30.0-SNAPSHOT on Spark 2.4.4 and Hadoop 2.7.5'
Testing ADAM version 0.30.0-SNAPSHOT on Spark 2.4.4 and Hadoop 2.7.5

# first, build the sources, run the unit tests, and generate a coverage report
mvn clean \
    -Dhadoop.version=${HADOOP_VERSION}
+ mvn clean -Dhadoop.version=2.7.5
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1g; support was removed in 8.0
[INFO] Scanning for projects...
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO] 
[INFO] ADAM_2.11
[INFO] ADAM_2.11: Avro-to-Dataset codegen utils
[INFO] ADAM_2.11: Core
[INFO] ADAM_2.11: APIs for Java, Python
[INFO] ADAM_2.11: CLI
[INFO] ADAM_2.11: Assembly
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ adam-parent-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: Avro-to-Dataset codegen utils 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ adam-codegen-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: Core 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ adam-core-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: APIs for Java, Python 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ adam-apis-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: CLI 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ adam-cli-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: Assembly 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-clean-plugin:3.1.0:clean (default-clean) @ adam-assembly-spark2_2.11 ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] ADAM_2.11 .......................................... SUCCESS [  0.191 s]
[INFO] ADAM_2.11: Avro-to-Dataset codegen utils ........... SUCCESS [  0.008 s]
[INFO] ADAM_2.11: Core .................................... SUCCESS [  0.011 s]
[INFO] ADAM_2.11: APIs for Java, Python ................... SUCCESS [  0.002 s]
[INFO] ADAM_2.11: CLI ..................................... SUCCESS [  0.022 s]
[INFO] ADAM_2.11: Assembly ................................ SUCCESS [  0.002 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD SUCCESS
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 0.438 s
[INFO] Finished at: 2020-01-24T11:58:42-08:00
[INFO] Final Memory: 22M/1472M
[INFO] ------------------------------------------------------------------------
    
# if this is a pull request, we need to set the coveralls pr id
if [[ ! -z $ghprbPullId ]];
then
    COVERALLS_PRB_OPTION="-DpullRequest=${ghprbPullId}"
fi
+ [[ ! -z 2245 ]]
+ COVERALLS_PRB_OPTION=-DpullRequest=2245

# coveralls token should not be visible
set +x +v
+ set +x +v
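The PR option feeds the Maven run whose command line set +x hides below; a hedged sketch of how such a property is typically forwarded (the goal list here is an assumption, not the script's actual command):

mvn test scoverage:report coveralls:report \
    -Dhadoop.version=${HADOOP_VERSION} \
    ${COVERALLS_PRB_OPTION}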
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=1g; support was removed in 8.0
[INFO] Scanning for projects...
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Build Order:
[INFO] 
[INFO] ADAM_2.11
[INFO] ADAM_2.11: Avro-to-Dataset codegen utils
[INFO] ADAM_2.11: Core
[INFO] ADAM_2.11: APIs for Java, Python
[INFO] ADAM_2.11: CLI
[INFO] ADAM_2.11: Assembly
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-parent-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-parent-spark2_2.11 ---
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-parent-spark2_2.11 ---
[INFO] Modified 0 of 241 .scala files
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] >>> scoverage-maven-plugin:1.1.1:report (default-cli) > [scoverage]test @ adam-parent-spark2_2.11 >>>
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-parent-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-parent-spark2_2.11 ---
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-parent-spark2_2.11 ---
[INFO] Modified 0 of 241 .scala files
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:pre-compile (default-cli) @ adam-parent-spark2_2.11 ---
[INFO] Skipping SCoverage execution for project with packaging type 'pom'
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:post-compile (default-cli) @ adam-parent-spark2_2.11 ---
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-parent-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] <<< scoverage-maven-plugin:1.1.1:report (default-cli) < [scoverage]test @ adam-parent-spark2_2.11 <<<
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:report (default-cli) @ adam-parent-spark2_2.11 ---
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: Avro-to-Dataset codegen utils 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
Downloading: https://repo.maven.apache.org/maven2/org/apache/avro/avro/1.9.1/avro-1.9.1.pom
Downloaded: https://repo.maven.apache.org/maven2/org/apache/avro/avro/1.9.1/avro-1.9.1.pom (7 KB at 20.8 KB/sec)
Downloading: https://repo.maven.apache.org/maven2/org/apache/avro/avro-parent/1.9.1/avro-parent-1.9.1.pom
Downloaded: https://repo.maven.apache.org/maven2/org/apache/avro/avro-parent/1.9.1/avro-parent-1.9.1.pom (24 KB at 1327.3 KB/sec)
Downloading: https://repo.maven.apache.org/maven2/org/apache/avro/avro-toplevel/1.9.1/avro-toplevel-1.9.1.pom
Downloaded: https://repo.maven.apache.org/maven2/org/apache/avro/avro-toplevel/1.9.1/avro-toplevel-1.9.1.pom (22 KB at 1641.0 KB/sec)
Downloading: https://repo.maven.apache.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.9.0/jackson-annotations-2.9.0.pom
Downloaded: https://repo.maven.apache.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.9.0/jackson-annotations-2.9.0.pom (2 KB at 158.0 KB/sec)
Downloading: https://repo.maven.apache.org/maven2/com/fasterxml/jackson/jackson-parent/2.9.0/jackson-parent-2.9.0.pom
Downloaded: https://repo.maven.apache.org/maven2/com/fasterxml/jackson/jackson-parent/2.9.0/jackson-parent-2.9.0.pom (8 KB at 849.1 KB/sec)
Downloading: https://repo.maven.apache.org/maven2/com/fasterxml/oss-parent/28/oss-parent-28.pom
Downloaded: https://repo.maven.apache.org/maven2/com/fasterxml/oss-parent/28/oss-parent-28.pom (20 KB at 1776.9 KB/sec)
Downloading: https://repo.maven.apache.org/maven2/org/apache/avro/avro/1.9.1/avro-1.9.1.jar
Downloading: https://repo.maven.apache.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.9.0/jackson-annotations-2.9.0.jar
Downloaded: https://repo.maven.apache.org/maven2/com/fasterxml/jackson/core/jackson-annotations/2.9.0/jackson-annotations-2.9.0.jar (65 KB at 1910.6 KB/sec)
Downloaded: https://repo.maven.apache.org/maven2/org/apache/avro/avro/1.9.1/avro-1.9.1.jar (554 KB at 8518.5 KB/sec)
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-codegen-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-codegen-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-codegen-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-codegen-spark2_2.11 ---
[INFO] Modified 0 of 4 .scala files
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-codegen-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-codegen-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/main/scala:-1: info: compiling
[INFO] Compiling 4 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/target/2.11.12/classes at 1579895937468
[INFO] prepare-compile in 0 s
[INFO] compile in 3 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ adam-codegen-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-test-source (add-test-source) @ adam-codegen-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:testResources (default-testResources) @ adam-codegen-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/test/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-codegen-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ adam-codegen-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- maven-surefire-plugin:3.0.0-M3:test (default-test) @ adam-codegen-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:2.0.0:test (test) @ adam-codegen-spark2_2.11 ---
Discovery starting.
Discovery completed in 88 milliseconds.
Run starting. Expected test count is: 0
DiscoverySuite:
Run completed in 118 milliseconds.
Total number of tests run: 0
Suites: completed 1, aborted 0
Tests: succeeded 0, failed 0, canceled 0, ignored 0, pending 0
No tests were executed.
[INFO] 
[INFO] >>> scoverage-maven-plugin:1.1.1:report (default-cli) > [scoverage]test @ adam-codegen-spark2_2.11 >>>
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-codegen-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-codegen-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-codegen-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-codegen-spark2_2.11 ---
[INFO] Modified 0 of 4 .scala files
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:pre-compile (default-cli) @ adam-codegen-spark2_2.11 ---
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-codegen-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-codegen-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/main/scala:-1: info: compiling
[INFO] Compiling 4 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/target/2.11.12/scoverage-classes at 1579895942573
[INFO] [info] Cleaning datadir [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/target/scoverage-data]
[INFO] [info] Beginning coverage instrumentation
[INFO] [info] Instrumentation completed [0 statements]
[INFO] [info] Wrote instrumentation file [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/target/scoverage-data/scoverage.coverage.xml]
[INFO] [info] Will write measurement data to [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/target/scoverage-data]
[INFO] prepare-compile in 0 s
[INFO] compile in 3 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ adam-codegen-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:post-compile (default-cli) @ adam-codegen-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-test-source (add-test-source) @ adam-codegen-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:testResources (default-testResources) @ adam-codegen-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-codegen/src/test/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-codegen-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ adam-codegen-spark2_2.11 ---
[INFO] No sources to compile
[INFO] 
[INFO] --- maven-surefire-plugin:3.0.0-M3:test (default-test) @ adam-codegen-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:2.0.0:test (test) @ adam-codegen-spark2_2.11 ---
Discovery starting.
Discovery completed in 55 milliseconds.
Run starting. Expected test count is: 0
DiscoverySuite:
Run completed in 82 milliseconds.
Total number of tests run: 0
Suites: completed 1, aborted 0
Tests: succeeded 0, failed 0, canceled 0, ignored 0, pending 0
No tests were executed.
[INFO] 
[INFO] <<< scoverage-maven-plugin:1.1.1:report (default-cli) < [scoverage]test @ adam-codegen-spark2_2.11 <<<
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:report (default-cli) @ adam-codegen-spark2_2.11 ---
[INFO] [scoverage] Generating cobertura XML report...
[INFO] [scoverage] Generating scoverage XML report...
[INFO] [scoverage] Generating scoverage HTML report...
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: Core 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
Downloading: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/bdg-formats/bdg-formats/0.15.0-SNAPSHOT/maven-metadata.xml
Downloaded: https://oss.sonatype.org/content/repositories/snapshots/org/bdgenomics/bdg-formats/bdg-formats/0.15.0-SNAPSHOT/maven-metadata.xml (793 B at 2.2 KB/sec)
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-core-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-core-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-core-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/scala added.
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/generated-sources/src/main/scala added.
[INFO] 
[INFO] --- exec-maven-plugin:1.5.0:java (generate-scala-products) @ adam-core-spark2_2.11 ---
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[INFO] 
[INFO] --- exec-maven-plugin:1.5.0:java (generate-scala-projection-fields) @ adam-core-spark2_2.11 ---
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-core-spark2_2.11 ---
[INFO] Modified 2 of 203 .scala files
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-core-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-core-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/java:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/scala:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/generated-sources/src/main/scala:-1: info: compiling
[INFO] Compiling 138 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/classes at 1579895955554
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicDataset.scala:3099: warning: no valid targets for annotation on value uTag - it is discarded unused. You may specify targets with meta-annotations, e.g. @(transient @getter)
[WARNING]   @transient val uTag: TypeTag[U]
[WARNING]    ^
[WARNING] warning: there were 87 deprecation warnings; re-run with -deprecation for details
[WARNING] warning: there were 5 feature warnings; re-run with -feature for details
[WARNING] three warnings found
[INFO] prepare-compile in 0 s
[INFO] compile in 31 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ adam-core-spark2_2.11 ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 5 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/classes
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-test-source (add-test-source) @ adam-core-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:testResources (default-testResources) @ adam-core-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 152 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-core-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala:-1: info: compiling
[INFO] Compiling 70 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/test-classes at 1579895989708
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/algorithms/consensus/NormalizationUtilsSuite.scala:59: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]       .build()
[WARNING]             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/algorithms/consensus/NormalizationUtilsSuite.scala:61: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val new_cigar = NormalizationUtils.leftAlignIndel(read)
[WARNING]                                                       ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/algorithms/consensus/NormalizationUtilsSuite.scala:66: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(RichAlignment(read).samtoolsCigar.getReadLength === new_cigar.getReadLength)
[WARNING]                          ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/converters/SAMRecordConverterSuite.scala:54: warning: method getNotPrimaryAlignmentFlag in class SAMRecord is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(!testAlignment.getPrimaryAlignment === testSAMRecord.getNotPrimaryAlignmentFlag)
[WARNING]                                                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/converters/SAMRecordConverterSuite.scala:89: warning: method getNotPrimaryAlignmentFlag in class SAMRecord is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(!testAlignment.getPrimaryAlignment === testSAMRecord.getNotPrimaryAlignmentFlag)
[WARNING]                                                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/converters/VariantContextConverterSuite.scala:53: warning: method extractDictionary in object SAMSequenceDictionaryExtractor is deprecated: see corresponding Javadoc for more information.
[WARNING]     SequenceDictionary(SAMSequenceDictionaryExtractor.extractDictionary(new File(path)))
[WARNING]                                                       ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:37: warning: variable start in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.start == featureToConvert.start)
[WARNING]                                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:38: warning: variable end in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.end == featureToConvert.end)
[WARNING]                                                            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:40: warning: variable score in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.count == featureToConvert.score)
[WARNING]                                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:57: warning: variable start in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.start == featureToConvert.start)
[WARNING]                                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:58: warning: variable end in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.end == featureToConvert.end)
[WARNING]                                                            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:60: warning: variable score in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.count == featureToConvert.score)
[WARNING]                                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/CoverageSuite.scala:61: warning: variable sampleId in class Feature is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(coverageAfterConversion.optSampleId == Some(featureToConvert.sampleId))
[WARNING]                                                                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:194: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(read.mdTag.get.end === read.getEnd)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:475: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val tag = read.mdTag.get
[WARNING]               ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:482: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(tag.getReference(read) === "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAGGGGGGGGGGAAAAAAAAAAGGGGGGGGGGAAAAAAAAAAA")
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:495: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val tag = read.mdTag.get
[WARNING]               ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:496: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(tag.getReference(read, withGaps = true) === "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGGGGGGGGGGAAAAAAAAAAA")
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:510: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newTag = MdTag.moveAlignment(read, newCigar)
[WARNING]                                      ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:526: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newTag = MdTag.moveAlignment(read, newCigar, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 100L)
[WARNING]                                      ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:544: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newTag = MdTag.moveAlignment(read, newCigar, "GGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 100L)
[WARNING]                                      ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:562: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newTag = MdTag.moveAlignment(read, newCigar, "AAAAAAAAAAGGGGGGGGGGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 100L)
[WARNING]                                      ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/MdTagSuite.scala:580: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newTag = MdTag.moveAlignment(read, newCigar, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", 100L)
[WARNING]                                      ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/SequenceDictionarySuite.scala:48: warning: method extractDictionary in object SAMSequenceDictionaryExtractor is deprecated: see corresponding Javadoc for more information.
[WARNING]     val ssd = SAMSequenceDictionaryExtractor.extractDictionary(new File(path))
[WARNING]                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/SequenceDictionarySuite.scala:62: warning: method extractDictionary in object SAMSequenceDictionaryExtractor is deprecated: see corresponding Javadoc for more information.
[WARNING]     val ssd = SAMSequenceDictionaryExtractor.extractDictionary(new File(path))
[WARNING]                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/models/SequenceDictionarySuite.scala:75: warning: method extractDictionary in object SAMSequenceDictionaryExtractor is deprecated: see corresponding Javadoc for more information.
[WARNING]     val ssd = SAMSequenceDictionaryExtractor.extractDictionary(new File(path))
[WARNING]                                              ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:832: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:845: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:857: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:870: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:883: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:896: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:907: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:918: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:929: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:943: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:956: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:968: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:980: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:993: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:1005: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:1018: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:1031: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/ADAMContextSuite.scala:1044: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val df = SQLContext.getOrCreate(sc).read.parquet(outputDir)
[WARNING]                         ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/CoverageDatasetSuite.scala:307: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/CoverageDatasetSuite.scala:338: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/CoverageDatasetSuite.scala:369: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/CoverageDatasetSuite.scala:400: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/CoverageDatasetSuite.scala:431: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/CoverageDatasetSuite.scala:462: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/FeatureDatasetSuite.scala:1022: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/FeatureDatasetSuite.scala:1053: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/FeatureDatasetSuite.scala:1081: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/FeatureDatasetSuite.scala:1112: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/FeatureDatasetSuite.scala:1143: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/feature/FeatureDatasetSuite.scala:1174: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/fragment/FragmentDatasetSuite.scala:400: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/fragment/FragmentDatasetSuite.scala:431: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/fragment/FragmentDatasetSuite.scala:459: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/fragment/FragmentDatasetSuite.scala:490: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/fragment/FragmentDatasetSuite.scala:521: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/fragment/FragmentDatasetSuite.scala:552: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1438: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1460: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1491: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1519: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1550: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1581: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/AlignmentDatasetSuite.scala:1613: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTargetSuite.scala:83: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(TargetOrdering.contains(targets.head, read))
[WARNING]                                                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTargetSuite.scala:191: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]         if (read.getStart < 105) {
[WARNING]             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTargetSuite.scala:194: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]           assert(targets.head.readRange.start === read.getStart)
[WARNING]                                                   ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTargetSuite.scala:195: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]           assert(targets.head.readRange.end === read.getEnd)
[WARNING]                                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/read/realignment/IndelRealignmentTargetSuite.scala:196: warning: method richRecordToRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]           assert(check_indel(targets.head, read))
[WARNING]                                            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:406: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:437: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:465: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:496: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:527: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:558: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:697: warning: variable end in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.end === 16157602L)
[WARNING]                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:698: warning: variable end in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.end === 16157602L)
[WARNING]                          ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:698: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.end === 16157602L)
[WARNING]                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:699: warning: variable annotation in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.annotation === null)
[WARNING]                          ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:699: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.annotation === null)
[WARNING]                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:710: warning: variable end in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.end === 16157602L)
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:712: warning: variable end in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.end === 16157602L)
[WARNING]                                     ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:712: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.end === 16157602L)
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:714: warning: variable annotation in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.annotation === null)
[WARNING]                                     ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:714: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.annotation === null)
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:745: warning: variable end in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.end === 16157602L)
[WARNING]                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:746: warning: variable end in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.end === 16157602L)
[WARNING]                          ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:746: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.end === 16157602L)
[WARNING]                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:747: warning: variable attributes in class VariantAnnotation is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.annotation.attributes.get("END") === "16157602")
[WARNING]                                     ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:747: warning: variable annotation in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.annotation.attributes.get("END") === "16157602")
[WARNING]                          ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:747: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(first.variant.annotation.attributes.get("END") === "16157602")
[WARNING]                  ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:758: warning: variable end in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.end === 16157602L)
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:759: warning: variable end in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.end === 16157602L)
[WARNING]                                     ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:759: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.end === 16157602L)
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:760: warning: variable attributes in class VariantAnnotation is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.annotation.attributes.get("END") === "16157602")
[WARNING]                                                ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:760: warning: variable annotation in class Variant is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.annotation.attributes.get("END") === "16157602")
[WARNING]                                     ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeDatasetSuite.scala:760: warning: variable variant in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     assert(firstVcfGenotype.variant.annotation.attributes.get("END") === "16157602")
[WARNING]                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantContextDatasetSuite.scala:181: warning: variable start in class Genotype is deprecated: see corresponding Javadoc for more information.
[WARNING]     val filtered = vcs.toGenotypes().rdd.filter(_.start == 66631043)
[WARNING]                                                   ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantDatasetSuite.scala:422: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantDatasetSuite.scala:453: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantDatasetSuite.scala:481: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantDatasetSuite.scala:512: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantDatasetSuite.scala:543: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/VariantDatasetSuite.scala:574: warning: method getOrCreate in object SQLContext is deprecated: Use SparkSession.builder instead
[WARNING]     val sqlContext = SQLContext.getOrCreate(sc)
[WARNING]                                 ^
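Note: the SQLContext.getOrCreate deprecation warnings above all point at the same migration suggested by the compiler message, "Use SparkSession.builder instead". A minimal sketch of that replacement, assuming the existing SparkContext `sc` is reused (the helper name below is illustrative, not taken from the ADAM test suites):

    import org.apache.spark.SparkContext
    import org.apache.spark.sql.SparkSession

    // Deprecated form flagged by the compiler:
    //   val sqlContext = SQLContext.getOrCreate(sc)
    // Non-deprecated equivalent: build (or fetch) a SparkSession from the
    // existing SparkContext's configuration, then use its sqlContext where
    // the older API surface is still required.
    def sqlContextFor(sc: SparkContext) = {
      val spark = SparkSession.builder()
        .config(sc.getConf)
        .getOrCreate()
      spark.sqlContext
    }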
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:31: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(recordWithoutClipping.unclippedStart == 42L)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:32: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(recordWithClipping.unclippedStart == 40L)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:33: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(recordWithHardClipping.unclippedStart == 37L)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:41: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(recordWithoutClipping.unclippedEnd == 20L)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:42: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(recordWithClipping.unclippedEnd == 20L)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:43: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(recordWithHardClipping.unclippedEnd == 20L)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:49: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(rec.tags.size === 2)
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:50: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(rec.tags(0) === Attribute("XX", TagType.Integer, 3))
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:51: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(rec.tags(1) === Attribute("YY", TagType.String, "foo"))
[WARNING]            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichAlignmentSuite.scala:57: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val overlaps = unmappedRead.overlapsReferencePosition(ReferencePosition("chr1", 10))
[WARNING]                    ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:34: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newCigar = RichCigar(new Cigar(read.samtoolsCigar.getCigarElements)).moveLeft(1)
[WARNING]                                        ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:37: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(newCigar2.cigar.getReadLength == read.samtoolsCigar.getReadLength)
[WARNING]                                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:49: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newCigar = RichCigar(new Cigar(read.samtoolsCigar.getCigarElements)).moveLeft(1)
[WARNING]                                        ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:52: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(newCigar2.cigar.getReadLength == read.samtoolsCigar.getReadLength)
[WARNING]                                             ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:64: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newCigar = RichCigar(new Cigar(read.samtoolsCigar.getCigarElements)).moveLeft(1)
[WARNING]                                        ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:66: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(newCigar.cigar.getReadLength == read.samtoolsCigar.getReadLength)
[WARNING]                                            ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:78: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     val newCigar = RichCigar(new Cigar(read.samtoolsCigar.getCigarElements)).moveLeft(1)
[WARNING]                                        ^
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala/org/bdgenomics/adam/rich/RichCigarSuite.scala:80: warning: method recordToRichRecord in object RichAlignment is deprecated: Use explicit conversion wherever possible in new development.
[WARNING]     assert(newCigar.cigar.getReadLength == read.samtoolsCigar.getReadLength)
[WARNING]                                            ^
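Note: the RichAlignment warnings above come from relying on the deprecated implicit conversions in object RichAlignment; the deprecation message recommends explicit conversion in new development. A hedged sketch of the explicit form, assuming RichAlignment wraps an Alignment record via its public constructor (the helper name is illustrative; unclippedStart is the member exercised by RichAlignmentSuite above):

    import org.bdgenomics.adam.rich.RichAlignment
    import org.bdgenomics.formats.avro.Alignment

    // Wrap the record explicitly instead of letting the deprecated
    // implicit recordToRichRecord conversion fire.
    def unclippedStartOf(read: Alignment) =
      new RichAlignment(read).unclippedStart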
[WARNING] 127 warnings found
[INFO] prepare-compile in 0 s
[INFO] compile in 35 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ adam-core-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- maven-surefire-plugin:3.0.0-M3:test (default-test) @ adam-core-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:2.0.0:test (test) @ adam-core-spark2_2.11 ---
Discovery starting.
Discovery completed in 1 second, 994 milliseconds.
Run starting. Expected test count is: 1175
RepairPartitionsSuite:
- don't pull from the first partition
- properly handle pulling from an empty iterator
- gets a single read from the partition if there are no other reads in the pair
- gets all the reads from a pair from the start of a partition
- properly handle dropping from an empty iterator
- don't drop from the first partition
- drop a single read from the partition if there are no other reads in the pair
- drops all the reads from a pair from the start of a partition
- only append to the first partition
- drop a single read from the partition and append read when in the middle
- drop reads from the start and don't append when in the last partition
- can't have more records than number of partitions
- unroll array for broadcast
2020-01-24 12:00:27 WARN  Utils:66 - Your hostname, research-jenkins-worker-08 resolves to a loopback address: 127.0.1.1; using 192.168.10.28 instead (on interface eth0)
2020-01-24 12:00:27 WARN  Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2020-01-24 12:00:28 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
- move pairs around an rdd
FlagStatSuite:
- Standard FlagStat test
ModPartitionerSuite:
- report number of partitions correctly
- partition a number that is lower than the number of partitions and positive
- partition a number that is greater than the number of partitions and positive
- partition a number that is lower than the number of partitions and negative
- partition a number that is greater than the number of partitions and negative
- fire an exception if input is not an integer
TwoBitFileSuite:
- correctly read sequence from .2bit file
- correctly return masked sequences from .2bit file
- correctly return Ns from .2bit file
- correctly calculates sequence dictionary
NonoverlappingRegionsSuite:
- alternating returns an alternating seq of items
- Single region returns itself
- Two adjacent regions will be merged
- Nonoverlapping regions will all be returned
- Many overlapping regions will all be merged
- ADAMRecords return proper references
ReferencePositionSuite:
- create reference position from mapped read
- create reference position from variant
- create reference position from genotype
- create reference position from variant starting at vcf 0
- create reference position from genotype starting at vcf 0
ConsensusGeneratorFromReadsSuite:
- checking search for consensus list for artificial reads
FragmentConverterSuite:
- build a fragment collector and convert to a read
- if a fragment isn't associated with a contig, don't get a fragment collector
- convert an rdd of discontinuous fragments, all from the same contig
- convert an rdd of contiguous fragments, all from the same contig
- convert an rdd of varied fragments from multiple contigs
FileMergerSuite:
- cannot write both empty gzip block and cram eof
- buffer size must be non-negative
RichAlignmentSuite:
- Unclipped Start
- Unclipped End
- tags contains optional fields
- read overlap unmapped read
- read overlap reference position
- read overlap same position different contig
SingleReadBucketSuite:
- convert unmapped pair to fragment
- convert proper pair to fragment
- convert read pair to fragment with first of pair chimeric read
RecalibrationTableSuite:
- look up quality scores in table
FlankSlicesSuite:
- don't put flanks on non-adjacent slices
- put flanks on adjacent slices
InterleavedFastqInputFormatSuite:
- interleaved FASTQ hadoop reader: interleaved_fastq_sample1.ifq->interleaved_fastq_sample1.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample2.ifq->interleaved_fastq_sample2.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample3.ifq->interleaved_fastq_sample3.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample4.ifq->interleaved_fastq_sample4.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample5.ifq->interleaved_fastq_sample5.ifq.output
TreeRegionJoinSuite:
- run a join between data on a single contig
ReferenceRegionSuite:
- cannot create an empty predicate
- parse empty string throws IllegalArgumentException
- parse contigName only string into reference regions
- parse to end strings into reference regions
- parse string into reference regions
- contains(: ReferenceRegion)
- contains(: ReferencePosition)
- merge
- overlaps and covers
- overlapsBy and coversBy
- distance(: ReferenceRegion)
- distance(: ReferencePosition)
- unstrandedDistance
- subtract fails on non-overlapping and non-covering regions
- subtract
- create region from unmapped read fails
- create region from read with null alignment positions fails
- create stranded region from unmapped read fails
- create stranded region from read with null alignment positions fails
- create stranded region from read with null alignment strand fails
- create stranded region from read on forward strand
- create stranded region from read on reverse strand
- create region from mapped read contains read start and end
- validate that adjacent regions can be merged
- validate that non-adjacent regions cannot be merged
- validate that nearby regions can be merged
- validate that non-nearby regions cannot be merged
- compute convex hull of two sets
- region name is sanitized when creating region from read
- intersection fails on non-overlapping regions
- intersection fails when minOverlap is not met
- compute intersection
- overlap tests for oriented reference region
- check the width of a reference region
- make a reference region for a variant or genotype
- make a reference region for a variant or genotype starting at VCF 0
- uniformly pad a reference region
- uniformly pad a reference region into negative coordinate space, ensure non negative start
- unevenly pad a reference region
- unevenly pad a reference region into negative coordinate space, ensure non negative start
- can build an open ended reference region
- can build an open ended reference region with strand
- can build a reference region with an open start position
- can build a reference region with an open start position with strand
- can build a reference region that covers the entirety of a contig
- can build a reference region that covers the entirety of a contig with strand
- convert a genotype and then get the reference region
- create region from feature with null alignment positions fails
- create stranded region from feature with null alignment positions fails
- create stranded region from feature with null alignment strand fails
- create stranded region from feature on forward strand
- create stranded region from feature on reverse strand
CoverageDatasetSuite:
- correctly saves coverage
- can read a bed file to coverage
- correctly filters coverage with predicate
- keeps sample metadata
- can read a bed file with multiple samples to coverage
- correctly flatmaps coverage without aggregated bins
- correctly flatmaps coverage with aggregated bins
- collapses coverage records in one partition
- approximately collapses coverage records in multiple partitions
2020-01-24 12:00:47 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform coverage to slice genomic dataset
- transform coverage to feature genomic dataset
2020-01-24 12:00:49 WARN  Utils:66 - Truncated the string representation of a plan since it was too large. This behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.
- transform coverage to fragment genomic dataset
- transform coverage to read genomic dataset
- transform coverage to genotype genomic dataset
- transform coverage to variant genomic dataset
- transform coverage to variant context genomic dataset
- copy coverage rdd
- copy coverage dataset
- copy coverage rdd without sequence dictionary
- copy coverage dataset without sequence dictionary
- transform dataset via java API
RecalibratorSuite:
- don't replace quality if quality was null
- if no covariates, return alignment
- skip recalibration if base is below quality threshold
- recalibrate changed bases above quality threshold
SliceDatasetSuite:
- create a new slice genomic dataset
- create a new slice genomic dataset with sequence dictionary
- merge slices into a sequence genomic dataset
- save as parquet
- round trip as parquet
- save as fasta
- save as single file fasta
- convert slices to reads
- convert slices to sequences
GenotypeDatasetSuite:
- union two genotype genomic datasets together
- round trip to parquet
- round trip to partitioned parquet
- use broadcast join to pull down genotypes mapped to targets
- use right outer broadcast join to pull down genotypes mapped to targets
- use shuffle join to pull down genotypes mapped to targets
- use right outer shuffle join to pull down genotypes mapped to targets
- use left outer shuffle join to pull down genotypes mapped to targets
- use full outer shuffle join to pull down genotypes mapped to targets
- use shuffle join with group by to pull down genotypes mapped to targets
- use right outer shuffle join with group by to pull down genotypes mapped to targets
- convert back to variant contexts
- load parquet to sql, save, re-read from avro
2020-01-24 12:01:14 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform genotypes to slice genomic dataset
- transform genotypes to coverage genomic dataset
- transform genotypes to feature genomic dataset
- transform genotypes to fragment genomic dataset
- transform genotypes to read genomic dataset
- transform genotypes to variant genomic dataset
- transform genotypes to variant context genomic dataset
- loading genotypes then converting to variants yields same output as loading variants
- filter RDD bound genotypes to genotype filters passed
- filter dataset bound genotypes to genotype filters passed
- filter RDD bound genotypes by genotype quality
- filter dataset bound genotypes by genotype quality
- filter RDD bound genotypes by read depth
- filter dataset bound genotypes by read depth
- filter RDD bound genotypes by alternate read depth
- filter dataset bound genotypes by alternate read depth
- filter RDD bound genotypes by reference read depth
- filter dataset bound genotypes by reference read depth
- filter RDD bound genotypes by sample
- filter dataset bound genotypes by sample
- filter RDD bound genotypes by samples
- filter dataset bound genotypes by samples
- filter RDD bound no call genotypes
- filter dataset no call genotypes
- round trip gVCF END attribute without nested variant annotations rdd bound
- round trip gVCF END attribute without nested variant annotations dataset bound
- round trip gVCF END attribute with nested variant annotations rdd bound
- round trip gVCF END attribute with nested variant annotations dataset bound
- transform dataset via java API
FragmentDatasetSuite:
- don't lose any reads when piping interleaved fastq to sam
- don't lose any reads when piping tab5 to sam
- don't lose any reads when piping tab6 to sam
- use broadcast join to pull down fragments mapped to targets
- use right outer broadcast join to pull down fragments mapped to targets
- use shuffle join to pull down fragments mapped to targets
- use right outer shuffle join to pull down fragments mapped to targets
- use left outer shuffle join to pull down fragments mapped to targets
- use full outer shuffle join to pull down fragments mapped to targets
- use shuffle join with group by to pull down fragments mapped to targets
- use right outer shuffle join with group by to pull down fragments mapped to targets
- bin quality scores in fragments
- union two genomic datasets of fragments together
- load parquet to sql, save, re-read from avro
- transform fragments to slice genomic dataset
- transform fragments to coverage genomic dataset
- transform fragments to feature genomic dataset
- transform fragments to read genomic dataset
- transform fragments to genotype genomic dataset
- transform fragments to variant genomic dataset
- transform fragments to variant context genomic dataset
- paired read names with index sequences in read names can group into fragments
- interleaved paired read names with index sequences in read names can group into fragments
- interleaved paired read names with index sequences in read names as fragments
- transform dataset via java API
- dataset and rdd conversion to alignments are equivalent
SingleFastqInputFormatSuite:
- FASTQ hadoop reader: fastq_sample1.fq->single_fastq_sample1.fq.output
- FASTQ hadoop reader: fastq_sample2.fq->single_fastq_sample2.fq.output
- FASTQ hadoop reader: fastq_sample3.fq->single_fastq_sample3.fq.output
- FASTQ hadoop reader: fastq_sample4.fq->single_fastq_sample4.fq.output
VariantContextDatasetSuite:
- load a gvcf with a missing info field set to .
- union two variant context genomic datasets together
- can write, then read in .vcf file
- can write as a single file via simple saveAsVcf method, then read in .vcf file
- can write as a single file via full saveAsVcf method, then read in .vcf file
- transform a vcf file with bad header
- read a vcf file with multi-allelic variants to split
- support VCFs with +Inf/-Inf float values
- support VCFs with `nan` instead of `NaN` float values
- don't lose any variants when piping as VCF
- pipe works with empty partitions
- don't lose any non-default VCF header lines or attributes when piping as VCF
- save a file sorted by contig index
- save a lexicographically sorted file
- save a multiallelic gvcf !!! IGNORED !!!
- test metadata
- save sharded bgzip vcf
- save bgzip vcf as single file
- can't save file with non-vcf extension
- transform variant contexts to slice genomic dataset
- transform variant contexts to coverage genomic dataset
- transform variant contexts to feature genomic dataset
- transform variant contexts to fragment genomic dataset
- transform variant contexts to read genomic dataset
- transform variant contexts to genotype genomic dataset
- transform variant contexts to variant genomic dataset
- save and reload from partitioned parquet
- transform dataset via java API
ConsensusSuite:
- test the insertion of a consensus insertion into a reference
- test the insertion of a consensus deletion into a reference
- inserting empty consensus returns the reference
SequenceDatasetSuite:
- create a new sequence genomic dataset
- create a new sequence genomic dataset with sequence dictionary
- save as parquet
- round trip as parquet
- save as fasta
- save as single file fasta
- convert sequences to reads
- convert sequences to slices
- slice sequences to a maximum length
- slice sequences shorter than maximum length
- filter sequences by overlapping region
- filter sequences failing to overlap region
- filter sequences by overlapping regions
- filter sequences failing to overlap regions
- slice sequences overlapping a smaller region
- slice sequences overlapping a larger region
- slice sequences failing to overlap a region
- slice sequences overlapping smaller regions
- slice sequences overlapping larger regions
- slice sequences failing to overlap regions
AttributeUtilsSuite:
- parseTags returns a reasonable set of tagStrings
- parseTags works with NumericSequence tagType
- empty string is parsed as zero tagStrings
- incorrectly formatted tag throws an exception
- string tag with a ':' in it is correctly parsed
- oq string tag with many ':' in it is correctly parsed
- oq string tag with a ',' in it is correctly parsed
- if a tag is an array but doesn't define its format, throw
MarkDuplicatesSuite:
- single read
- reads at different positions
- reads at the same position
- reads at the same position with clipping
- reads on reverse strand
- unmapped reads
- read pairs
- read pairs with fragments
- quality scores
- read pairs that cross chromosomes
- single fragment
- fragments at different positions
- fragments at the same position
- fragments at the same position with clipping
- fragments on reverse strand
- unmapped fragments
- read pairs as fragments
- read pairs with fragments as fragments
- chimeric fragments
- inverse pairs
- supplemental reads
TranscriptEffectConverterSuite:
- parse empty transcript effect
- parse empty transcript effect strict validation stringency
- parse invalid transcript effect
- parse invalid transcript effect strict validation stringency
- parse transcript effect
- parse empty list VCF ANN attribute
- parse empty list VCF ANN attribute strict validation stringency
- parse empty string VCF ANN attribute
- parse empty string VCF ANN attribute strict validation stringency
- parse invalid VCF ANN attribute
- parse invalid VCF ANN attribute strict validation stringency
- parse VCF ANN attribute with one transcript effect
- parse VCF ANN attribute with multiple transcript effects
- convert to transcript effect from null VCF ANN attribute in variant context
- convert to transcript effect from empty list VCF ANN attribute in variant context
- convert to transcript effect from empty list VCF ANN attribute in variant context strict validation stringency
- convert to transcript effect from empty string VCF ANN attribute in variant context
- convert to transcript effect from empty string VCF ANN attribute in variant context strict validation stringency
- convert to transcript effect from invalid VCF ANN attribute in variant context
- convert to transcript effect from invalid VCF ANN attribute in variant context strict validation stringency
2020-01-24 12:02:31 WARN  TranscriptEffectConverter$:190 - Could not convert VCF INFO reserved key ANN value to TranscriptEffect, caught java.lang.NumberFormatException: For input string: "not a number".
- convert to transcript effect from VCF ANN attribute with invalid number in variant context lenient validation stringency
2020-01-24 12:02:31 WARN  TranscriptEffectConverter$:190 - Could not convert VCF INFO reserved key ANN value to TranscriptEffect, caught java.lang.NumberFormatException: For input string: "not a number".
- convert to transcript effect from VCF ANN attribute with invalid fraction in variant context lenient validation stringency
- convert to transcript effect from VCF ANN attribute with invalid number in variant context strict validation stringency
- convert to transcript effect from VCF ANN attribute with invalid fraction in variant context strict validation stringency
- convert to transcript effect from VCF ANN attribute in variant context different alt allele
- convert to transcript effect from VCF ANN attribute in variant context same alt allele
- convert to transcript effect from VCF ANN attribute in variant context multiple effects same alt allele
- convert transcript effect to VCF ANN attribute value
- convert transcript effect with null fields to VCF ANN attribute value
2020-01-24 12:02:31 WARN  TranscriptEffectConverter$:190 - Incorrect fractional value ?/2, missing numerator
- convert transcript effect with incorrect fractional value to VCF ANN attribute value
IndelRealignmentTargetSuite:
- checking simple realignment target
- creating simple target from read with deletion
- creating simple target from read with insertion
- joining simple realignment targets on same chr
- joining simple realignment targets on different chr throws exception
- creating targets from three intersecting reads, same indel
- creating targets from three intersecting reads, two different indel
- creating targets from two disjoint reads
- creating targets for artificial reads: one-by-one
- creating targets for artificial reads: all-at-once (merged)
- creating indel targets for mason reads
- additional indel targets taken from consensus generator
NormalizationUtilsSuite:
- cannot move an indel left if there are no bases to its left
- move a simple indel to farthest position left until bases run out
- move a simple indel to farthest position left, past length of indel
- cannot move a left normalized indel in a short tandem repeat
- move an indel in a short tandem repeat
- move an indel in a short tandem repeat of more than 2 bases, where shift is not an integer multiple of repeated sequence length
- moving a simple read with single deletion that cannot shift
- shift an indel left by 0 in a cigar
- shift an indel left by 1 in a cigar
- do not left align a complex read which is already left aligned
ReadGroupDictionarySuite:
- simple conversion to and from sam read group
- sample name must be set
- simple equality checks
- get samples from read group dictionary
- empty read group is empty
- merging a dictionary with itself should work
- round trip a record with all attributes set
RightOuterTreeRegionJoinSuite:
- Ensure same reference regions get passed together
- Overlapping reference regions
InnerShuffleRegionJoinSuite:
- Overlapping reference regions
- Multiple reference regions do not throw exception
FeatureDatasetSuite:
- round trip GTF format
- save GTF as GFF3 format
- save GTF as BED format
- save GTF as IntervalList format
- save GTF as NarrowPeak format
- save GFF3 as GTF format
- save GFF3 as BED format
- save GFF3 as IntervalList format
- save GFF3 as NarrowPeak format
- round trip GFF3 format
- ignore FASTA sequence in GFF3 file
- save BED as GTF format
- save BED as GFF3 format
- save BED as IntervalList format
- save BED as NarrowPeak format
- round trip BED6 format
- keeps sample metadata
- round trip BED12 format
- save to UCSC BED format
- save IntervalList as GTF format
- save IntervalList as GFF3 format
- save IntervalList as BED format
- save IntervalList as IntervalList format
- save IntervalList as NarrowPeak format
- round trip IntervalList format
- save NarrowPeak as GTF format
- save NarrowPeak as GFF3 format
- save NarrowPeak as BED format
- save NarrowPeak as IntervalList format
- save NarrowPeak as NarrowPeak format
- round trip NarrowPeak format
- sort by reference
- sort by reference and feature fields
- sort gene features by reference and gene structure
- sort transcript features by reference and gene structure
- sort exon features by reference and gene structure
- sort intron features by reference and gene structure
- correctly flatmaps CoverageDataset from FeatureDataset
- use broadcast join to pull down features mapped to targets
- use right outer broadcast join to pull down features mapped to targets
- use shuffle join to pull down features mapped to targets
- use right outer shuffle join to pull down features mapped to targets
- use left outer shuffle join to pull down features mapped to targets
- use full outer shuffle join to pull down features mapped to targets
- use shuffle join with group by to pull down features mapped to targets
- use right outer shuffle join with group by to pull down features mapped to targets
- union two feature rdds together
- obtain sequence dictionary contig lengths from header in IntervalList format
- don't lose any features when piping as BED format
- don't lose any features when piping as GTF format
- don't lose any features when piping as GFF3 format
- don't lose any features when piping as NarrowPeak format
- load parquet to sql, save, re-read from avro
- load partitioned parquet to sql, save, re-read from avro
2020-01-24 12:03:01 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform features to slice genomic dataset
- transform features to coverage genomic dataset
- transform features to fragment genomic dataset
- transform features to read genomic dataset
- transform features to genotype genomic dataset
- transform features to variant genomic dataset
- transform features to variant context genomic dataset
- filter RDD bound features by feature type
- filter dataset bound features by feature type
- filter RDD bound features by feature types
- filter dataset bound features by feature types
- filter RDD bound features by gene
- filter dataset bound features by gene
- filter RDD bound features by genes
- filter dataset bound features by genes
- filter RDD bound features by transcript
- filter dataset bound features by transcript
- filter RDD bound features by transcripts
- filter dataset bound features by transcripts
- filter RDD bound features by exon
- filter dataset bound features by exon
- filter RDD bound features by exons
- filter dataset bound features by exons
- filter RDD bound features by score
- filter dataset bound features by score
- filter RDD bound features by parent
- filter dataset bound features by parent
- filter RDD bound features by parents
- filter dataset bound features by parents
- filter RDD bound features by attribute
- filter dataset bound features by attribute
- transform dataset via java API
AlphabetSuite:
- test size of a case-sensitive alphabet
- test apply of a case-sensitive alphabet
- test reverse complement of a case-sensitive alphabet
- test exact reverse complement of a case-sensitive alphabet
- test size of a case-insensitive alphabet
- test apply of a case-insensitive alphabet
- test reverse complement of a case-insensitive alphabet
- test exact reverse complement of a case-insensitive alphabet
- DNA alphabet
- map unknown bases to N
SortedGenomicDatasetSuite:
- testing that partition and sort provide correct outputs
- testing copartition maintains or adds sort
- testing that we don't drop any data on the right side even though it doesn't map to a partition on the left
- testing that sorted shuffleRegionJoin matches unsorted
- testing that sorted fullOuterShuffleRegionJoin matches unsorted
- testing that sorted rightOuterShuffleRegionJoin matches unsorted
- testing that sorted leftOuterShuffleRegionJoin matches unsorted
- testing that we can persist the sorted knowledge
VariantContextConverterSuite:
- Convert htsjdk site-only SNV to ADAM
- Convert somatic htsjdk site-only SNV to ADAM
- Convert htsjdk site-only CNV to ADAM
- Convert htsjdk SNV w/ genotypes w/ phase information to ADAM
- Convert htsjdk SNV with different variant filters to ADAM
- Convert htsjdk SNV with different genotype filters to ADAM
- Convert ADAM site-only SNV to htsjdk
- Convert ADAM SNV w/ genotypes to htsjdk
- Convert ADAM SNV w/ genotypes but bad SB to htsjdk with strict validation
2020-01-24 12:03:18 WARN  VariantContextConverter:2300 - Applying annotation extraction function <function2> to {"variant": {"referenceName": "1", "start": 0, "end": 1, "names": [], "splitFromMultiAllelic": false, "referenceAllele": "A", "alternateAllele": "T", "quality": null, "filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "annotation": null}, "referenceName": null, "start": null, "end": null, "variantCallingAnnotations": {"filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "downsampled": null, "baseQRankSum": null, "fisherStrandBiasPValue": 3.0, "rmsMapQ": 0.0, "mapq0Reads": 5, "mqRankSum": null, "readPositionRankSum": null, "genotypePriors": [], "genotypePosteriors": [], "vqslod": null, "culprit": null, "attributes": {}}, "sampleId": "NA12878", "sampleDescription": null, "processingDescription": null, "alleles": ["REF", "ALT"], "expectedAlleleDosage": null, "referenceReadDepth": null, "alternateReadDepth": null, "readDepth": null, "minReadDepth": null, "genotypeQuality": null, "genotypeLikelihoods": [], "nonReferenceLikelihoods": [], "strandBiasComponents": [0, 2], "splitFromMultiAllelic": false, "phased": false, "phaseSetId": null, "phaseQuality": null} failed with java.lang.IllegalArgumentException: requirement failed: Illegal strand bias components length. Must be empty or 4. In:
{"variant": {"referenceName": "1", "start": 0, "end": 1, "names": [], "splitFromMultiAllelic": false, "referenceAllele": "A", "alternateAllele": "T", "quality": null, "filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "annotation": null}, "referenceName": null, "start": null, "end": null, "variantCallingAnnotations": {"filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "downsampled": null, "baseQRankSum": null, "fisherStrandBiasPValue": 3.0, "rmsMapQ": 0.0, "mapq0Reads": 5, "mqRankSum": null, "readPositionRankSum": null, "genotypePriors": [], "genotypePosteriors": [], "vqslod": null, "culprit": null, "attributes": {}}, "sampleId": "NA12878", "sampleDescription": null, "processingDescription": null, "alleles": ["REF", "ALT"], "expectedAlleleDosage": null, "referenceReadDepth": null, "alternateReadDepth": null, "readDepth": null, "minReadDepth": null, "genotypeQuality": null, "genotypeLikelihoods": [], "nonReferenceLikelihoods": [], "strandBiasComponents": [0, 2], "splitFromMultiAllelic": false, "phased": false, "phaseSetId": null, "phaseQuality": null}.
- Convert ADAM SNV w/ genotypes but bad SB to htsjdk with lenient validation
- Convert htsjdk multi-allelic sites-only SNVs to ADAM
- Convert htsjdk multi-allelic SNVs to ADAM and back to htsjdk
- Convert gVCF reference records to ADAM
- Convert htsjdk variant context with no IDs to ADAM
- Convert htsjdk variant context with one ID to ADAM
- Convert htsjdk variant context with multiple IDs to ADAM
- Convert ADAM variant context with no names to htsjdk
- Convert ADAM variant context with one name to htsjdk
- Convert ADAM variant context with multiple names to htsjdk
- Convert ADAM variant context with null filters applied to htsjdk
- Convert ADAM variant context with no filters applied to htsjdk
- Convert ADAM variant context with passing filters to htsjdk
- Convert ADAM variant context with failing filters to htsjdk
- no phasing set going htsjdk->adam
- phased but no phase set info going htsjdk->adam
- set phase set and extract going htsjdk->adam
- no allelic depth going htsjdk->adam
- set allelic depth going htsjdk->adam
- no gt read depth going htsjdk->adam
- extract gt read depth going htsjdk->adam
- no min gt read depth going htsjdk->adam
- extract min gt read depth going htsjdk->adam
- no genotype quality going htsjdk->adam
- extract genotype quality going htsjdk->adam
- no phred likelihood going htsjdk->adam
- extract phred likelihoods going htsjdk->adam
- no strand bias info going htsjdk->adam
- extract strand bias info going htsjdk->adam
- no filters going htsjdk->adam
- filters passed going htsjdk->adam
- extract single filter going htsjdk->adam
- extract multiple filters going htsjdk->adam
- no fisher strand bias going htsjdk->adam
- extract fisher strand bias going htsjdk->adam
- no rms mapping quality going htsjdk->adam
- extract rms mapping quality going htsjdk->adam
- no mq0 going htsjdk->adam
- extract mq0 going htsjdk->adam
- no gt read depth going adam->htsjdk
- extract gt read depth going adam->htsjdk
- throw iae if missing one component of gt read depth going adam->htsjdk
- no depth going adam->htsjdk
- extract depth going adam->htsjdk
- no min depth going adam->htsjdk
- extract min depth going adam->htsjdk
- no quality going adam->htsjdk
- extract quality going adam->htsjdk
- no genotype likelihoods going adam->htsjdk
- extract genotype likelihoods going adam->htsjdk
- no strand bias going adam->htsjdk
- malformed strand bias going adam->htsjdk
- extract strand bias going adam->htsjdk
- no phasing info going adam->htsjdk
- unphased going adam->htsjdk
- phased but no ps/pq going adam->htsjdk
- phased but no pq going adam->htsjdk
- phased but no ps going adam->htsjdk
- phased going adam->htsjdk
- no filter info going adam->htsjdk
- if filters applied, must set passed/failed going adam->htsjdk
- filters passed going adam->htsjdk
- if filters failed, must set filters failed going adam->htsjdk
- single filter failed going adam->htsjdk
- multiple filters failed going adam->htsjdk
- no fisher strand bias going adam->htsjdk
- extract fisher strand bias going adam->htsjdk
- no rms mapping quality going adam->htsjdk
- extract rms mapping quality going adam->htsjdk
- no mapping quality 0 reads going adam->htsjdk
- extract mapping quality 0 reads going adam->htsjdk
- no names set going htsjdk->adam
- single name set going htsjdk->adam
- multiple names set going htsjdk->adam
- no quality going htsjdk->adam
- quality set going htsjdk->adam
- no filters applied going htsjdk->adam
- filters applied and passed going htsjdk->adam
- single filter applied and failed going htsjdk->adam
- multiple filters applied and failed going htsjdk->adam
- no names set adam->htsjdk
- set a single name adam->htsjdk
- set multiple names adam->htsjdk
- no qual set adam->htsjdk
- qual is set adam->htsjdk
- no filters applied adam->htsjdk
- null filters applied adam->htsjdk
- filters passed adam->htsjdk
- if filter failed, must have filters adam->htsjdk
- single filter failed adam->htsjdk
- multiple filters failed adam->htsjdk
- no ancestral allele set going htsjdk->adam
- ancestral allele set going htsjdk->adam
- no dbsnp membership set going htsjdk->adam
- dbsnp membership set going htsjdk->adam
- no hapmap2 membership set going htsjdk->adam
- hapmap2 membership set going htsjdk->adam
- no hapmap3 membership set going htsjdk->adam
- hapmap3 membership set going htsjdk->adam
- no validated set going htsjdk->adam
- validated set going htsjdk->adam
- no 1000G membership set going htsjdk->adam
- 1000G membership set going htsjdk->adam
- not somatic going htsjdk->adam
- somatic going htsjdk->adam
- no allele count going htsjdk->adam
- single allele count going htsjdk->adam
- multiple allele counts going htsjdk->adam
- no allele frequency going htsjdk->adam
- single allele frequency going htsjdk->adam
- single allele frequency is +Inf going htsjdk->adam
- single allele frequency is -Inf going htsjdk->adam
- multiple allele frequencies going htsjdk->adam
- no CIGAR going htsjdk->adam
- single CIGAR going htsjdk->adam
- multiple CIGARs going htsjdk->adam
- no read depth going htsjdk->adam
- single read depth going htsjdk->adam
- multiple read depths going htsjdk->adam
- no forward read depth going htsjdk->adam
- single forward read depth going htsjdk->adam
- multiple forward read depths going htsjdk->adam
- no reverse read depth going htsjdk->adam
- single reverse read depth going htsjdk->adam
- multiple reverse read depths going htsjdk->adam
- no ancestral allele set adam->htsjdk
- ancestral allele set adam->htsjdk
- no dbsnp membership set adam->htsjdk
- dbsnp membership set adam->htsjdk
- no hapmap2 membership set adam->htsjdk
- hapmap2 membership set adam->htsjdk
- no hapmap3 membership set adam->htsjdk
- hapmap3 membership set adam->htsjdk
- no validated set adam->htsjdk
- validated set adam->htsjdk
- no 1000G membership set adam->htsjdk
- 1000G membership set adam->htsjdk
- no allele count set adam->htsjdk
- allele count set adam->htsjdk
- no allele frequency set adam->htsjdk
- allele frequency set adam->htsjdk
- no cigar set adam->htsjdk
- cigar set adam->htsjdk
- no read depth set adam->htsjdk
- read depth set adam->htsjdk
- read depth without reference read depth
- reference read depth without read depth
- no forward read depth set adam->htsjdk
- forward read depth set adam->htsjdk
- reference forward read depth without forward read depth
- forward read depth without reference forward read depth
- no reverse read depth set adam->htsjdk
- reverse read depth set adam->htsjdk
- reference reverse read depth without reverse read depth
- reverse read depth without reference reverse read depth
- VCF INFO attribute Number=0 Type=Flag adam->htsjdk
- VCF INFO attribute Number=4 Type=Flag adam->htsjdk unsupported, strict !!! IGNORED !!!
- VCF INFO attribute Number=1 Type=Integer adam->htsjdk
- VCF INFO attribute Number=4 Type=Integer adam->htsjdk
- VCF INFO attribute Number=A Type=Integer adam->htsjdk
- VCF INFO attribute Number=R Type=Integer adam->htsjdk
- VCF INFO attribute Number=R Type=String adam->htsjdk
- VCF INFO attribute Number=G Type=String adam->htsjdk not supported
- VCF INFO attribute Number=0 Type=Flag htsjdk->adam
- VCF INFO attribute Number=1 Type=Integer htsjdk->adam
- VCF INFO attribute Number=4 Type=Integer htsjdk->adam
- VCF INFO attribute Number=4 Type=Float htsjdk->adam
- VCF INFO attribute Number=A Type=Integer htsjdk->adam
- VCF INFO attribute Number=R Type=Integer htsjdk->adam
- VCF INFO attribute Number=R Type=String htsjdk->adam
- VCF INFO attribute Number=G Type=String htsjdk->adam not supported
- VCF FORMAT attribute Number=0 Type=Flag adam->htsjdk not supported
- VCF FORMAT attribute Number=1 Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=4 Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=A Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=R Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=R Type=String adam->htsjdk
- VCF FORMAT attribute Number=0 Type=Flag htsjdk->adam is not supported
- VCF FORMAT attribute Number=1 Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=4 Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=4 Type=Float htsjdk->adam
- VCF FORMAT attribute Number=A Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=R Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=R Type=String htsjdk->adam
- VCF FORMAT attribute Number=G Type=String htsjdk->adam
- respect end position for symbolic alts
FastqRecordConverterSuite:
- test read name suffix and index of pair must match
- test parseReadInFastq, read suffix removal
- test parseReadInFastq, read metadata removal
- test parseReadInFastq, read quality shorter than read length, padded with B
- test parseReadInFastq, read quality longer than read length 
- test parseReadInFastq, no read quality
- testing FastqRecordConverter.convertPair with valid input
- testing FastqRecordConverter.convertPair with 7-line invalid input
- testing FastqRecordConverter.convertPair with invalid input: first read length and qual don't match
- testing FastqRecordConverter.convertPair with invalid input: second read length and qual don't match
- testing FastqRecordConverter.convertFragment with valid input
- testing FastqRecordConverter.convertFragment with another valid input having /1, /2 suffixes
- testing FastqRecordConverter.convertFragment with invalid input: different read names
- testing FastqRecordConverter.convertRead with valid input
- testing FastqRecordConverter.convertRead with valid input: setFirstOfPair set to true
- testing FastqRecordConverter.convertRead with valid input: setSecondOfPair set to true
- testing FastqRecordConverter.convertRead with valid input: setFirstOfPair and setSecondOfPair both true
- testing FastqRecordConverter.convertRead with valid input, no qual, strict
- testing FastqRecordConverter.convertRead with valid input, no qual, not strict
AttributeSuite:
- test SAMTagAndValue parsing
- Attributes can be correctly re-encoded as text SAM tags
GFF3HeaderWriterSuite:
- write gff3 header pragma
BinQualitiesSuite:
- make a quality score bin
- can't have a quality score bin with negative score
- can't have a quality score bin with high score below low
- can't have a quality score bin with high score above 255
- can't have a quality score bin with score outside
- make a quality score bin from a string
- quality score bin must have exactly 3 elements
- quality score bin must be integers
- must define at least one bin
- build multiple bins
- rewrite quality scores for a read
- rewriting quality scores fails if bins overlap
- rewriting quality scores fails if base is out of bounds
- skip read if qualities are null
- rewrite a read
IndexedFastaFileSuite:
- correctly generates sequence dictionary from .dict file
- correctly gets sequence
- fails when fai index is not provided
2020-01-24 12:03:19 WARN  IndexedFastaFile:190 - Caught exception java.lang.NullPointerException when loading FASTA sequence dictionary. Using empty dictionary instead.
- passes when dict is not provided and ValidationStringency = LENIENT
SAMRecordConverterSuite:
- testing the fields in an Alignment obtained from a mapped samRecord conversion
- testing the fields in an Alignment obtained from an unmapped samRecord conversion
- '*' quality gets nulled out
- don't keep denormalized fields
ADAMContextSuite:
- ctr is accessible
- load from an empty directory
- sc.loadParquet should not fail on unmapped reads
- sc.loadParquet should not load a file without a type specified
- can read a small .SAM file
- can read a small .SAM file with a bad header with lenient validation
- loading a sam file with a bad header and strict stringency should fail
- can read a small .CRAM file
- can read a small .SAM with all attribute tag types
- can filter a .SAM file based on quality
- Can convert to phred
- Can convert from phred
- Can read a .gtf file
- Can read a .bed file
- Can read a BED 12 file
- Can read a .narrowPeak file
- Can read a .interval_list file
2020-01-24 12:03:22 WARN  VariantContextConverter:1662 - Saw invalid info field java.lang.IllegalArgumentException: Multivalued flags are not supported for INFO lines: INFO=<ID=ABADFLAG,Number=.,Type=Flag,Description="A no good, very bad flag.">. Ignoring...
2020-01-24 12:03:22 WARN  VariantContextConverter:2169 - Generating field extractor from header line INFO=<ID=ABADFLAG,Number=.,Type=Flag,Description="A no good, very bad flag."> failed: java.lang.IllegalArgumentException: Multivalue flags are not supported for INFO lines: INFO=<ID=ABADFLAG,Number=.,Type=Flag,Description="A no good, very bad flag.">
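[editor's note] The two warnings above are triggered by a Flag INFO header declared with Number=., which the converter ignores; Flag fields carry no value and are expected to be declared with Number=0. A hedged htsjdk sketch of a well-formed declaration (the ID and description are illustrative):

import htsjdk.variant.vcf.{ VCFHeaderLineType, VCFInfoHeaderLine }

// Declaring a Flag INFO field with count 0 avoids the "multivalued flag" warnings above.
val flagLine = new VCFInfoHeaderLine("ABADFLAG", 0, VCFHeaderLineType.Flag,
  "A no good, very bad flag.")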
- can read a small .vcf file with a validation issue
- can read a small .vcf file
2020-01-24 12:03:23 WARN  VCFInputFormat:218 - file:/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/test-classes/test.vcf.gz is not splittable, consider using block-compressed gzip (BGZF)
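[editor's note] The warning above notes that a plain-gzipped VCF cannot be split across tasks; re-compressing it with BGZF makes it splittable. A hedged sketch using htsjdk's BlockCompressedOutputStream (file names are illustrative):

import java.io.File
import java.nio.file.{ Files, Paths }
import htsjdk.samtools.util.BlockCompressedOutputStream

// Re-compress a plain-text VCF as BGZF so the input format can split it; paths are illustrative.
val in = Files.newInputStream(Paths.get("test.vcf"))
val out = new BlockCompressedOutputStream(new File("test.vcf.bgz"))
val buf = new Array[Byte](64 * 1024)
Iterator.continually(in.read(buf)).takeWhile(_ > 0).foreach(n => out.write(buf, 0, n))
in.close()
out.close()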
- can read a gzipped .vcf file
- can read a vcf file with an empty alt
- can read a BGZF gzipped .vcf file with .gz file extension
- can read a BGZF gzipped .vcf file with .bgz file extension
- can read a vcf file with a projection
- can read an uncompressed BCFv2.2 file !!! IGNORED !!!
- can read a BGZF compressed BCFv2.2 file !!! IGNORED !!!
- loadIndexedVcf with 1 ReferenceRegion
- loadIndexedVcf with multiple ReferenceRegions
- load block compressed interleaved fastq
- import records from interleaved FASTQ: 1
- import records from interleaved FASTQ: 2
- import records from interleaved FASTQ: 3
- import records from interleaved FASTQ: 4
- import block compressed single fastq
- import records from single ended FASTQ: 1
- import records from single ended FASTQ: 2
- import records from single ended FASTQ: 3
- import records from single ended FASTQ: 4
- filter on load using the filter2 API
- saveAsParquet with file path
- saveAsParquet with file path, block size, page size
- saveAsParquet with save args
- read a HLA fasta from GRCh38
- read a gzipped fasta file
- read a fasta file with comments, gaps, and translation stops
- loadIndexedBam with 1 ReferenceRegion
- loadIndexedBam with multiple ReferenceRegions
- loadIndexedBam with multiple ReferenceRegions and indexed bams
- loadIndexedBam with multiple ReferenceRegions and a directory of indexed bams
- loadIndexedBam should throw exception without an index file
- loadIndexedBam should work with indexed file with index naming format <filename>.bai
- loadIndexedBam glob should throw exception without an index file
- loadBam with a glob
- loadBam with a directory
- load vcf with a glob
- load vcf from a directory
- load gvcf which contains a multi-allelic row from a directory
- load and save gvcf which contains rows without likelihoods
- parse annotations for multi-allelic rows
- load parquet with globs
- bad glob should fail
- empty directory should fail
- can read a SnpEff-annotated .vcf file
- loadAlignments should not fail on single-end and paired-end fastq reads
- load queryname sorted sam as fragments
- load query grouped sam as fragments
- load paired fastq
- load paired fastq without cache
- load paired fastq as fragments
- load paired fastq as fragments without cache
- load HTSJDK sequence dictionary
- load Bedtools .genome file as sequence dictionary
- load Bedtools .genome.txt file as sequence dictionary
- load UCSC Genome Browser chromInfo.txt file as sequence dictionary
- load unrecognized file extension as sequence dictionary fails
- load BED features with Bedtools .genome file as sequence dictionary
- load BED features with Bedtools .genome file as sequence dictionary, no matching features
- convert program record
- load program record from sam file
- load alignments from data frame
- load features from data frame
- load fragments from data frame
- load genotypes from data frame with default header lines
- load genotypes from data frame with empty header lines
- load reads from data frame
- load sequences from data frame
- load slices from data frame
- load variant contexts from data frame with default header lines
- load variant contexts from data frame with empty header lines
- load variants from data frame with default header lines
- load variants from data frame with empty header lines
- load alignments with metadata from data frame
- load features with metadata from data frame
- load fragments with metadata from data frame
- load genotypes with metadata from data frame
- load variant contexts with metadata from data frame
- load variants with metadata from data frame
- read a fasta file with short sequences as sequences
- read a fasta file with long sequences as sequences
- read a fasta file with short sequences as slices
- read a fasta file with long sequences as slices
CycleCovariateSuite:
- compute covariates for an unpaired read on the negative strand
- compute covariates for a first-of-pair read on the negative strand
- compute covariates for a second-of-pair read on the negative strand
- compute covariates for an unpaired read on the positive strand
- compute covariates for a first-of-pair read on the positive strand
- compute covariates for a second-of-pair read on the positive strand
AlignmentConverterSuite:
- testing the fields in a converted ADAM Read
- converting a read with null quality is OK
- convert a read to fastq
- reverse complement reads when converting to fastq
- converting to fastq with unmapped reads where read reverse complemented flag (0x10) was NOT set
- converting to fastq with unmapped reads where reverse complemented flag (0x10) was set
- converting a fragment with no alignments should yield unaligned reads
- converting a fragment with alignments should restore the alignments
- read negative strand is propagated even when not mapped
ConsensusGeneratorFromKnownsSuite:
- no consensuses for empty target
- no consensuses for reads that don't overlap a target
- return a consensus for read overlapping a single target
RichCigarSuite:
- moving 2 bp from a deletion to a match operator
- moving 2 bp from a insertion to a match operator
- moving 1 base in a two element cigar
- move to start of read
- process right clipped cigar
- process left clipped cigar
- process cigar clipped on both ends
MDTaggingSuite:
- test adding MDTags over boundary
- test adding MDTags; reads span full contig
- test adding MDTags; reads start inside first fragment
- test adding MDTags; reads end inside last fragment
- test adding MDTags; reads start inside first fragment and end inside last fragment
- test adding MDTags; reads start and end in middle fragments
2020-01-24 12:04:07 WARN  BlockManager:66 - Putting block rdd_5_3 failed due to exception java.lang.Exception: Contig chr2 not found in reference map with keys: chr1.
2020-01-24 12:04:07 WARN  BlockManager:66 - Block rdd_5_3 could not be removed as it was not found on disk or in memory
2020-01-24 12:04:07 ERROR Executor:91 - Exception in task 3.0 in stage 2.0 (TID 11)
java.lang.Exception: Contig chr2 not found in reference map with keys: chr1
	at org.bdgenomics.adam.util.ReferenceContigMap$$anonfun$extract$1.apply(ReferenceContigMap.scala:63)
	at org.bdgenomics.adam.util.ReferenceContigMap$$anonfun$extract$1.apply(ReferenceContigMap.scala:63)
	at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
	at scala.collection.AbstractMap.getOrElse(Map.scala:59)
	at org.bdgenomics.adam.util.ReferenceContigMap.extract(ReferenceContigMap.scala:61)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1$$anonfun$apply$2.apply(MDTagging.scala:76)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1$$anonfun$apply$2.apply(MDTagging.scala:71)
	at scala.Option$WithFilter.map(Option.scala:207)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1.apply(MDTagging.scala:71)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1.apply(MDTagging.scala:69)
	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:222)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1165)
	at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1156)
	at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:1091)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1156)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:882)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:335)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:286)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:123)
	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
2020-01-24 12:04:07 WARN  TaskSetManager:66 - Lost task 3.0 in stage 2.0 (TID 11, localhost, executor driver): java.lang.Exception: Contig chr2 not found in reference map with keys: chr1
	at org.bdgenomics.adam.util.ReferenceContigMap$$anonfun$extract$1.apply(ReferenceContigMap.scala:63)
	at org.bdgenomics.adam.util.ReferenceContigMap$$anonfun$extract$1.apply(ReferenceContigMap.scala:63)
	at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
	at scala.collection.AbstractMap.getOrElse(Map.scala:59)
	at org.bdgenomics.adam.util.ReferenceContigMap.extract(ReferenceContigMap.scala:61)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1$$anonfun$apply$2.apply(MDTagging.scala:76)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1$$anonfun$apply$2.apply(MDTagging.scala:71)
	at scala.Option$WithFilter.map(Option.scala:207)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1.apply(MDTagging.scala:71)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1.apply(MDTagging.scala:69)
	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:222)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1165)
	at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1156)
	at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:1091)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1156)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:882)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:335)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:286)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:123)
	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)

2020-01-24 12:04:07 ERROR TaskSetManager:70 - Task 3 in stage 2.0 failed 1 times; aborting job
- try realigning a read on a missing contig, stringency == STRICT
2020-01-24 12:04:07 WARN  MDTagging:190 - Caught exception when processing read chr2: java.lang.Exception: Contig chr2 not found in reference map with keys: chr1
- try realigning a read on a missing contig, stringency == LENIENT
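[editor's note] The pair of results above reflects the usual htsjdk ValidationStringency contract: under STRICT the missing-contig exception aborts the job (stack trace above), while under LENIENT it is logged and the read is passed through untagged. A generic sketch of that pattern, not the MDTagging implementation itself:

import htsjdk.samtools.ValidationStringency
import scala.util.{ Failure, Success, Try }

// Generic stringency handling: rethrow under STRICT, warn and fall back under LENIENT.
def tagOrSkip[R](read: R, stringency: ValidationStringency)(tag: R => R): R = {
  Try(tag(read)) match {
    case Success(tagged) => tagged
    case Failure(e) if stringency == ValidationStringency.STRICT => throw e
    case Failure(e) =>
      println(s"Caught exception $e when processing read. Leaving read untagged.")
      read
  }
}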
FileExtensionsSuite:
- ends in gzip extension
- is a vcf extension
PhredUtilsSuite:
- convert low phred score to log and back
- convert high phred score to log and back
- convert overflowing phred score to log and back and clip
- convert negative zero log probability to phred and clip
- round trip log probabilities
ReadDatasetSuite:
- create a new read genomic dataset
- create a new read genomic dataset with sequence dictionary
- save as parquet
- round trip as parquet
- save as fastq
- save as single file fastq
- filter read genomic dataset by reference region
- broadcast region join reads and features
- shuffle region join reads and features
- convert reads to alignments
- convert reads to sequences
- convert reads to slices
AlignmentDatasetSuite:
- sorting reads
- unmapped reads go at the end when sorting
- coverage does not fail on unmapped reads
- computes coverage
- computes coverage with multiple samples
- merges adjacent records with equal coverage values
- sorting reads by reference index
- round trip from ADAM to SAM and back to ADAM produces equivalent Read values
- round trip with single CRAM file produces equivalent Read values
- round trip with sharded CRAM file produces equivalent Read values
- SAM conversion sets read mapped flag properly
- load long FASTQ reads
- load FASTQ with no bases
- convert malformed FASTQ (no quality scores) => SAM => well-formed FASTQ => SAM
- round trip from ADAM to FASTQ and back to ADAM produces equivalent Read values
- round trip from ADAM to paired-FASTQ and back to ADAM produces equivalent Read values
- writing a small file with tags should produce the expected result
- writing a small sorted file as SAM should produce the expected result
- writing unordered sam from unordered sam
- writing ordered sam from unordered sam
- write single sam file back
- write single bam file back
- saveAsParquet with save args, sequence dictionary, and read group dictionary
- load parquet to sql, save, re-read from avro
- load from sam, save as partitioned parquet, and re-read from partitioned parquet
- save as SAM format
- save as sorted SAM format
- save as BAM format
- save as sorted BAM format
- save as FASTQ format
- save as ADAM parquet format
- saveAsSam SAM format
- saveAsSam SAM format single file
- saveAsSam sorted SAM format single file
- saveAsSam BAM format
- saveAsSam BAM format single file
- saveAsSam sorted BAM format single file
- saveAsFastq
- saveAsFastq as single file
- saveAsFastq with original base qualities
- saveAsFastq sorted by read name
- saveAsFastq sorted by read name with original base qualities
2020-01-24 12:04:32 WARN  RDDBoundAlignmentDataset:190 - Found 20 read names that don't occur exactly twice:
	1x:	20

Samples:
	simread:1:237728409:true
	simread:1:195211965:false
	simread:1:163841413:false
	simread:1:231911906:false
	simread:1:26472783:false
	simread:1:165341382:true
	simread:1:240344442:true
	simread:1:50683371:false
	simread:1:240997787:true
	simread:1:14397233:false
	simread:1:207027738:true
	simread:1:20101800:true
	simread:1:101556378:false
	simread:1:37577445:false
	simread:1:189606653:true
	simread:1:5469106:true
	simread:1:186794283:true
	simread:1:89554252:false
	simread:1:153978724:false
	simread:1:169801933:true
- saveAsFastq paired FASTQ
2020-01-24 12:04:33 WARN  RDDBoundAlignmentDataset:190 - Found 20 read names that don't occur exactly twice:
	1x:	20

Samples:
	simread:1:237728409:true
	simread:1:195211965:false
	simread:1:163841413:false
	simread:1:231911906:false
	simread:1:26472783:false
	simread:1:165341382:true
	simread:1:240344442:true
	simread:1:50683371:false
	simread:1:240997787:true
	simread:1:14397233:false
	simread:1:207027738:true
	simread:1:20101800:true
	simread:1:101556378:false
	simread:1:37577445:false
	simread:1:189606653:true
	simread:1:5469106:true
	simread:1:186794283:true
	simread:1:89554252:false
	simread:1:153978724:false
	simread:1:169801933:true
- saveAsPairedFastq
2020-01-24 12:04:34 WARN  RDDBoundAlignmentDataset:190 - Found 20 read names that don't occur exactly twice:
	1x:	20

Samples:
	simread:1:237728409:true
	simread:1:195211965:false
	simread:1:163841413:false
	simread:1:231911906:false
	simread:1:26472783:false
	simread:1:165341382:true
	simread:1:240344442:true
	simread:1:50683371:false
	simread:1:240997787:true
	simread:1:14397233:false
	simread:1:207027738:true
	simread:1:20101800:true
	simread:1:101556378:false
	simread:1:37577445:false
	simread:1:189606653:true
	simread:1:5469106:true
	simread:1:186794283:true
	simread:1:89554252:false
	simread:1:153978724:false
	simread:1:169801933:true
- saveAsPairedFastq as single files
- don't lose any reads when piping as SAM
2020-01-24 12:04:45 WARN  OutFormatterRunner:190 - Piped command List(sleep, 10) timed out after 5 seconds.
2020-01-24 12:04:45 WARN  OutFormatterRunner:190 - Piped command List(sleep, 10) timed out after 5 seconds.
- lose all records when a command times out
- lose no records without a timeout
2020-01-24 12:05:06 WARN  OutFormatterRunner:190 - Piped command List(python, /tmp/adamTestMvnPRoviTH/spark-121632f9-6878-49fb-b2db-c73140ef710f/userFiles-220ba5d7-60e5-45f5-bd5d-06978b7d9ae8/timeout.py) timed out after 5 seconds.
2020-01-24 12:05:06 WARN  OutFormatterRunner:190 - Piped command List(python, /tmp/adamTestMvnPRoviTH/spark-121632f9-6878-49fb-b2db-c73140ef710f/userFiles-220ba5d7-60e5-45f5-bd5d-06978b7d9ae8/timeout.py) timed out after 5 seconds.
- lose some records when a command times out
- don't lose any reads when piping as SAM using java pipe
- don't lose any reads when piping as BAM
- don't lose any reads when piping fastq to sam
- can properly set environment variables inside of a pipe
- read vcf from alignment pipe !!! IGNORED !!!
- use broadcast join to pull down reads mapped to targets
- use broadcast join against to pull down reads mapped to targets
- use right outer broadcast join to pull down reads mapped to targets
- use right outer broadcast join against to pull down reads mapped to targets
- use shuffle join with feature spanning partitions
- use shuffle join to pull down reads mapped to targets
- use shuffle join with flankSize to pull down reads mapped close to targets
- use right outer shuffle join to pull down reads mapped to targets
- use left outer shuffle join to pull down reads mapped to targets
- use full outer shuffle join to pull down reads mapped to targets
- use shuffle join with group by to pull down reads mapped to targets
- use right outer shuffle join with group by to pull down reads mapped to targets
- cannot provide empty quality score bins
- cannot provide bins with a gap
- cannot provide overlapping bins
- binning quality scores in reads succeeds even if reads have no quality scores
- bin quality scores in reads
- union two read files together
- test k-mer counter
- test dataset based k-mer counter
- transform reads to slice genomic dataset
- transform reads to coverage genomic dataset
- transform reads to feature genomic dataset
- transform reads to fragment genomic dataset
- transform reads to genotype genomic dataset
loading /tmp/adamTestMvnPRoviTH/TempSuite8445296142518449013.adam as parquet into RDD...
loading /tmp/adamTestMvnPRoviTH/TempSuite2489955710542731886.adam as parquet into RDD...
- transform reads to variant genomic dataset
- cannot have a null processing step ID
- convert a processing description to htsjdk
- GenomicDataset.sort does not fail on unmapped reads
- GenomicDataset.sortLexicographically does not fail on unmapped reads
- left normalize indels
- running base recalibration with downsampling doesn't drop reads
- filter RDD bound alignments by MAPQ
- filter dataset bound alignments by MAPQ
- filter RDD bound unaligned alignments
- filter dataset bound unaligned alignments
- filter RDD bound unpaired alignments
- filter dataset bound unpaired alignments
- filter RDD bound duplicate alignments
- filter dataset bound duplicate alignments
- filter RDD bound alignments to primary alignments
- filter dataset bound alignments to primary alignments
- filter RDD bound alignments to read group
- filter dataset bound alignments to read group
- filter RDD bound alignments to read groups
- filter dataset bound alignments to read groups
- filter RDD bound alignments to sample
- filter dataset bound alignments to sample
- filter RDD bound alignments to samples
- filter dataset bound alignments to samples
- sort by read name
- transform dataset via java API
- convert alignments to reads
SmithWatermanSuite:
- gather max position from simple scoring matrix
- gather max position from irregular scoring matrix
- gather max position from irregular scoring matrix with deletions
- score simple alignment with constant gap
- score irregular scoring matrix
- score irregular scoring matrix with indel
- can unroll cigars correctly
- execute simple trackback
- execute trackback with indel
- run end to end smith waterman for simple reads
- run end to end smith waterman for short sequences with indel
- run end to end smith waterman for longer sequences with snp
- run end to end smith waterman for longer sequences with short indel
- run end to end smith waterman for shorter sequence in longer sequence
- run end to end smith waterman for shorter sequence in longer sequence, with indel
- smithWaterman - simple alignment
MdTagSuite:
- null md tag
- zero length md tag
- md tag with non-digit initial value
- md tag invalid base
- md tag, pure insertion
- md tag, pure insertion, test 2
- md tag pure insertion equality
- md tag equality and hashcode
- valid md tags
- get start of read with no mismatches or deletions
- get start of read with no mismatches, but with a deletion at the start
- get start of read with mismatches at the start
- get end of read with no mismatches or deletions
- check that mdtag and rich record return same end
- get end of read with no mismatches, but a deletion at end
- CIGAR with N operator
- CIGAR with multiple N operators
- CIGAR with P operators
- Get correct matches for mdtag with insertion
- Get correct matches for mdtag with mismatches and insertion
- Get correct matches for mdtag with insertion between mismatches
- Get correct matches for mdtag with intron between mismatches
- Get correct matches for mdtag with intron and deletion between mismatches
- Throw exception when number of deleted bases in mdtag disagrees with CIGAR
- Get correct matches for mdtag with mismatch, insertion and deletion
- Get correct matches for mdtag with mismatches, insertion and deletion
- Get correct matches for MDTag with mismatches and deletions
- Get correct matches base from MDTag and CIGAR with N
- get end of read with mismatches and a deletion at end
- get correct string out of mdtag with no mismatches
- get correct string out of mdtag with mismatches at start
- get correct string out of mdtag with deletion at end
- get correct string out of mdtag with mismatches at end
- get correct string out of complex mdtag
- check complex mdtag
- get gapped reference
- move a cigar alignment by two for a read
- rewrite alignment to all matches
- rewrite alignment to two mismatches followed by all matches
- rewrite alignment to include a deletion but otherwise all matches
- rewrite alignment to include an insertion at the start of the read but otherwise all matches
- create new md tag from read vs. reference, perfect match
- create new md tag from read vs. reference, perfect alignment match, 1 mismatch
- create new md tag from read vs. reference, alignment with deletion
- create new md tag from read vs. reference, alignment with insert
- handle '=' and 'X' operators
- CIGAR/MD tag mismatch should cause errors
GenomicDatasetSuite:
- processing a command that is the spark root directory should return an absolute path
- processing a command that is just a single word should do nothing
- processing a command should handle arguments that include spaces
- processing a command that is a single substitution should succeed
- processing a command that is multiple words should split the string
- process a command that is multiple words with a replacement
- process a command that is multiple words with multiple replacements
ParallelFileMergerSuite:
- cannot write both empty gzip block and cram eof
- buffer size must be non-negative
- get the size of several files
- block size must be positive and non-zero when trying to merge files
- must provide files to merge
- if two files are both below the block size, they should merge into one shard
- merge two files where one is greater than the block size
- merge a sharded sam file
- merge a sharded bam file
- merge a sharded cram file
- can't turn a negative index into a path
- generate a path from an index
IndelTableSuite:
- check for indels in a region with known indels
- check for indels in a contig that doesn't exist
- check for indels in a region without known indels
- build indel table from rdd of variants
SnpTableSuite:
- create an empty snp table
- create a snp table from variants on multiple contigs
- create a snp table from a larger set of variants
- perform lookups on multi-contig snp table
- perform lookups on larger snp table
RealignIndelsSuite:
- map reads to targets
- checking mapping to targets for artificial reads
- checking alternative consensus for artificial reads
- checking extraction of reference from reads
- checking realigned reads for artificial input
- checking realigned reads for artificial input with reference file
- checking realigned reads for artificial input using knowns
- checking realigned reads for artificial input using knowns and reads
- skip realigning reads if target is highly covered
- skip realignment if target is an insufficient LOD improvement
- realign reads to an insertion
- test mismatch quality scoring
- test mismatch quality scoring for no mismatches
- test mismatch quality scoring for offset
- test mismatch quality scoring with early exit
- test mismatch quality scoring after unpacking read
- we shouldn't try to realign a region with no target
- we shouldn't try to realign reads with no indel evidence
- test OP and OC tags
- realign a read with an insertion that goes off the end of the read
- if realigning a target doesn't improve the LOD, don't drop reads
- extract seq/qual from a read with no clipped bases
- extract seq/qual from a read with clipped bases at start
- extract seq/qual from a read with clipped bases at end
- if unclip is selected, don't drop base when extracting from a read with clipped bases
- get cigar and coordinates for read that spans indel, no clipped bases
- get cigar and coordinates for read that spans deletion, clipped bases at start
- get cigar and coordinates for read that falls wholly before insertion
- get cigar and coordinates for read that falls wholly after insertion
- get cigar and coordinates for read that falls wholly after deletion
- get cigar and coordinates for read that partially spans insertion, no clipped bases
- get cigar and coordinates for read that partially spans insertion, clipped bases at end
- get cigar and coordinates for read that partially spans insertion, clipped bases both ends
BaseQualityRecalibrationSuite:
- BQSR Test Input #1 w/ VCF Sites without caching
- BQSR Test Input #1 w/ VCF Sites with caching
- BQSR Test Input #1 w/ VCF Sites with serialized caching
DinucCovariateSuite:
- computing dinucleotide pairs for a single base sequence should return (N,N)
- compute dinucleotide pairs for a string of all valid bases
- compute dinucleotide pairs for a string with an N
- compute covariates for a read on the negative strand
- compute covariates for a read on the positive strand
SequenceDictionarySuite:
- Convert from sam sequence record and back
- Convert from SAM sequence dictionary file (with extra fields)
- merge into existing dictionary
- Convert from SAM sequence dictionary and back
- Can retrieve sequence by name
- SequenceDictionary's with same single element are equal
- SequenceDictionary's with same two elements are equal
- SequenceDictionary's with different elements are unequal
- SequenceDictionaries with same elements in different order are compatible
- isCompatible tests equality on overlap
- The addition + works correctly
- The append operation ++ works correctly
- ContainsRefName works correctly for different string types
- Apply on name works correctly for different String types
- convert from sam sequence record and back
- convert from sam sequence dictionary and back
- conversion to sam sequence dictionary has correct sort order
- load sequence dictionary from VCF file
- empty sequence dictionary must be empty
- test filter to reference name
- test filter to reference names
- test filter to reference name by function
GenomicPositionPartitionerSuite:
- partitions the UNMAPPED ReferencePosition into the top partition
- if we do not have a contig for a record, we throw an IAE
- partitioning into N pieces on M total sequence length, where N > M, results in M partitions
- correctly partitions a single dummy sequence into two pieces
- correctly counts cumulative lengths
- correctly partitions positions across two dummy sequences
- test that we can range partition ADAMRecords
- test that we can range partition ADAMRecords indexed by sample
- test that simple partitioning works okay on a reasonable set of ADAMRecords
- test indexed ReferencePosition partitioning works on a set of indexed ADAMRecords
CoverageSuite:
- Convert to coverage from valid Feature
- Convert to coverage from valid Feature with sampleId
- Convert to coverage from Feature with null/empty contigName fails with correct error
- Convert to coverage from Feature with no start/end position fails with correct error
- Convert to coverage from Feature with no score fails with correct error
InnerTreeRegionJoinSuite:
- Ensure same reference regions get passed together
- Overlapping reference regions
- Multiple reference regions do not throw exception
VariantDatasetSuite:
- union two variant genomic datasets together
- round trip to parquet
- save and reload from partitioned parquet
- use broadcast join to pull down variants mapped to targets
- use right outer broadcast join to pull down variants mapped to targets
- use shuffle join to pull down variants mapped to targets
- use right outer shuffle join to pull down variants mapped to targets
- use left outer shuffle join to pull down variants mapped to targets
- use full outer shuffle join to pull down variants mapped to targets
- use shuffle join with group by to pull down variants mapped to targets
- use right outer shuffle join with group by to pull down variants mapped to targets
- convert back to variant contexts
- load parquet to sql, save, re-read from avro
2020-01-24 12:06:31 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform variants to slice genomic dataset
- transform variants to coverage genomic dataset
- transform variants to feature genomic dataset
- transform variants to fragment genomic dataset
- transform variants to read genomic dataset
- transform variants to genotype genomic dataset
- transform variants to variant context genomic dataset
- filter RDD bound variants to filters passed
- filter dataset bound variants to filters passed
- filter RDD bound variants by quality
- filter dataset bound variants by quality
- filter RDD bound variants by read depth
- filter dataset bound variants by read depth
- filter RDD bound variants by reference read depth
- filter dataset bound variants by reference read depth
- filter RDD bound single nucleotide variants
- filter dataset bound single nucleotide variants
- filter RDD bound multiple nucleotide variants
- filter dataset bound multiple nucleotide variants
- filter RDD bound indel variants
- filter dataset bound indel variants
- filter RDD bound variants to single nucleotide variants
- filter dataset bound variants to single nucleotide variants
- filter RDD bound variants to multiple nucleotide variants
- filter dataset bound variants to multiple nucleotide variants
- filter RDD bound variants to indel variants
- filter dataset bound variants to indel variants
- transform dataset via java API
Run completed in 6 minutes, 20 seconds.
Total number of tests run: 1175
Suites: completed 68, aborted 0
Tests: succeeded 1175, failed 0, canceled 0, ignored 5, pending 0
All tests passed.
[INFO] 
[INFO] >>> scoverage-maven-plugin:1.1.1:report (default-cli) > [scoverage]test @ adam-core-spark2_2.11 >>>
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-core-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-core-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-core-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/scala added.
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/generated-sources/src/main/scala added.
[INFO] 
[INFO] --- exec-maven-plugin:1.5.0:java (generate-scala-products) @ adam-core-spark2_2.11 ---
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[INFO] 
[INFO] --- exec-maven-plugin:1.5.0:java (generate-scala-projection-fields) @ adam-core-spark2_2.11 ---
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-core-spark2_2.11 ---
[INFO] Modified 2 of 203 .scala files
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:pre-compile (default-cli) @ adam-core-spark2_2.11 ---
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-core-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-core-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/java:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/scala:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/generated-sources/src/main/scala:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/generated-sources/annotations:-1: info: compiling
[INFO] Compiling 138 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/scoverage-classes at 1579896412952
[WARNING] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/main/scala/org/bdgenomics/adam/rdd/GenomicDataset.scala:3099: warning: no valid targets for annotation on value uTag - it is discarded unused. You may specify targets with meta-annotations, e.g. @(transient @getter)
[WARNING]   @transient val uTag: TypeTag[U]
[WARNING]    ^
[INFO] [info] Cleaning datadir [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/scoverage-data]
[INFO] [info] Beginning coverage instrumentation
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[WARNING] [warn] Could not instrument [EmptyTree$/null]. No pos.
[INFO] [info] Instrumentation completed [17311 statements]
[INFO] [info] Wrote instrumentation file [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/scoverage-data/scoverage.coverage.xml]
[INFO] [info] Will write measurement data to [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/scoverage-data]
[WARNING] warning: there were 87 deprecation warnings; re-run with -deprecation for details
[WARNING] warning: there were 5 feature warnings; re-run with -feature for details
[WARNING] three warnings found
[INFO] prepare-compile in 0 s
[INFO] compile in 35 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ adam-core-spark2_2.11 ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 5 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/scoverage-classes
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:post-compile (default-cli) @ adam-core-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-test-source (add-test-source) @ adam-core-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:testResources (default-testResources) @ adam-core-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 152 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-core-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ adam-core-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- maven-surefire-plugin:3.0.0-M3:test (default-test) @ adam-core-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:2.0.0:test (test) @ adam-core-spark2_2.11 ---
Discovery starting.
Discovery completed in 2 seconds, 21 milliseconds.
Run starting. Expected test count is: 1175
RepairPartitionsSuite:
- don't pull from the first partition
- properly handle pulling from an empty iterator
- gets a single read from the partition if there are no other reads in the pair
- gets all the reads from a pair from the start of a partition
- properly handle dropping from an empty iterator
- don't drop from the first partition
- drop a single read from the partition if there are no other reads in the pair
- drops all the reads from a pair from the start of a partition
- only append to the first partition
- drop a single read from the partition and append read when in the middle
- drop reads from the start and don't append when in the last partition
- can't have more records than number of partitions
- unroll array for broadcast
2020-01-24 12:07:32 WARN  Utils:66 - Your hostname, research-jenkins-worker-08 resolves to a loopback address: 127.0.1.1; using 192.168.10.28 instead (on interface eth0)
2020-01-24 12:07:32 WARN  Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2020-01-24 12:07:32 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
- move pairs around an rdd
FlagStatSuite:
- Standard FlagStat test
ModPartitionerSuite:
- report number of partitions correctly
- partition a number that is lower than the number of partitions and positive
- partition a number that is greater than the number of partitions and positive
- partition a number that is lower than the number of partitions and negative
- partition a number that is greater than the number of partitions and negative
- fire an exception if input is not an integer
TwoBitFileSuite:
- correctly read sequence from .2bit file
- correctly return masked sequences from .2bit file
- correctly return Ns from .2bit file
- correctly calculates sequence dictionary
NonoverlappingRegionsSuite:
- alternating returns an alternating seq of items
- Single region returns itself
- Two adjacent regions will be merged
- Nonoverlapping regions will all be returned
- Many overlapping regions will all be merged
- ADAMRecords return proper references
ReferencePositionSuite:
- create reference position from mapped read
- create reference position from variant
- create reference position from genotype
- create reference position from variant starting at vcf 0
- create reference position from genotype starting at vcf 0
ConsensusGeneratorFromReadsSuite:
- checking search for consensus list for artificial reads
FragmentConverterSuite:
- build a fragment collector and convert to a read
- if a fragment isn't associated with a contig, don't get a fragment collector
- convert an rdd of discontinuous fragments, all from the same contig
- convert an rdd of contiguous fragments, all from the same contig
- convert an rdd of varied fragments from multiple contigs
FileMergerSuite:
- cannot write both empty gzip block and cram eof
- buffer size must be non-negative
RichAlignmentSuite:
- Unclipped Start
- Unclipped End
- tags contains optional fields
- read overlap unmapped read
- read overlap reference position
- read overlap same position different contig
SingleReadBucketSuite:
- convert unmapped pair to fragment
- convert proper pair to fragment
- convert read pair to fragment with first of pair chimeric read
RecalibrationTableSuite:
- look up quality scores in table
FlankSlicesSuite:
- don't put flanks on non-adjacent slices
- put flanks on adjacent slices
InterleavedFastqInputFormatSuite:
- interleaved FASTQ hadoop reader: interleaved_fastq_sample1.ifq->interleaved_fastq_sample1.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample2.ifq->interleaved_fastq_sample2.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample3.ifq->interleaved_fastq_sample3.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample4.ifq->interleaved_fastq_sample4.ifq.output
- interleaved FASTQ hadoop reader: interleaved_fastq_sample5.ifq->interleaved_fastq_sample5.ifq.output
TreeRegionJoinSuite:
- run a join between data on a single contig
ReferenceRegionSuite:
- cannot create an empty predicate
- parse empty string throws IllegalArgumentException
- parse contigName only string into reference regions
- parse to end strings into reference regions
- parse string into reference regions
- contains(: ReferenceRegion)
- contains(: ReferencePosition)
- merge
- overlaps and covers
- overlapsBy and coversBy
- distance(: ReferenceRegion)
- distance(: ReferencePosition)
- unstrandedDistance
- subtract fails on non-overlapping and non-covering regions
- subtract
- create region from unmapped read fails
- create region from read with null alignment positions fails
- create stranded region from unmapped read fails
- create stranded region from read with null alignment positions fails
- create stranded region from read with null alignment strand fails
- create stranded region from read on forward strand
- create stranded region from read on reverse strand
- create region from mapped read contains read start and end
- validate that adjacent regions can be merged
- validate that non-adjacent regions cannot be merged
- validate that nearby regions can be merged
- validate that non-nearby regions cannot be merged
- compute convex hull of two sets
- region name is sanitized when creating region from read
- intersection fails on non-overlapping regions
- intersection fails when minOverlap is not met
- compute intersection
- overlap tests for oriented reference region
- check the width of a reference region
- make a reference region for a variant or genotype
- make a reference region for a variant or genotype starting at VCF 0
- uniformly pad a reference region
- uniformly pad a reference region into negative coordinate space, ensure non negative start
- unevenly pad a reference region
- unevenly pad a reference region into negative coordinate space, ensure non negative start
- can build an open ended reference region
- can build an open ended reference region with strand
- can build a reference region with an open start position
- can build a reference region with an open start position with strand
- can build a reference region that covers the entirety of a contig
- can build a reference region that covers the entirety of a contig with strand
- convert a genotype and then get the reference region
- create region from feature with null alignment positions fails
- create stranded region from feature with null alignment positions fails
- create stranded region from feature with null alignment strand fails
- create stranded region from feature on forward strand
- create stranded region from feature on reverse strand
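The ReferenceRegionSuite cases above exercise half-open genomic intervals: overlap, adjacency, merging, hulls, padding, and so on. A minimal sketch of such a region type under those assumptions (SketchRegion is hypothetical; ADAM's ReferenceRegion also carries strand and many more operations):

import scala.math.{ max, min }

// Half-open interval [start, end) on a named reference sequence.
case class SketchRegion(referenceName: String, start: Long, end: Long) {
  require(start >= 0 && end > start, "region must have a non-negative start and positive width")

  // Regions overlap when they share a reference name and their intervals intersect.
  def overlaps(that: SketchRegion): Boolean =
    referenceName == that.referenceName && start < that.end && that.start < end

  // Adjacent regions touch end-to-start without overlapping.
  def isAdjacent(that: SketchRegion): Boolean =
    referenceName == that.referenceName && (end == that.start || that.end == start)

  // Merging is only defined for overlapping or adjacent regions; the result covers both.
  def merge(that: SketchRegion): SketchRegion = {
    require(overlaps(that) || isAdjacent(that), "cannot merge non-overlapping, non-adjacent regions")
    SketchRegion(referenceName, min(start, that.start), max(end, that.end))
  }
}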
CoverageDatasetSuite:
- correctly saves coverage
- can read a bed file to coverage
- correctly filters coverage with predicate
- keeps sample metadata
- can read a bed file with multiple samples to coverage
- correctly flatmaps coverage without aggregated bins
- correctly flatmaps coverage with aggregated bins
- collapses coverage records in one partition
- approximately collapses coverage records in multiple partitions
2020-01-24 12:07:50 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform coverage to slice genomic dataset
- transform coverage to feature genomic dataset
2020-01-24 12:07:52 WARN  Utils:66 - Truncated the string representation of a plan since it was too large. This behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.
- transform coverage to fragment genomic dataset
- transform coverage to read genomic dataset
- transform coverage to genotype genomic dataset
- transform coverage to variant genomic dataset
- transform coverage to variant context genomic dataset
- copy coverage rdd
- copy coverage dataset
- copy coverage rdd without sequence dictionary
- copy coverage dataset without sequence dictionary
- transform dataset via java API
RecalibratorSuite:
- don't replace quality if quality was null
- if no covariates, return alignment
- skip recalibration if base is below quality threshold
- recalibrate changed bases above quality threshold
SliceDatasetSuite:
- create a new slice genomic dataset
- create a new slice genomic dataset with sequence dictionary
- merge slices into a sequence genomic dataset
- save as parquet
- round trip as parquet
- save as fasta
- save as single file fasta
- convert slices to reads
- convert slices to sequences
GenotypeDatasetSuite:
- union two genotype genomic datasets together
- round trip to parquet
- round trip to partitioned parquet
- use broadcast join to pull down genotypes mapped to targets
- use right outer broadcast join to pull down genotypes mapped to targets
- use shuffle join to pull down genotypes mapped to targets
- use right outer shuffle join to pull down genotypes mapped to targets
- use left outer shuffle join to pull down genotypes mapped to targets
- use full outer shuffle join to pull down genotypes mapped to targets
- use shuffle join with group by to pull down genotypes mapped to targets
- use right outer shuffle join with group by to pull down genotypes mapped to targets
- convert back to variant contexts
- load parquet to sql, save, re-read from avro
2020-01-24 12:08:17 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform genotypes to slice genomic dataset
- transform genotypes to coverage genomic dataset
- transform genotypes to feature genomic dataset
- transform genotypes to fragment genomic dataset
- transform genotypes to read genomic dataset
- transform genotypes to variant genomic dataset
- transform genotypes to variant context genomic dataset
- loading genotypes then converting to variants yields same output as loading variants
- filter RDD bound genotypes to genotype filters passed
- filter dataset bound genotypes to genotype filters passed
- filter RDD bound genotypes by genotype quality
- filter dataset bound genotypes by genotype quality
- filter RDD bound genotypes by read depth
- filter dataset bound genotypes by read depth
- filter RDD bound genotypes by alternate read depth
- filter dataset bound genotypes by alternate read depth
- filter RDD bound genotypes by reference read depth
- filter dataset bound genotypes by reference read depth
- filter RDD bound genotypes by sample
- filter dataset bound genotypes by sample
- filter RDD bound genotypes by samples
- filter dataset bound genotypes by samples
- filter RDD bound no call genotypes
- filter dataset no call genotypes
- round trip gVCF END attribute without nested variant annotations rdd bound
- round trip gVCF END attribute without nested variant annotations dataset bound
- round trip gVCF END attribute with nested variant annotations rdd bound
- round trip gVCF END attribute with nested variant annotations dataset bound
- transform dataset via java API
FragmentDatasetSuite:
- don't lose any reads when piping interleaved fastq to sam
- don't lose any reads when piping tab5 to sam
- don't lose any reads when piping tab6 to sam
- use broadcast join to pull down fragments mapped to targets
- use right outer broadcast join to pull down fragments mapped to targets
- use shuffle join to pull down fragments mapped to targets
- use right outer shuffle join to pull down fragments mapped to targets
- use left outer shuffle join to pull down fragments mapped to targets
- use full outer shuffle join to pull down fragments mapped to targets
- use shuffle join with group by to pull down fragments mapped to targets
- use right outer shuffle join with group by to pull down fragments mapped to targets
- bin quality scores in fragments
- union two genomic datasets of fragments together
- load parquet to sql, save, re-read from avro
- transform fragments to slice genomic dataset
- transform fragments to coverage genomic dataset
- transform fragments to feature genomic dataset
- transform fragments to read genomic dataset
- transform fragments to genotype genomic dataset
- transform fragments to variant genomic dataset
- transform fragments to variant context genomic dataset
- paired read names with index sequences in read names can group into fragments
- interleaved paired read names with index sequences in read names can group into fragments
- interleaved paired read names with index sequences in read names as fragments
- transform dataset via java API
- dataset and rdd conversion to alignments are equivalent
SingleFastqInputFormatSuite:
- FASTQ hadoop reader: fastq_sample1.fq->single_fastq_sample1.fq.output
- FASTQ hadoop reader: fastq_sample2.fq->single_fastq_sample2.fq.output
- FASTQ hadoop reader: fastq_sample3.fq->single_fastq_sample3.fq.output
- FASTQ hadoop reader: fastq_sample4.fq->single_fastq_sample4.fq.output
VariantContextDatasetSuite:
- load a gvcf with a missing info field set to .
- union two variant context genomic datasets together
- can write, then read in .vcf file
- can write as a single file via simple saveAsVcf method, then read in .vcf file
- can write as a single file via full saveAsVcf method, then read in .vcf file
- transform a vcf file with bad header
- read a vcf file with multi-allelic variants to split
- support VCFs with +Inf/-Inf float values
- support VCFs with `nan` instead of `NaN` float values
- don't lose any variants when piping as VCF
- pipe works with empty partitions
- don't lose any non-default VCF header lines or attributes when piping as VCF
- save a file sorted by contig index
- save a lexicographically sorted file
- save a multiallelic gvcf !!! IGNORED !!!
- test metadata
- save sharded bgzip vcf
- save bgzip vcf as single file
- can't save file with non-vcf extension
- transform variant contexts to slice genomic dataset
- transform variant contexts to coverage genomic dataset
- transform variant contexts to feature genomic dataset
- transform variant contexts to fragment genomic dataset
- transform variant contexts to read genomic dataset
- transform variant contexts to genotype genomic dataset
- transform variant contexts to variant genomic dataset
- save and reload from partitioned parquet
- transform dataset via java API
ConsensusSuite:
- test the insertion of a consensus insertion into a reference
- test the insertion of a consensus deletion into a reference
- inserting empty consensus returns the reference
SequenceDatasetSuite:
- create a new sequence genomic dataset
- create a new sequence genomic dataset with sequence dictionary
- save as parquet
- round trip as parquet
- save as fasta
- save as single file fasta
- convert sequences to reads
- convert sequences to slices
- slice sequences to a maximum length
- slice sequences shorter than maximum length
- filter sequences by overlapping region
- filter sequences failing to overlap region
- filter sequences by overlapping regions
- filter sequences failing to overlap regions
- slice sequences overlapping a smaller region
- slice sequences overlapping a larger region
- slice sequences failing to overlap a region
- slice sequences overlapping smaller regions
- slice sequences overlapping larger regions
- slice sequences failing to overlap regions
AttributeUtilsSuite:
- parseTags returns a reasonable set of tagStrings
- parseTags works with NumericSequence tagType
- empty string is parsed as zero tagStrings
- incorrectly formatted tag throws an exception
- string tag with a ':' in it is correctly parsed
- oq string tag with many ':' in it is correctly parsed
- oq string tag with a ',' in it is correctly parsed
- if a tag is an array but doesn't define its format, throw
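The AttributeUtilsSuite cases above parse SAM optional fields of the form TAG:TYPE:VALUE (for example NM:i:0), where string values may themselves contain ':' characters. A minimal parsing sketch under that assumption (SketchTag and parseTag are hypothetical names, not ADAM's Attribute model):

case class SketchTag(tag: String, typeCode: String, value: String)

// Splits on the first two ':' only, so string values containing ':' are kept intact.
def parseTag(raw: String): SketchTag =
  raw.split(":", 3) match {
    case Array(tag, typeCode, value) => SketchTag(tag, typeCode, value)
    case _ => throw new IllegalArgumentException(s"Incorrectly formatted tag: $raw")
  }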
MarkDuplicatesSuite:
- single read
- reads at different positions
- reads at the same position
- reads at the same position with clipping
- reads on reverse strand
- unmapped reads
- read pairs
- read pairs with fragments
- quality scores
- read pairs that cross chromosomes
- single fragment
- fragments at different positions
- fragments at the same position
- fragments at the same position with clipping
- fragments on reverse strand
- unmapped fragments
- read pairs as fragments
- read pairs with fragments as fragments
- chimeric fragments
- inverse pairs
- supplemental reads
TranscriptEffectConverterSuite:
- parse empty transcript effect
- parse empty transcript effect strict validation stringency
- parse invalid transcript effect
- parse invalid transcript effect strict validation stringency
- parse transcript effect
- parse empty list VCF ANN attribute
- parse empty list VCF ANN attribute strict validation stringency
- parse empty string VCF ANN attribute
- parse empty string VCF ANN attribute strict validation stringency
- parse invalid VCF ANN attribute
- parse invalid VCF ANN attribute strict validation stringency
- parse VCF ANN attribute with one transcript effect
- parse VCF ANN attribute with multiple transcript effects
- convert to transcript effect from null VCF ANN attribute in variant context
- convert to transcript effect from empty list VCF ANN attribute in variant context
- convert to transcript effect from empty list VCF ANN attribute in variant context strict validation stringency
- convert to transcript effect from empty string VCF ANN attribute in variant context
- convert to transcript effect from empty string VCF ANN attribute in variant context strict validation stringency
- convert to transcript effect from invalid VCF ANN attribute in variant context
- convert to transcript effect from invalid VCF ANN attribute in variant context strict validation stringency
2020-01-24 12:09:35 WARN  TranscriptEffectConverter$:190 - Could not convert VCF INFO reserved key ANN value to TranscriptEffect, caught java.lang.NumberFormatException: For input string: "not a number".
- convert to transcript effect from VCF ANN attribute with invalid number in variant context lenient validation stringency
2020-01-24 12:09:35 WARN  TranscriptEffectConverter$:190 - Could not convert VCF INFO reserved key ANN value to TranscriptEffect, caught java.lang.NumberFormatException: For input string: "not a number".
- convert to transcript effect from VCF ANN attribute with invalid fraction in variant context lenient validation stringency
- convert to transcript effect from VCF ANN attribute with invalid number in variant context strict validation stringency
- convert to transcript effect from VCF ANN attribute with invalid fraction in variant context strict validation stringency
- convert to transcript effect from VCF ANN attribute in variant context different alt allele
- convert to transcript effect from VCF ANN attribute in variant context same alt allele
- convert to transcript effect from VCF ANN attribute in variant context multiple effects same alt allele
- convert transcript effect to VCF ANN attribute value
- convert transcript effect with null fields to VCF ANN attribute value
2020-01-24 12:09:35 WARN  TranscriptEffectConverter$:190 - Incorrect fractional value ?/2, missing numerator
- convert transcript effect with incorrect fractional value to VCF ANN attribute value
IndelRealignmentTargetSuite:
- checking simple realignment target
- creating simple target from read with deletion
- creating simple target from read with insertion
- joining simple realignment targets on same chr
- joining simple realignment targets on different chr throws exception
- creating targets from three intersecting reads, same indel
- creating targets from three intersecting reads, two different indel
- creating targets from two disjoint reads
- creating targets for artificial reads: one-by-one
- creating targets for artificial reads: all-at-once (merged)
- creating indel targets for mason reads
- additional indel targets taken from consensus generator
NormalizationUtilsSuite:
- cannot move an indel left if there are no bases to its left
- move a simple indel to farthest position left until bases run out
- move a simple indel to farthest position left, past length of indel
- cannot move a left normalized indel in a short tandem repeat
- move an indel in a short tandem repeat
- move an indel in a short tandem repeat of more than 2 bases, where shift is not an integer multiple of repeated sequence length
- moving a simple read with single deletion that cannot shift
- shift an indel left by 0 in a cigar
- shift an indel left by 1 in a cigar
- do not left align a complex read which is already left aligned
ReadGroupDictionarySuite:
- simple conversion to and from sam read group
- sample name must be set
- simple equality checks
- get samples from read group dictionary
- empty read group is empty
- merging a dictionary with itself should work
- round trip a record with all attributes set
RightOuterTreeRegionJoinSuite:
- Ensure same reference regions get passed together
- Overlapping reference regions
InnerShuffleRegionJoinSuite:
- Overlapping reference regions
- Multiple reference regions do not throw exception
FeatureDatasetSuite:
- round trip GTF format
- save GTF as GFF3 format
- save GTF as BED format
- save GTF as IntervalList format
- save GTF as NarrowPeak format
- save GFF3 as GTF format
- save GFF3 as BED format
- save GFF3 as IntervalList format
- save GFF3 as NarrowPeak format
- round trip GFF3 format
- ignore FASTA sequence in GFF3 file
- save BED as GTF format
- save BED as GFF3 format
- save BED as IntervalList format
- save BED as NarrowPeak format
- round trip BED6 format
- keeps sample metadata
- round trip BED12 format
- save to UCSC BED format
- save IntervalList as GTF format
- save IntervalList as GFF3 format
- save IntervalList as BED format
- save IntervalList as IntervalList format
- save IntervalList as NarrowPeak format
- round trip IntervalList format
- save NarrowPeak as GTF format
- save NarrowPeak as GFF3 format
- save NarrowPeak as BED format
- save NarrowPeak as IntervalList format
- save NarrowPeak as NarrowPeak format
- round trip NarrowPeak format
- sort by reference
- sort by reference and feature fields
- sort gene features by reference and gene structure
- sort transcript features by reference and gene structure
- sort exon features by reference and gene structure
- sort intron features by reference and gene structure
- correctly flatmaps CoverageDataset from FeatureDataset
- use broadcast join to pull down features mapped to targets
- use right outer broadcast join to pull down features mapped to targets
- use shuffle join to pull down features mapped to targets
- use right outer shuffle join to pull down features mapped to targets
- use left outer shuffle join to pull down features mapped to targets
- use full outer shuffle join to pull down features mapped to targets
- use shuffle join with group by to pull down features mapped to targets
- use right outer shuffle join with group by to pull down features mapped to targets
- union two feature rdds together
- obtain sequence dictionary contig lengths from header in IntervalList format
- don't lose any features when piping as BED format
- don't lose any features when piping as GTF format
- don't lose any features when piping as GFF3 format
- don't lose any features when piping as NarrowPeak format
- load parquet to sql, save, re-read from avro
- load partitioned parquet to sql, save, re-read from avro
2020-01-24 12:10:06 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform features to slice genomic dataset
- transform features to coverage genomic dataset
- transform features to fragment genomic dataset
- transform features to read genomic dataset
- transform features to genotype genomic dataset
- transform features to variant genomic dataset
- transform features to variant context genomic dataset
- filter RDD bound features by feature type
- filter dataset bound features by feature type
- filter RDD bound features by feature types
- filter dataset bound features by feature types
- filter RDD bound features by gene
- filter dataset bound features by gene
- filter RDD bound features by genes
- filter dataset bound features by genes
- filter RDD bound features by transcript
- filter dataset bound features by transcript
- filter RDD bound features by transcripts
- filter dataset bound features by transcripts
- filter RDD bound features by exon
- filter dataset bound features by exon
- filter RDD bound features by exons
- filter dataset bound features by exons
- filter RDD bound features by score
- filter dataset bound features by score
- filter RDD bound features by parent
- filter dataset bound features by parent
- filter RDD bound features by parents
- filter dataset bound features by parents
- filter RDD bound features by attribute
- filter dataset bound features by attribute
- transform dataset via java API
AlphabetSuite:
- test size of a case-sensitive alphabet
- test apply of a case-sensitive alphabet
- test reverse complement of a case-sensitive alphabet
- test exact reverse complement of a case-sensitive alphabet
- test size of a case-insensitive alphabet
- test apply of a case-insensitive alphabet
- test reverse complement of a case-insensitive alphabet
- test exact reverse complement of a case-insensitive alphabet
- DNA alphabet
- map unknown bases to N
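The AlphabetSuite cases above cover case-sensitive and case-insensitive reverse complementation of DNA, with unknown bases mapped to N. A minimal case-insensitive sketch (SketchDnaAlphabet is a hypothetical object, not ADAM's Alphabet API):

object SketchDnaAlphabet {
  private val complement: Map[Char, Char] = Map(
    'A' -> 'T', 'T' -> 'A', 'C' -> 'G', 'G' -> 'C',
    'a' -> 't', 't' -> 'a', 'c' -> 'g', 'g' -> 'c')

  // Reverses the sequence and complements each base, mapping anything unrecognized to N.
  def reverseComplement(sequence: String): String =
    sequence.reverse.map(base => complement.getOrElse(base, 'N'))
}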
SortedGenomicDatasetSuite:
- testing that partition and sort provide correct outputs
- testing copartition maintains or adds sort
- testing that we don't drop any data on the right side even though it doesn't map to a partition on the left
- testing that sorted shuffleRegionJoin matches unsorted
- testing that sorted fullOuterShuffleRegionJoin matches unsorted
- testing that sorted rightOuterShuffleRegionJoin matches unsorted
- testing that sorted leftOuterShuffleRegionJoin matches unsorted
- testing that we can persist the sorted knowledge
VariantContextConverterSuite:
- Convert htsjdk site-only SNV to ADAM
- Convert somatic htsjdk site-only SNV to ADAM
- Convert htsjdk site-only CNV to ADAM
- Convert htsjdk SNV w/ genotypes w/ phase information to ADAM
- Convert htsjdk SNV with different variant filters to ADAM
- Convert htsjdk SNV with different genotype filters to ADAM
- Convert ADAM site-only SNV to htsjdk
- Convert ADAM SNV w/ genotypes to htsjdk
- Convert ADAM SNV w/ genotypes but bad SB to htsjdk with strict validation
2020-01-24 12:10:23 WARN  VariantContextConverter:2300 - Applying annotation extraction function <function2> to {"variant": {"referenceName": "1", "start": 0, "end": 1, "names": [], "splitFromMultiAllelic": false, "referenceAllele": "A", "alternateAllele": "T", "quality": null, "filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "annotation": null}, "referenceName": null, "start": null, "end": null, "variantCallingAnnotations": {"filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "downsampled": null, "baseQRankSum": null, "fisherStrandBiasPValue": 3.0, "rmsMapQ": 0.0, "mapq0Reads": 5, "mqRankSum": null, "readPositionRankSum": null, "genotypePriors": [], "genotypePosteriors": [], "vqslod": null, "culprit": null, "attributes": {}}, "sampleId": "NA12878", "sampleDescription": null, "processingDescription": null, "alleles": ["REF", "ALT"], "expectedAlleleDosage": null, "referenceReadDepth": null, "alternateReadDepth": null, "readDepth": null, "minReadDepth": null, "genotypeQuality": null, "genotypeLikelihoods": [], "nonReferenceLikelihoods": [], "strandBiasComponents": [0, 2], "splitFromMultiAllelic": false, "phased": false, "phaseSetId": null, "phaseQuality": null} failed with java.lang.IllegalArgumentException: requirement failed: Illegal strand bias components length. Must be empty or 4. In:
{"variant": {"referenceName": "1", "start": 0, "end": 1, "names": [], "splitFromMultiAllelic": false, "referenceAllele": "A", "alternateAllele": "T", "quality": null, "filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "annotation": null}, "referenceName": null, "start": null, "end": null, "variantCallingAnnotations": {"filtersApplied": null, "filtersPassed": null, "filtersFailed": [], "downsampled": null, "baseQRankSum": null, "fisherStrandBiasPValue": 3.0, "rmsMapQ": 0.0, "mapq0Reads": 5, "mqRankSum": null, "readPositionRankSum": null, "genotypePriors": [], "genotypePosteriors": [], "vqslod": null, "culprit": null, "attributes": {}}, "sampleId": "NA12878", "sampleDescription": null, "processingDescription": null, "alleles": ["REF", "ALT"], "expectedAlleleDosage": null, "referenceReadDepth": null, "alternateReadDepth": null, "readDepth": null, "minReadDepth": null, "genotypeQuality": null, "genotypeLikelihoods": [], "nonReferenceLikelihoods": [], "strandBiasComponents": [0, 2], "splitFromMultiAllelic": false, "phased": false, "phaseSetId": null, "phaseQuality": null}.
- Convert ADAM SNV w/ genotypes but bad SB to htsjdk with lenient validation
- Convert htsjdk multi-allelic sites-only SNVs to ADAM
- Convert htsjdk multi-allelic SNVs to ADAM and back to htsjdk
- Convert gVCF reference records to ADAM
- Convert htsjdk variant context with no IDs to ADAM
- Convert htsjdk variant context with one ID to ADAM
- Convert htsjdk variant context with multiple IDs to ADAM
- Convert ADAM variant context with no names to htsjdk
- Convert ADAM variant context with one name to htsjdk
- Convert ADAM variant context with multiple names to htsjdk
- Convert ADAM variant context with null filters applied to htsjdk
- Convert ADAM variant context with no filters applied to htsjdk
- Convert ADAM variant context with passing filters to htsjdk
- Convert ADAM variant context with failing filters to htsjdk
- no phasing set going htsjdk->adam
- phased but no phase set info going htsjdk->adam
- set phase set and extract going htsjdk->adam
- no allelic depth going htsjdk->adam
- set allelic depth going htsjdk->adam
- no gt read depth going htsjdk->adam
- extract gt read depth going htsjdk->adam
- no min gt read depth going htsjdk->adam
- extract min gt read depth going htsjdk->adam
- no genotype quality going htsjdk->adam
- extract genotype quality going htsjdk->adam
- no phred likelihood going htsjdk->adam
- extract phred likelihoods going htsjdk->adam
- no strand bias info going htsjdk->adam
- extract strand bias info going htsjdk->adam
- no filters going htsjdk->adam
- filters passed going htsjdk->adam
- extract single filter going htsjdk->adam
- extract multiple filters going htsjdk->adam
- no fisher strand bias going htsjdk->adam
- extract fisher strand bias going htsjdk->adam
- no rms mapping quality going htsjdk->adam
- extract rms mapping quality going htsjdk->adam
- no mq0 going htsjdk->adam
- extract mq0 going htsjdk->adam
- no gt read depth going adam->htsjdk
- extract gt read depth going adam->htsjdk
- throw iae if missing one component of gt read depth going adam->htsjdk
- no depth going adam->htsjdk
- extract depth going adam->htsjdk
- no min depth going adam->htsjdk
- extract min depth going adam->htsjdk
- no quality going adam->htsjdk
- extract quality going adam->htsjdk
- no genotype likelihoods going adam->htsjdk
- extract genotype likelihoods going adam->htsjdk
- no strand bias going adam->htsjdk
- malformed strand bias going adam->htsjdk
- extract strand bias going adam->htsjdk
- no phasing info going adam->htsjdk
- unphased going adam->htsjdk
- phased but no ps/pq going adam->htsjdk
- phased but no pq going adam->htsjdk
- phased but no ps going adam->htsjdk
- phased going adam->htsjdk
- no filter info going adam->htsjdk
- if filters applied, must set passed/failed going adam->htsjdk
- filters passed going adam->htsjdk
- if filters failed, must set filters failed going adam->htsjdk
- single filter failed going adam->htsjdk
- multiple filters failed going adam->htsjdk
- no fisher strand bias going adam->htsjdk
- extract fisher strand bias going adam->htsjdk
- no rms mapping quality going adam->htsjdk
- extract rms mapping quality going adam->htsjdk
- no mapping quality 0 reads going adam->htsjdk
- extract mapping quality 0 reads going adam->htsjdk
- no names set going htsjdk->adam
- single name set going htsjdk->adam
- multiple names set going htsjdk->adam
- no quality going htsjdk->adam
- quality set going htsjdk->adam
- no filters applied going htsjdk->adam
- filters applied and passed going htsjdk->adam
- single filter applied and failed going htsjdk->adam
- multiple filters applied and failed going htsjdk->adam
- no names set adam->htsjdk
- set a single name adam->htsjdk
- set multiple names adam->htsjdk
- no qual set adam->htsjdk
- qual is set adam->htsjdk
- no filters applied adam->htsjdk
- null filters applied adam->htsjdk
- filters passed adam->htsjdk
- if filter failed, must have filters adam->htsjdk
- single filter failed adam->htsjdk
- multiple filters failed adam->htsjdk
- no ancestral allele set going htsjdk->adam
- ancestral allele set going htsjdk->adam
- no dbsnp membership set going htsjdk->adam
- dbsnp membership set going htsjdk->adam
- no hapmap2 membership set going htsjdk->adam
- hapmap2 membership set going htsjdk->adam
- no hapmap3 membership set going htsjdk->adam
- hapmap3 membership set going htsjdk->adam
- no validated set going htsjdk->adam
- validated set going htsjdk->adam
- no 1000G membership set going htsjdk->adam
- 1000G membership set going htsjdk->adam
- not somatic going htsjdk->adam
- somatic going htsjdk->adam
- no allele count going htsjdk->adam
- single allele count going htsjdk->adam
- multiple allele counts going htsjdk->adam
- no allele frequency going htsjdk->adam
- single allele frequency going htsjdk->adam
- single allele frequency is +Inf going htsjdk->adam
- single allele frequency is -Inf going htsjdk->adam
- multiple allele frequencies going htsjdk->adam
- no CIGAR going htsjdk->adam
- single CIGAR going htsjdk->adam
- multiple CIGARs going htsjdk->adam
- no read depth going htsjdk->adam
- single read depth going htsjdk->adam
- multiple read depths going htsjdk->adam
- no forward read depth going htsjdk->adam
- single forward read depth going htsjdk->adam
- multiple forward read depths going htsjdk->adam
- no reverse read depth going htsjdk->adam
- single reverse read depth going htsjdk->adam
- multiple reverse read depths going htsjdk->adam
- no ancestral allele set adam->htsjdk
- ancestral allele set adam->htsjdk
- no dbsnp membership set adam->htsjdk
- dbsnp membership set adam->htsjdk
- no hapmap2 membership set adam->htsjdk
- hapmap2 membership set adam->htsjdk
- no hapmap3 membership set adam->htsjdk
- hapmap3 membership set adam->htsjdk
- no validated set adam->htsjdk
- validated set adam->htsjdk
- no 1000G membership set adam->htsjdk
- 1000G membership set adam->htsjdk
- no allele count set adam->htsjdk
- allele count set adam->htsjdk
- no allele frequency set adam->htsjdk
- allele frequency set adam->htsjdk
- no cigar set adam->htsjdk
- cigar set adam->htsjdk
- no read depth set adam->htsjdk
- read depth set adam->htsjdk
- read depth without reference read depth
- reference read depth without read depth
- no forward read depth set adam->htsjdk
- forward read depth set adam->htsjdk
- reference forward read depth without forward read depth
- forward read depth without reference forward read depth
- no reverse read depth set adam->htsjdk
- reverse read depth set adam->htsjdk
- reference reverse read depth without reverse read depth
- reverse read depth without reference reverse read depth
- VCF INFO attribute Number=0 Type=Flag adam->htsjdk
- VCF INFO attribute Number=4 Type=Flag adam->htsjdk unsupported, strict !!! IGNORED !!!
- VCF INFO attribute Number=1 Type=Integer adam->htsjdk
- VCF INFO attribute Number=4 Type=Integer adam->htsjdk
- VCF INFO attribute Number=A Type=Integer adam->htsjdk
- VCF INFO attribute Number=R Type=Integer adam->htsjdk
- VCF INFO attribute Number=R Type=String adam->htsjdk
- VCF INFO attribute Number=G Type=String adam->htsjdk not supported
- VCF INFO attribute Number=0 Type=Flag htsjdk->adam
- VCF INFO attribute Number=1 Type=Integer htsjdk->adam
- VCF INFO attribute Number=4 Type=Integer htsjdk->adam
- VCF INFO attribute Number=4 Type=Float htsjdk->adam
- VCF INFO attribute Number=A Type=Integer htsjdk->adam
- VCF INFO attribute Number=R Type=Integer htsjdk->adam
- VCF INFO attribute Number=R Type=String htsjdk->adam
- VCF INFO attribute Number=G Type=String htsjdk->adam not supported
- VCF FORMAT attribute Number=0 Type=Flag adam->htsjdk not supported
- VCF FORMAT attribute Number=1 Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=4 Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=A Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=R Type=Integer adam->htsjdk
- VCF FORMAT attribute Number=R Type=String adam->htsjdk
- VCF FORMAT attribute Number=0 Type=Flag htsjdk->adam is not supported
- VCF FORMAT attribute Number=1 Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=4 Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=4 Type=Float htsjdk->adam
- VCF FORMAT attribute Number=A Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=R Type=Integer htsjdk->adam
- VCF FORMAT attribute Number=R Type=String htsjdk->adam
- VCF FORMAT attribute Number=G Type=String htsjdk->adam
- respect end position for symbolic alts
FastqRecordConverterSuite:
- test read name suffix and index of pair must match
- test parseReadInFastq, read suffix removal
- test parseReadInFastq, read metadata removal
- test parseReadInFastq, read quality shorter than read length, padded with B
- test parseReadInFastq, read quality longer than read length 
- test parseReadInFastq, no read quality
- testing FastqRecordConverter.convertPair with valid input
- testing FastqRecordConverter.convertPair with 7-line invalid input
- testing FastqRecordConverter.convertPair with invalid input: first read length and qual don't match
- testing FastqRecordConverter.convertPair with invalid input: second read length and qual don't match
- testing FastqRecordConverter.convertFragment with valid input
- testing FastqRecordConverter.convertFragment with another valid input having /1, /2 suffixes
- testing FastqRecordConverter.convertFragment with invalid input: different read names
- testing FastqRecordConverter.convertRead with valid input
- testing FastqRecordConverter.convertRead with valid input: setFirstOfPair set to true
- testing FastqRecordConverter.convertRead with valid input: setSecondOfPair set to true
- testing FastqRecordConverter.convertRead with valid input: setFirstOfPair and setSecondOfPair both true
- testing FastqRecordConverter.convertRead with valid input, no qual, strict
- testing FastqRecordConverter.convertRead with valid input, no qual, not strict
AttributeSuite:
- test SAMTagAndValue parsing
- Attributes can be correctly re-encoded as text SAM tags
GFF3HeaderWriterSuite:
- write gff3 header pragma
BinQualitiesSuite:
- make a quality score bin
- can't have a quality score bin with negative score
- can't have a quality score bin with high score below low
- can't have a quality score bin with high score above 255
- can't have a quality score bin with score outside
- make a quality score bin from a string
- quality score bin must have exactly 3 elements
- quality score bin must be integers
- must define at least one bin
- build multiple bins
- rewrite quality scores for a read
- rewriting quality scores fails if bins overlap
- rewriting quality scores fails if base is out of bounds
- skip read if qualities are null
- rewrite a read
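The BinQualitiesSuite cases above define bins over the quality-score range that rewrite any score falling in a bin to a single representative value, and reject malformed bins. A small sketch of that behaviour (SketchQualityBin and binQualityScores are hypothetical names):

case class SketchQualityBin(low: Int, high: Int, value: Int) {
  require(low >= 0, "low score must be non-negative")
  require(high > low, "high score must be above low score")
  require(high <= 255, "high score must not exceed 255")
  require(value >= low && value < high, "bin value must fall inside the bin")

  def contains(score: Int): Boolean = score >= low && score < high
}

// Rewrites each score to the value of the bin containing it; a score outside all bins is an error.
def binQualityScores(scores: Seq[Int], bins: Seq[SketchQualityBin]): Seq[Int] = {
  require(bins.nonEmpty, "must define at least one bin")
  scores.map { score =>
    bins.find(_.contains(score)).map(_.value).getOrElse(
      throw new IllegalArgumentException(s"no bin covers quality score $score"))
  }
}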
IndexedFastaFileSuite:
- correctly generates sequence dictionary from .dict file
- correctly gets sequence
- fails when fai index is not provided
2020-01-24 12:10:24 WARN  IndexedFastaFile:190 - Caught exception java.lang.NullPointerException when loading FASTA sequence dictionary. Using empty dictionary instead.
- passes when dict is not provided and ValidationStringency = LENIENT
SAMRecordConverterSuite:
- testing the fields in an Alignment obtained from a mapped samRecord conversion
- testing the fields in an Alignment obtained from an unmapped samRecord conversion
- '*' quality gets nulled out
- don't keep denormalized fields
ADAMContextSuite:
- ctr is accessible
- load from an empty directory
- sc.loadParquet should not fail on unmapped reads
- sc.loadParquet should not load a file without a type specified
- can read a small .SAM file
- can read a small .SAM file with a bad header with lenient validation
- loading a sam file with a bad header and strict stringency should fail
- can read a small .CRAM file
- can read a small .SAM with all attribute tag types
- can filter a .SAM file based on quality
- Can convert to phred
- Can convert from phred
- Can read a .gtf file
- Can read a .bed file
- Can read a BED 12 file
- Can read a .narrowPeak file
- Can read a .interval_list file
2020-01-24 12:10:27 WARN  VariantContextConverter:1662 - Saw invalid info field java.lang.IllegalArgumentException: Multivalued flags are not supported for INFO lines: INFO=<ID=ABADFLAG,Number=.,Type=Flag,Description="A no good, very bad flag.">. Ignoring...
2020-01-24 12:10:27 WARN  VariantContextConverter:2169 - Generating field extractor from header line INFO=<ID=ABADFLAG,Number=.,Type=Flag,Description="A no good, very bad flag."> failed: java.lang.IllegalArgumentException: Multivalue flags are not supported for INFO lines: INFO=<ID=ABADFLAG,Number=.,Type=Flag,Description="A no good, very bad flag.">
- can read a small .vcf file with a validation issue
- can read a small .vcf file
2020-01-24 12:10:28 WARN  VCFInputFormat:218 - file:/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-core/target/2.11.12/test-classes/test.vcf.gz is not splittable, consider using block-compressed gzip (BGZF)
- can read a gzipped .vcf file
- can read a vcf file with an empty alt
- can read a BGZF gzipped .vcf file with .gz file extension
- can read a BGZF gzipped .vcf file with .bgz file extension
- can read a vcf file with a projection
- can read an uncompressed BCFv2.2 file !!! IGNORED !!!
- can read a BGZF compressed BCFv2.2 file !!! IGNORED !!!
- loadIndexedVcf with 1 ReferenceRegion
- loadIndexedVcf with multiple ReferenceRegions
- load block compressed interleaved fastq
- import records from interleaved FASTQ: 1
- import records from interleaved FASTQ: 2
- import records from interleaved FASTQ: 3
- import records from interleaved FASTQ: 4
- import block compressed single fastq
- import records from single ended FASTQ: 1
- import records from single ended FASTQ: 2
- import records from single ended FASTQ: 3
- import records from single ended FASTQ: 4
- filter on load using the filter2 API
- saveAsParquet with file path
- saveAsParquet with file path, block size, page size
- saveAsParquet with save args
- read a HLA fasta from GRCh38
- read a gzipped fasta file
- read a fasta file with comments, gaps, and translation stops
- loadIndexedBam with 1 ReferenceRegion
- loadIndexedBam with multiple ReferenceRegions
- loadIndexedBam with multiple ReferenceRegions and indexed bams
- loadIndexedBam with multiple ReferenceRegions and a directory of indexed bams
- loadIndexedBam should throw exception without an index file
- loadIndexedBam should work with indexed file with index naming format <filename>.bai
- loadIndexedBam glob should throw exception without an index file
- loadBam with a glob
- loadBam with a directory
- load vcf with a glob
- load vcf from a directory
- load gvcf which contains a multi-allelic row from a directory
- load and save gvcf which contains rows without likelihoods
- parse annotations for multi-allelic rows
- load parquet with globs
- bad glob should fail
- empty directory should fail
- can read a SnpEff-annotated .vcf file
- loadAlignments should not fail on single-end and paired-end fastq reads
- load queryname sorted sam as fragments
- load query grouped sam as fragments
- load paired fastq
- load paired fastq without cache
- load paired fastq as fragments
- load paired fastq as fragments without cache
- load HTSJDK sequence dictionary
- load Bedtools .genome file as sequence dictionary
- load Bedtools .genome.txt file as sequence dictionary
- load UCSC Genome Browser chromInfo.txt file as sequence dictionary
- load unrecognized file extension as sequence dictionary fails
- load BED features with Bedtools .genome file as sequence dictionary
- load BED features with Bedtools .genome file as sequence dictionary, no matching features
- convert program record
- load program record from sam file
- load alignments from data frame
- load features from data frame
- load fragments from data frame
- load genotypes from data frame with default header lines
- load genotypes from data frame with empty header lines
- load reads from data frame
- load sequences from data frame
- load slices from data frame
- load variant contexts from data frame with default header lines
- load variant contexts from data frame with empty header lines
- load variants from data frame with default header lines
- load variants from data frame with empty header lines
- load alignments with metadata from data frame
- load features with metadata from data frame
- load fragments with metadata from data frame
- load genotypes with metadata from data frame
- load variant contexts with metadata from data frame
- load variants with metadata from data frame
- read a fasta file with short sequences as sequences
- read a fasta file with long sequences as sequences
- read a fasta file with short sequences as slices
- read a fasta file with long sequences as slices
CycleCovariateSuite:
- compute covariates for an unpaired read on the negative strand
- compute covariates for a first-of-pair read on the negative strand
- compute covariates for a second-of-pair read on the negative strand
- compute covariates for an unpaired read on the positive strand
- compute covariates for a first-of-pair read on the positive strand
- compute covariates for a second-of-pair read on the positive strand
AlignmentConverterSuite:
- testing the fields in a converted ADAM Read
- converting a read with null quality is OK
- convert a read to fastq
- reverse complement reads when converting to fastq
- converting to fastq with unmapped reads where read reverse complemented flag (0x10) was NOT set
- converting to fastq with unmapped reads where reverse complemented flag (0x10) was set
- converting a fragment with no alignments should yield unaligned reads
- converting a fragment with alignments should restore the alignments
- read negative strand is propagated even when not mapped
ConsensusGeneratorFromKnownsSuite:
- no consensuses for empty target
- no consensuses for reads that don't overlap a target
- return a consensus for read overlapping a single target
RichCigarSuite:
- moving 2 bp from a deletion to a match operator
- moving 2 bp from a insertion to a match operator
- moving 1 base in a two element cigar
- move to start of read
- process right clipped cigar
- process left clipped cigar
- process cigar clipped on both ends
MDTaggingSuite:
- test adding MDTags over boundary
- test adding MDTags; reads span full contig
- test adding MDTags; reads start inside first fragment
- test adding MDTags; reads end inside last fragment
- test adding MDTags; reads start inside first fragment and end inside last fragment
- test adding MDTags; reads start and end in middle fragments
2020-01-24 12:11:12 WARN  BlockManager:66 - Putting block rdd_5_3 failed due to exception java.lang.Exception: Contig chr2 not found in reference map with keys: chr1.
2020-01-24 12:11:12 WARN  BlockManager:66 - Block rdd_5_3 could not be removed as it was not found on disk or in memory
2020-01-24 12:11:12 ERROR Executor:91 - Exception in task 3.0 in stage 2.0 (TID 11)
java.lang.Exception: Contig chr2 not found in reference map with keys: chr1
	at org.bdgenomics.adam.util.ReferenceContigMap$$anonfun$extract$1.apply(ReferenceContigMap.scala:63)
	at org.bdgenomics.adam.util.ReferenceContigMap$$anonfun$extract$1.apply(ReferenceContigMap.scala:63)
	at scala.collection.MapLike$class.getOrElse(MapLike.scala:128)
	at scala.collection.AbstractMap.getOrElse(Map.scala:59)
	at org.bdgenomics.adam.util.ReferenceContigMap.extract(ReferenceContigMap.scala:61)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1$$anonfun$apply$2.apply(MDTagging.scala:76)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1$$anonfun$apply$2.apply(MDTagging.scala:71)
	at scala.Option$WithFilter.map(Option.scala:207)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1.apply(MDTagging.scala:71)
	at org.bdgenomics.adam.rdd.read.MDTagging$$anonfun$addMDTagsBroadcast$1.apply(MDTagging.scala:69)
	at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
	at org.apache.spark.storage.memory.MemoryStore.putIterator(MemoryStore.scala:222)
	at org.apache.spark.storage.memory.MemoryStore.putIteratorAsValues(MemoryStore.scala:299)
	at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1165)
	at org.apache.spark.storage.BlockManager$$anonfun$doPutIterator$1.apply(BlockManager.scala:1156)
	at org.apache.spark.storage.BlockManager.doPut(BlockManager.scala:1091)
	at org.apache.spark.storage.BlockManager.doPutIterator(BlockManager.scala:1156)
	at org.apache.spark.storage.BlockManager.getOrElseUpdate(BlockManager.scala:882)
	at org.apache.spark.rdd.RDD.getOrCompute(RDD.scala:335)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:286)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
	at org.apache.spark.scheduler.Task.run(Task.scala:123)
	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
2020-01-24 12:11:12 WARN  TaskSetManager:66 - Lost task 3.0 in stage 2.0 (TID 11, localhost, executor driver): java.lang.Exception: Contig chr2 not found in reference map with keys: chr1

2020-01-24 12:11:12 ERROR TaskSetManager:70 - Task 3 in stage 2.0 failed 1 times; aborting job
- try realigning a read on a missing contig, stringency == STRICT
2020-01-24 12:11:13 WARN  MDTagging:190 - Caught exception when processing read chr2: java.lang.Exception: Contig chr2 not found in reference map with keys: chr1
- try realigning a read on a missing contig, stringency == LENIENT
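The two results above show the same missing-contig failure handled under different validation stringencies: STRICT lets the exception abort the job (the stack trace above), while LENIENT logs a warning and carries on. A minimal sketch of that pattern around htsjdk's ValidationStringency enum (withStringency is a hypothetical helper, not ADAM's MDTagging code):

import htsjdk.samtools.ValidationStringency

// Applies f under the given stringency: STRICT propagates failures,
// LENIENT logs and falls back to the unmodified input, SILENT just falls back.
def withStringency[T](input: T, stringency: ValidationStringency)(f: T => T): T =
  try {
    f(input)
  } catch {
    case e: Exception if stringency != ValidationStringency.STRICT =>
      if (stringency == ValidationStringency.LENIENT) {
        System.err.println(s"Caught exception ${e.getMessage} when processing input; skipping")
      }
      input
  }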
FileExtensionsSuite:
- ends in gzip extension
- is a vcf extension
PhredUtilsSuite:
- convert low phred score to log and back
- convert high phred score to log and back
- convert overflowing phred score to log and back and clip
- convert negative zero log probability to phred and clip
- round trip log probabilities
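The PhredUtilsSuite cases above round-trip between Phred scores and (log-scaled) error probabilities; the underlying relation is Q = -10 * log10(p). A minimal sketch of the conversion (hypothetical helper names):

import scala.math.{ log10, pow, round }

// Phred score Q corresponds to error probability p via Q = -10 * log10(p).
def phredToErrorProbability(phred: Int): Double = pow(10.0, -phred / 10.0)

def errorProbabilityToPhred(probability: Double): Int = round(-10.0 * log10(probability)).toInt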
ReadDatasetSuite:
- create a new read genomic dataset
- create a new read genomic dataset with sequence dictionary
- save as parquet
- round trip as parquet
- save as fastq
- save as single file fastq
- filter read genomic dataset by reference region
- broadcast region join reads and features
- shuffle region join reads and features
- convert reads to alignments
- convert reads to sequences
- convert reads to slices
AlignmentDatasetSuite:
- sorting reads
- unmapped reads go at the end when sorting
- coverage does not fail on unmapped reads
- computes coverage
- computes coverage with multiple samples
- merges adjacent records with equal coverage values
- sorting reads by reference index
- round trip from ADAM to SAM and back to ADAM produces equivalent Read values
- round trip with single CRAM file produces equivalent Read values
- round trip with sharded CRAM file produces equivalent Read values
- SAM conversion sets read mapped flag properly
- load long FASTQ reads
- load FASTQ with no bases
- convert malformed FASTQ (no quality scores) => SAM => well-formed FASTQ => SAM
- round trip from ADAM to FASTQ and back to ADAM produces equivalent Read values
- round trip from ADAM to paired-FASTQ and back to ADAM produces equivalent Read values
- writing a small file with tags should produce the expected result
- writing a small sorted file as SAM should produce the expected result
- writing unordered sam from unordered sam
- writing ordered sam from unordered sam
- write single sam file back
- write single bam file back
- saveAsParquet with save args, sequence dictionary, and read group dictionary
- load parquet to sql, save, re-read from avro
- load from sam, save as partitioned parquet, and re-read from partitioned parquet
- save as SAM format
- save as sorted SAM format
- save as BAM format
- save as sorted BAM format
- save as FASTQ format
- save as ADAM parquet format
- saveAsSam SAM format
- saveAsSam SAM format single file
- saveAsSam sorted SAM format single file
- saveAsSam BAM format
- saveAsSam BAM format single file
- saveAsSam sorted BAM format single file
- saveAsFastq
- saveAsFastq as single file
- saveAsFastq with original base qualities
- saveAsFastq sorted by read name
- saveAsFastq sorted by read name with original base qualities
2020-01-24 12:11:40 WARN  RDDBoundAlignmentDataset:190 - Found 20 read names that don't occur exactly twice:
	1x:	20

Samples:
	simread:1:237728409:true
	simread:1:195211965:false
	simread:1:163841413:false
	simread:1:231911906:false
	simread:1:26472783:false
	simread:1:165341382:true
	simread:1:240344442:true
	simread:1:50683371:false
	simread:1:240997787:true
	simread:1:14397233:false
	simread:1:207027738:true
	simread:1:20101800:true
	simread:1:101556378:false
	simread:1:37577445:false
	simread:1:189606653:true
	simread:1:5469106:true
	simread:1:186794283:true
	simread:1:89554252:false
	simread:1:153978724:false
	simread:1:169801933:true
- saveAsFastq paired FASTQ
- saveAsPairedFastq
- saveAsPairedFastq as single files
- don't lose any reads when piping as SAM
2020-01-24 12:11:53 WARN  OutFormatterRunner:190 - Piped command List(sleep, 10) timed out after 5 seconds.
2020-01-24 12:11:53 WARN  OutFormatterRunner:190 - Piped command List(sleep, 10) timed out after 5 seconds.
- lose all records when a command times out
- lose no records without a timeout
2020-01-24 12:12:14 WARN  OutFormatterRunner:190 - Piped command List(python, /tmp/adamTestMvnPRoviTH/spark-8c561f31-9a08-40f8-9689-ef7eb6dac745/userFiles-985e088a-c082-4a91-8ce5-49d2a5ff82e0/timeout.py) timed out after 5 seconds.
2020-01-24 12:12:14 WARN  OutFormatterRunner:190 - Piped command List(python, /tmp/adamTestMvnPRoviTH/spark-8c561f31-9a08-40f8-9689-ef7eb6dac745/userFiles-985e088a-c082-4a91-8ce5-49d2a5ff82e0/timeout.py) timed out after 5 seconds.
- lose some records when a command times out
- don't lose any reads when piping as SAM using java pipe
- don't lose any reads when piping as BAM
- don't lose any reads when piping fastq to sam
- can properly set environment variables inside of a pipe
- read vcf from alignment pipe !!! IGNORED !!!
- use broadcast join to pull down reads mapped to targets
- use broadcast join against to pull down reads mapped to targets
- use right outer broadcast join to pull down reads mapped to targets
- use right outer broadcast join against to pull down reads mapped to targets
- use shuffle join with feature spanning partitions
- use shuffle join to pull down reads mapped to targets
- use shuffle join with flankSize to pull down reads mapped close to targets
- use right outer shuffle join to pull down reads mapped to targets
- use left outer shuffle join to pull down reads mapped to targets
- use full outer shuffle join to pull down reads mapped to targets
- use shuffle join with group by to pull down reads mapped to targets
- use right outer shuffle join with group by to pull down reads mapped to targets
- cannot provide empty quality score bins
- cannot provide bins with a gap
- cannot provide overlapping bins
- binning quality scores in reads succeeds even if reads have no quality scores
- bin quality scores in reads
- union two read files together
- test k-mer counter
- test dataset based k-mer counter
- transform reads to slice genomic dataset
- transform reads to coverage genomic dataset
- transform reads to feature genomic dataset
- transform reads to fragment genomic dataset
- transform reads to genotype genomic dataset
loading /tmp/adamTestMvnPRoviTH/TempSuite8888731791637263269.adam as parquet into RDD...
loading /tmp/adamTestMvnPRoviTH/TempSuite7923475347296730811.adam as parquet into RDD...
- transform reads to variant genomic dataset
- cannot have a null processing step ID
- convert a processing description to htsjdk
- GenomicDataset.sort does not fail on unmapped reads
- GenomicDataset.sortLexicographically does not fail on unmapped reads
- left normalize indels
- running base recalibration with downsampling doesn't drop reads
- filter RDD bound alignments by MAPQ
- filter dataset bound alignments by MAPQ
- filter RDD bound unaligned alignments
- filter dataset bound unaligned alignments
- filter RDD bound unpaired alignments
- filter dataset bound unpaired alignments
- filter RDD bound duplicate alignments
- filter dataset bound duplicate alignments
- filter RDD bound alignments to primary alignments
- filter dataset bound alignments to primary alignments
- filter RDD bound alignments to read group
- filter dataset bound alignments to read group
- filter RDD bound alignments to read groups
- filter dataset bound alignments to read groups
- filter RDD bound alignments to sample
- filter dataset bound alignments to sample
- filter RDD bound alignments to samples
- filter dataset bound alignments to samples
- sort by read name
- transform dataset via java API
- convert alignments to reads
SmithWatermanSuite:
- gather max position from simple scoring matrix
- gather max position from irregular scoring matrix
- gather max position from irregular scoring matrix with deletions
- score simple alignment with constant gap
- score irregular scoring matrix
- score irregular scoring matrix with indel
- can unroll cigars correctly
- execute simple trackback
- execute trackback with indel
- run end to end smith waterman for simple reads
- run end to end smith waterman for short sequences with indel
- run end to end smith waterman for longer sequences with snp
- run end to end smith waterman for longer sequences with short indel
- run end to end smith waterman for shorter sequence in longer sequence
- run end to end smith waterman for shorter sequence in longer sequence, with indel
- smithWaterman - simple alignment
MdTagSuite:
- null md tag
- zero length md tag
- md tag with non-digit initial value
- md tag invalid base
- md tag, pure insertion
- md tag, pure insertion, test 2
- md tag pure insertion equality
- md tag equality and hashcode
- valid md tags
- get start of read with no mismatches or deletions
- get start of read with no mismatches, but with a deletion at the start
- get start of read with mismatches at the start
- get end of read with no mismatches or deletions
- check that mdtag and rich record return same end
- get end of read with no mismatches, but a deletion at end
- CIGAR with N operator
- CIGAR with multiple N operators
- CIGAR with P operators
- Get correct matches for mdtag with insertion
- Get correct matches for mdtag with mismatches and insertion
- Get correct matches for mdtag with insertion between mismatches
- Get correct matches for mdtag with intron between mismatches
- Get correct matches for mdtag with intron and deletion between mismatches
- Throw exception when number of deleted bases in mdtag disagrees with CIGAR
- Get correct matches for mdtag with mismatch, insertion and deletion
- Get correct matches for mdtag with mismatches, insertion and deletion
- Get correct matches for MDTag with mismatches and deletions
- Get correct matches base from MDTag and CIGAR with N
- get end of read with mismatches and a deletion at end
- get correct string out of mdtag with no mismatches
- get correct string out of mdtag with mismatches at start
- get correct string out of mdtag with deletion at end
- get correct string out of mdtag with mismatches at end
- get correct string out of complex mdtag
- check complex mdtag
- get gapped reference
- move a cigar alignment by two for a read
- rewrite alignment to all matches
- rewrite alignment to two mismatches followed by all matches
- rewrite alignment to include a deletion but otherwise all matches
- rewrite alignment to include an insertion at the start of the read but otherwise all matches
- create new md tag from read vs. reference, perfect match
- create new md tag from read vs. reference, perfect alignment match, 1 mismatch
- create new md tag from read vs. reference, alignment with deletion
- create new md tag from read vs. reference, alignment with insert
- handle '=' and 'X' operators
- CIGAR/MD tag mismatch should cause errors
GenomicDatasetSuite:
- processing a command that is the spark root directory should return an absolute path
- processing a command that is just a single word should do nothing
- processing a command should handle arguments that include spaces
- processing a command that is a single substitution should succeed
- processing a command that is multiple words should split the string
- process a command that is multiple words with a replacement
- process a command that is multiple words with multiple replacements
ParallelFileMergerSuite:
- cannot write both empty gzip block and cram eof
- buffer size must be non-negative
- get the size of several files
- block size must be positive and non-zero when trying to merge files
- must provide files to merge
- if two files are both below the block size, they should merge into one shard
- merge two files where one is greater than the block size
- merge a sharded sam file
- merge a sharded bam file
- merge a sharded cram file
- can't turn a negative index into a path
- generate a path from an index
IndelTableSuite:
- check for indels in a region with known indels
- check for indels in a contig that doesn't exist
- check for indels in a region without known indels
- build indel table from rdd of variants
SnpTableSuite:
- create an empty snp table
- create a snp table from variants on multiple contigs
- create a snp table from a larger set of variants
- perform lookups on multi-contig snp table
- perform lookups on larger snp table
RealignIndelsSuite:
- map reads to targets
- checking mapping to targets for artificial reads
- checking alternative consensus for artificial reads
- checking extraction of reference from reads
- checking realigned reads for artificial input
- checking realigned reads for artificial input with reference file
- checking realigned reads for artificial input using knowns
- checking realigned reads for artificial input using knowns and reads
- skip realigning reads if target is highly covered
- skip realignment if target is an insufficient LOD improvement
- realign reads to an insertion
- test mismatch quality scoring
- test mismatch quality scoring for no mismatches
- test mismatch quality scoring for offset
- test mismatch quality scoring with early exit
- test mismatch quality scoring after unpacking read
- we shouldn't try to realign a region with no target
- we shouldn't try to realign reads with no indel evidence
- test OP and OC tags
- realign a read with an insertion that goes off the end of the read
- if realigning a target doesn't improve the LOD, don't drop reads
- extract seq/qual from a read with no clipped bases
- extract seq/qual from a read with clipped bases at start
- extract seq/qual from a read with clipped bases at end
- if unclip is selected, don't drop base when extracting from a read with clipped bases
- get cigar and coordinates for read that spans indel, no clipped bases
- get cigar and coordinates for read that spans deletion, clipped bases at start
- get cigar and coordinates for read that falls wholly before insertion
- get cigar and coordinates for read that falls wholly after insertion
- get cigar and coordinates for read that falls wholly after deletion
- get cigar and coordinates for read that partially spans insertion, no clipped bases
- get cigar and coordinates for read that partially spans insertion, clipped bases at end
- get cigar and coordinates for read that partially spans insertion, clipped bases both ends
BaseQualityRecalibrationSuite:
- BQSR Test Input #1 w/ VCF Sites without caching
- BQSR Test Input #1 w/ VCF Sites with caching
- BQSR Test Input #1 w/ VCF Sites with serialized caching
DinucCovariateSuite:
- computing dinucleotide pairs for a single base sequence should return (N,N)
- compute dinucleotide pairs for a string of all valid bases
- compute dinucleotide pairs for a string with an N
- compute covariates for a read on the negative strand
- compute covariates for a read on the positive strand
SequenceDictionarySuite:
- Convert from sam sequence record and back
- Convert from SAM sequence dictionary file (with extra fields)
- merge into existing dictionary
- Convert from SAM sequence dictionary and back
- Can retrieve sequence by name
- SequenceDictionary's with same single element are equal
- SequenceDictionary's with same two elements are equals
- SequenceDictionary's with different elements are unequal
- SequenceDictionaries with same elements in different order are compatible
- isCompatible tests equality on overlap
- The addition + works correctly
- The append operation ++ works correctly
- ContainsRefName works correctly for different string types
- Apply on name works correctly for different String types
- convert from sam sequence record and back
- convert from sam sequence dictionary and back
- conversion to sam sequence dictionary has correct sort order
- load sequence dictionary from VCF file
- empty sequence dictionary must be empty
- test filter to reference name
- test filter to reference names
- test filter to reference name by function
GenomicPositionPartitionerSuite:
- partitions the UNMAPPED ReferencePosition into the top partition
- if we do not have a contig for a record, we throw an IAE
- partitioning into N pieces on M total sequence length, where N > M, results in M partitions
- correctly partitions a single dummy sequence into two pieces
- correctly counts cumulative lengths
- correctly partitions positions across two dummy sequences
- test that we can range partition ADAMRecords
- test that we can range partition ADAMRecords indexed by sample
- test that simple partitioning works okay on a reasonable set of ADAMRecords
- test indexed ReferencePosition partitioning works on a set of indexed ADAMRecords
CoverageSuite:
- Convert to coverage from valid Feature
- Convert to coverage from valid Feature with sampleId
- Convert to coverage from Feature with null/empty contigName fails with correct error
- Convert to coverage from Feature with no start/end position fails with correct error
- Convert to coverage from Feature with no score fails with correct error
InnerTreeRegionJoinSuite:
- Ensure same reference regions get passed together
- Overlapping reference regions
- Multiple reference regions do not throw exception
VariantDatasetSuite:
- union two variant genomic datasets together
- round trip to parquet
- save and reload from partitioned parquet
- use broadcast join to pull down variants mapped to targets
- use right outer broadcast join to pull down variants mapped to targets
- use shuffle join to pull down variants mapped to targets
- use right outer shuffle join to pull down variants mapped to targets
- use left outer shuffle join to pull down variants mapped to targets
- use full outer shuffle join to pull down variants mapped to targets
- use shuffle join with group by to pull down variants mapped to targets
- use right outer shuffle join with group by to pull down variants mapped to targets
- convert back to variant contexts
- load parquet to sql, save, re-read from avro
2020-01-24 12:13:51 WARN  DatasetBoundSliceDataset:190 - Saving directly as Parquet from SQL. Options other than compression codec are ignored.
- transform variants to slice genomic dataset
- transform variants to coverage genomic dataset
- transform variants to feature genomic dataset
- transform variants to fragment genomic dataset
- transform variants to read genomic dataset
- transform variants to genotype genomic dataset
- transform variants to variant context genomic dataset
- filter RDD bound variants to filters passed
- filter dataset bound variants to filters passed
- filter RDD bound variants by quality
- filter dataset bound variants by quality
- filter RDD bound variants by read depth
- filter dataset bound variants by read depth
- filter RDD bound variants by reference read depth
- filter dataset bound variants by reference read depth
- filter RDD bound single nucleotide variants
- filter dataset bound single nucleotide variants
- filter RDD bound multiple nucleotide variants
- filter dataset bound multiple nucleotide variants
- filter RDD bound indel variants
- filter dataset bound indel variants
- filter RDD bound variants to single nucleotide variants
- filter dataset bound variants to single nucleotide variants
- filter RDD bound variants to multiple nucleotide variants
- filter dataset bound variants to multiple nucleotide variants
- filter RDD bound variants to indel variants
- filter dataset bound variants to indel variants
- transform dataset via java API
Run completed in 6 minutes, 36 seconds.
Total number of tests run: 1175
Suites: completed 68, aborted 0
Tests: succeeded 1175, failed 0, canceled 0, ignored 5, pending 0
All tests passed.
[INFO] 
[INFO] <<< scoverage-maven-plugin:1.1.1:report (default-cli) < [scoverage]test @ adam-core-spark2_2.11 <<<
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:report (default-cli) @ adam-core-spark2_2.11 ---
[INFO] [scoverage] Generating cobertura XML report...
[INFO] [scoverage] Generating scoverage XML report...
[INFO] [scoverage] Generating scoverage HTML report...
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: APIs for Java, Python 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-apis-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-apis-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-apis-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-apis-spark2_2.11 ---
[INFO] Modified 0 of 5 .scala files
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-apis-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-apis-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/main/scala:-1: info: compiling
[INFO] Compiling 4 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/2.11.12/classes at 1579896853627
[WARNING] warning: there were two feature warnings; re-run with -feature for details
[WARNING] one warning found
[INFO] prepare-compile in 0 s
[INFO] compile in 5 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ adam-apis-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-test-source (add-test-source) @ adam-apis-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:testResources (default-testResources) @ adam-apis-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 2 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-apis-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/test/java:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/test/scala:-1: info: compiling
[INFO] Compiling 9 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/2.11.12/test-classes at 1579896859457
[INFO] prepare-compile in 0 s
[INFO] compile in 4 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ adam-apis-spark2_2.11 ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 8 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/2.11.12/test-classes
[INFO] 
[INFO] --- maven-surefire-plugin:3.0.0-M3:test (default-test) @ adam-apis-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:2.0.0:test (test) @ adam-apis-spark2_2.11 ---
Discovery starting.
Discovery completed in 180 milliseconds.
Run starting. Expected test count is: 10
JavaADAMContextSuite:
2020-01-24 12:14:25 WARN  Utils:66 - Your hostname, research-jenkins-worker-08 resolves to a loopback address: 127.0.1.1; using 192.168.10.28 instead (on interface eth0)
2020-01-24 12:14:25 WARN  Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2020-01-24 12:14:25 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
- can read and write a small .SAM file
- loadIndexedBam with multiple ReferenceRegions
- can read and write a small .SAM file as fragments
- can read and write a small .bed file as features
- can read and write a small .bed file as coverage
- can read and write a small .vcf as genotypes
- can read and write a small .vcf as variants
- can read a two bit file
2020-01-24 12:14:34 WARN  RDDBoundSequenceDataset:190 - asSingleFile = true ignored when saving as Parquet.
- can read and write .fa as sequences
2020-01-24 12:14:34 WARN  RDDBoundSliceDataset:190 - asSingleFile = true ignored when saving as Parquet.
- can read and write .fa as slices
Run completed in 10 seconds, 437 milliseconds.
Total number of tests run: 10
Suites: completed 2, aborted 0
Tests: succeeded 10, failed 0, canceled 0, ignored 0, pending 0
All tests passed.
[INFO] 
[INFO] >>> scoverage-maven-plugin:1.1.1:report (default-cli) > [scoverage]test @ adam-apis-spark2_2.11 >>>
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-apis-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-apis-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-apis-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-apis-spark2_2.11 ---
[INFO] Modified 0 of 5 .scala files
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:pre-compile (default-cli) @ adam-apis-spark2_2.11 ---
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-apis-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-apis-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/main/scala:-1: info: compiling
[INFO] Compiling 4 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/2.11.12/scoverage-classes at 1579896875832
[INFO] [info] Cleaning datadir [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/scoverage-data]
[INFO] [info] Beginning coverage instrumentation
[INFO] [info] Instrumentation completed [265 statements]
[INFO] [info] Wrote instrumentation file [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/scoverage-data/scoverage.coverage.xml]
[INFO] [info] Will write measurement data to [/home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/scoverage-data]
[WARNING] warning: there were two feature warnings; re-run with -feature for details
[WARNING] one warning found
[INFO] prepare-compile in 0 s
[INFO] compile in 5 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:compile (default-compile) @ adam-apis-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:post-compile (default-cli) @ adam-apis-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-test-source (add-test-source) @ adam-apis-spark2_2.11 ---
[INFO] Test Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/test/scala added.
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:testResources (default-testResources) @ adam-apis-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 2 resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:testCompile (scala-test-compile-first) @ adam-apis-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/test/java:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/src/test/scala:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/generated-test-sources/test-annotations:-1: info: compiling
[INFO] Compiling 9 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-apis/target/2.11.12/test-classes at 1579896881942
[INFO] prepare-compile in 0 s
[INFO] compile in 4 s
[INFO] 
[INFO] --- maven-compiler-plugin:3.8.0:testCompile (default-testCompile) @ adam-apis-spark2_2.11 ---
[INFO] Nothing to compile - all classes are up to date
[INFO] 
[INFO] --- maven-surefire-plugin:3.0.0-M3:test (default-test) @ adam-apis-spark2_2.11 ---
[INFO] Tests are skipped.
[INFO] 
[INFO] --- scalatest-maven-plugin:2.0.0:test (test) @ adam-apis-spark2_2.11 ---
Discovery starting.
Discovery completed in 181 milliseconds.
Run starting. Expected test count is: 10
JavaADAMContextSuite:
2020-01-24 12:14:47 WARN  Utils:66 - Your hostname, research-jenkins-worker-08 resolves to a loopback address: 127.0.1.1; using 192.168.10.28 instead (on interface eth0)
2020-01-24 12:14:47 WARN  Utils:66 - Set SPARK_LOCAL_IP if you need to bind to another address
2020-01-24 12:14:47 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
- can read and write a small .SAM file
- loadIndexedBam with multiple ReferenceRegions
- can read and write a small .SAM file as fragments
- can read and write a small .bed file as features
- can read and write a small .bed file as coverage
- can read and write a small .vcf as genotypes
- can read and write a small .vcf as variants
- can read a two bit file
2020-01-24 12:14:56 WARN  RDDBoundSequenceDataset:190 - asSingleFile = true ignored when saving as Parquet.
- can read and write .fa as sequences
2020-01-24 12:14:57 WARN  RDDBoundSliceDataset:190 - asSingleFile = true ignored when saving as Parquet.
- can read and write .fa as slices
Run completed in 10 seconds, 772 milliseconds.
Total number of tests run: 10
Suites: completed 2, aborted 0
Tests: succeeded 10, failed 0, canceled 0, ignored 0, pending 0
All tests passed.
[INFO] 
[INFO] <<< scoverage-maven-plugin:1.1.1:report (default-cli) < [scoverage]test @ adam-apis-spark2_2.11 <<<
[INFO] 
[INFO] --- scoverage-maven-plugin:1.1.1:report (default-cli) @ adam-apis-spark2_2.11 ---
[INFO] [scoverage] Generating cobertura XML report...
[INFO] [scoverage] Generating scoverage XML report...
[INFO] [scoverage] Generating scoverage HTML report...
[INFO]                                                                         
[INFO] ------------------------------------------------------------------------
[INFO] Building ADAM_2.11: CLI 0.30.0-SNAPSHOT
[INFO] ------------------------------------------------------------------------
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-versions) @ adam-cli-spark2_2.11 ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.0:enforce (enforce-maven) @ adam-cli-spark2_2.11 ---
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:timestamp-property (timestamp-property) @ adam-cli-spark2_2.11 ---
[INFO] 
[INFO] --- git-commit-id-plugin:2.2.2:revision (default) @ adam-cli-spark2_2.11 ---
[INFO] 
[INFO] --- templating-maven-plugin:1.0.0:filter-sources (filter-src) @ adam-cli-spark2_2.11 ---
[INFO] Copying files with filtering to temporary directory.
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 1 resource
[INFO] Copied 1 files to output directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/target/generated-sources/java-templates
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/target/generated-sources/java-templates added.
[INFO] 
[INFO] --- build-helper-maven-plugin:3.0.0:add-source (add-source) @ adam-cli-spark2_2.11 ---
[INFO] Source directory: /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/src/main/scala added.
[INFO] 
[INFO] --- scalariform-maven-plugin:0.1.4:format (default-cli) @ adam-cli-spark2_2.11 ---
[INFO] Modified 0 of 29 .scala files
[INFO] 
[INFO] --- maven-resources-plugin:3.1.0:resources (default-resources) @ adam-cli-spark2_2.11 ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] skip non existing resourceDirectory /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/src/main/resources
[INFO] 
[INFO] --- scala-maven-plugin:3.2.2:compile (scala-compile-first) @ adam-cli-spark2_2.11 ---
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/target/generated-sources/java-templates:-1: info: compiling
[INFO] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/src/main/scala:-1: info: compiling
[INFO] Compiling 18 source files to /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/target/2.11.12/classes at 1579896899138
[ERROR] /home/jenkins/workspace/ADAM-prb/HADOOP_VERSION/2.7.5/SCALAVER/2.11/SPARK_VERSION/2.4.4/label/ubuntu/adam-cli/src/main/scala/org/bdgenomics/adam/cli/PrintADAM.scala:85: error: overloaded method value jsonEncoder with alternatives:
[ERROR]   (x$1: org.apache.avro.Schema,x$2: java.io.OutputStream,x$3: Boolean)org.apache.avro.io.JsonEncoder <and>
[ERROR]   (x$1: org.apache.avro.Schema,x$2: java.io.OutputStream)org.apache.avro.io.JsonEncoder
[ERROR]  cannot be applied to (org.apache.avro.Schema, java.io.PrintStream, pretty: Boolean)
[ERROR]               val encoder = EncoderFactory.get().jsonEncoder(schema, out, pretty = true)
[ERROR]                                                  ^
[ERROR] one error found
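The compile failure above is an overload-resolution problem rather than a missing Avro API: both jsonEncoder alternatives listed by scalac take a java.io.OutputStream, and java.io.PrintStream is an OutputStream, so the argument types line up; what defeats resolution is the named argument `pretty = true`, because the overloads are Java methods whose parameters only carry synthetic names (x$1, x$2, x$3). A minimal sketch of one plausible fix, assuming `schema` and `out` stand in for the values already in scope in PrintADAM.scala, is to pass the flag positionally:

    import java.io.PrintStream
    import org.apache.avro.Schema
    import org.apache.avro.io.EncoderFactory

    object PrintAdamEncoderSketch {
      // schema and out are hypothetical stand-ins for the values used in PrintADAM.scala.
      def prettyJsonEncoder(schema: Schema, out: PrintStream) =
        // A positional Boolean selects the (Schema, OutputStream, Boolean) overload;
        // the named form `pretty = true` cannot be resolved against Java overloads.
        EncoderFactory.get().jsonEncoder(schema, out, true)
    }
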
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] ADAM_2.11 .......................................... SUCCESS [ 11.832 s]
[INFO] ADAM_2.11: Avro-to-Dataset codegen utils ........... SUCCESS [ 11.083 s]
[INFO] ADAM_2.11: Core .................................... SUCCESS [15:05 min]
[INFO] ADAM_2.11: APIs for Java, Python ................... SUCCESS [ 44.879 s]
[INFO] ADAM_2.11: CLI ..................................... FAILURE [  4.923 s]
[INFO] ADAM_2.11: Assembly ................................ SKIPPED
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 16:18 min
[INFO] Finished at: 2020-01-24T12:15:03-08:00
[INFO] Final Memory: 77M/1466M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal net.alchim31.maven:scala-maven-plugin:3.2.2:compile (scala-compile-first) on project adam-cli-spark2_2.11: wrap: org.apache.commons.exec.ExecuteException: Process exited with an error: 1 (Exit value: 1) -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :adam-cli-spark2_2.11
Build step 'Execute shell' marked build as failure
Recording test results
Publishing Scoverage XML and HTML report...
Setting commit status on GitHub for https://github.com/bigdatagenomics/adam/commit/9a16079f5a139ff66e00e34273154a8fc202520b
Finished: FAILURE