本文整理汇总了Java中org.broadinstitute.hellbender.utils.read.ReadConstants类的典型用法代码示例。如果您正苦于以下问题:Java ReadConstants类的具体用法?Java ReadConstants怎么用?Java ReadConstants使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
ReadConstants类属于org.broadinstitute.hellbender.utils.read包,在下文中一共展示了ReadConstants类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: shardedReadsSparkSourceTest
import org.broadinstitute.hellbender.utils.read.ReadConstants; //导入依赖的package包/类
@Test(dataProvider = "loadShardedReads", groups = "spark")
public void shardedReadsSparkSourceTest(String expectedBam, String shardedBam, String referencePath) {
    // Reads loaded serially from the expected BAM should match, in count, the reads
    // loaded in parallel from the sharded BAM.
    final JavaSparkContext sparkContext = SparkContextFactory.getTestSparkContext();
    final ReadsSparkSource source = new ReadsSparkSource(sparkContext);

    final List<GATKRead> expectedReads =
            getSerialReads(sparkContext, expectedBam, referencePath, ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY).collect();
    final List<GATKRead> actualReads =
            source.getParallelReads(shardedBam, referencePath).collect();

    Assert.assertEquals(actualReads.size(), expectedReads.size());
}
示例2: ReadsDataSource
import org.broadinstitute.hellbender.utils.read.ReadConstants; //导入依赖的package包/类
/**
 * Initialize this data source from multiple SAM/BAM/CRAM files, optional explicit indices
 * for those files, and an optional custom SamReaderFactory.
 *
 * @param samPaths paths to SAM/BAM/CRAM files, not null
 * @param samIndices indices for all of the SAM/BAM/CRAM files, in the same order as samPaths.
 *                   May be null, in which case index paths are inferred automatically.
 * @param customSamReaderFactory SamReaderFactory to use; if null, a default factory with no
 *                               reference and validation stringency SILENT is used.
 * @param cloudWrapper caching/prefetching wrapper for the data, if on Google Cloud.
 * @param cloudIndexWrapper caching/prefetching wrapper for the index, if on Google Cloud.
 */
public ReadsDataSource( final List<Path> samPaths, final List<Path> samIndices,
SamReaderFactory customSamReaderFactory,
Function<SeekableByteChannel, SeekableByteChannel> cloudWrapper,
Function<SeekableByteChannel, SeekableByteChannel> cloudIndexWrapper) {
    Utils.nonNull(samPaths);
    Utils.nonEmpty(samPaths, "ReadsDataSource cannot be created from empty file list");

    if ( samIndices != null && samPaths.size() != samIndices.size() ) {
        throw new UserException(String.format("Must have the same number of BAM/CRAM/SAM paths and indices. Saw %d BAM/CRAM/SAMs but %d indices",
                samPaths.size(), samIndices.size()));
    }

    readers = new LinkedHashMap<>(samPaths.size() * 2);
    backingPaths = new LinkedHashMap<>(samPaths.size() * 2);
    // Assume indices exist until a reader without one is encountered below.
    indicesAvailable = true;

    final SamReaderFactory readerFactory;
    if ( customSamReaderFactory == null ) {
        readerFactory = SamReaderFactory.makeDefault().validationStringency(ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY);
    }
    else {
        readerFactory = customSamReaderFactory;
    }

    int pathIndex = 0;
    for ( final Path samPath : samPaths ) {
        // Fail fast if a data file cannot be read at all.
        try {
            IOUtil.assertFileIsReadable(samPath);
        }
        catch ( SAMException|IllegalArgumentException e ) {
            throw new UserException.CouldNotReadInputFile(samPath.toString(), e);
        }

        final Function<SeekableByteChannel, SeekableByteChannel> dataWrapper =
                BucketUtils.isCloudStorageUrl(samPath) ? cloudWrapper : Function.identity();

        // When samIndices == null the index name is guessed from the data file name, and
        // for cloud-resident data that search only considers cloud locations — so the
        // index wrapper is selected based on where the index will actually live.
        final boolean indexIsOnCloud = (samIndices != null)
                ? BucketUtils.isCloudStorageUrl(samIndices.get(pathIndex))
                : BucketUtils.isCloudStorageUrl(samPath);
        final Function<SeekableByteChannel, SeekableByteChannel> idxWrapper =
                indexIsOnCloud ? cloudIndexWrapper : Function.identity();

        final SamReader reader;
        if ( samIndices == null ) {
            reader = readerFactory.open(samPath, dataWrapper, idxWrapper);
        }
        else {
            final SamInputResource resource = SamInputResource.of(samPath, dataWrapper);
            resource.index(samIndices.get(pathIndex), idxWrapper);
            reader = readerFactory.open(resource);
        }

        // Record whether every input actually has an index available.
        if ( ! reader.hasIndex() ) {
            indicesAvailable = false;
        }

        readers.put(reader, null);
        backingPaths.put(reader, samPath);
        ++pathIndex;
    }

    // A header merger is only needed when there is more than one input.
    headerMerger = samPaths.size() > 1 ? createHeaderMerger() : null;
}
示例3: loadReadsNonExistentReference
import org.broadinstitute.hellbender.utils.read.ReadConstants; //导入依赖的package包/类
@Test(expectedExceptions = UserException.MissingReference.class)
public void loadReadsNonExistentReference() {
    // Loading a CRAM against a reference path that does not exist must fail
    // with a MissingReference exception.
    final String missingReference =
            GATKBaseTest.getSafeNonExistentFile("NonExistentReference.fasta").getAbsolutePath();
    doLoadReads(dir + "valid.cram", missingReference, ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY);
}
示例4: doLoadReadsTest
import org.broadinstitute.hellbender.utils.read.ReadConstants; //导入依赖的package包/类
/**
 * Convenience overload: loads the given bam against the given reference using
 * the default read validation stringency.
 */
private void doLoadReadsTest(String bam, String referencePath) {
    doLoadReads(
            bam,
            referencePath,
            ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY);
}
示例5: readsSparkSourceUnknownHostTest
import org.broadinstitute.hellbender.utils.read.ReadConstants; //导入依赖的package包/类
@Test(expectedExceptions = UserException.class, expectedExceptionsMessageRegExp = ".*Failed to read bam header from hdfs://bogus/path.bam.*")
public void readsSparkSourceUnknownHostTest() {
    // Reading from an unreachable HDFS host must surface as a UserException
    // whose message names the offending path.
    final JavaSparkContext sparkContext = SparkContextFactory.getTestSparkContext();
    new ReadsSparkSource(sparkContext, ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY)
            .getParallelReads("hdfs://bogus/path.bam", null);
}