本文整理匯總了Java中org.broadinstitute.hellbender.utils.read.ReadConstants類的典型用法代碼示例。如果您正苦於以下問題:Java ReadConstants類的具體用法?Java ReadConstants怎麽用?Java ReadConstants使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
ReadConstants類屬於org.broadinstitute.hellbender.utils.read包,在下文中一共展示了ReadConstants類的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: shardedReadsSparkSourceTest
import org.broadinstitute.hellbender.utils.read.ReadConstants; //導入依賴的package包/類
@Test(dataProvider = "loadShardedReads", groups = "spark")
public void shardedReadsSparkSourceTest(String expectedBam, String shardedBam, String referencePath) {
    final JavaSparkContext ctx = SparkContextFactory.getTestSparkContext();
    final ReadsSparkSource readSource = new ReadsSparkSource(ctx);

    // Baseline: read the unsharded BAM serially with the default stringency.
    final List<GATKRead> expectedReads =
            getSerialReads(ctx, expectedBam, referencePath, ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY).collect();
    // Under test: read the sharded copy through the parallel Spark path.
    final List<GATKRead> actualReads =
            readSource.getParallelReads(shardedBam, referencePath).collect();

    // Only the read count is compared here — presumably because sharded
    // loading does not guarantee read order; confirm against the data provider.
    Assert.assertEquals(actualReads.size(), expectedReads.size());
}
示例2: ReadsDataSource
import org.broadinstitute.hellbender.utils.read.ReadConstants; //導入依賴的package包/類
/**
 * Initialize this data source with multiple SAM/BAM/CRAM files, explicit indices for those files,
 * and a custom SamReaderFactory.
 *
 * @param samPaths paths to SAM/BAM/CRAM files, not null
 * @param samIndices indices for all of the SAM/BAM/CRAM files, in the same order as samPaths. May be null,
 * in which case index paths are inferred automatically.
 * @param customSamReaderFactory SamReaderFactory to use, if null a default factory with no reference and validation
 * stringency SILENT is used.
 * @param cloudWrapper caching/prefetching wrapper for the data, if on Google Cloud.
 * @param cloudIndexWrapper caching/prefetching wrapper for the index, if on Google Cloud.
 * @throws UserException if samIndices is non-null but its size differs from samPaths
 * @throws UserException.CouldNotReadInputFile if any input path is unreadable
 */
public ReadsDataSource( final List<Path> samPaths, final List<Path> samIndices,
SamReaderFactory customSamReaderFactory,
Function<SeekableByteChannel, SeekableByteChannel> cloudWrapper,
Function<SeekableByteChannel, SeekableByteChannel> cloudIndexWrapper) {
Utils.nonNull(samPaths);
Utils.nonEmpty(samPaths, "ReadsDataSource cannot be created from empty file list");
// If explicit indices were supplied, they must pair 1:1 with the inputs.
if ( samIndices != null && samPaths.size() != samIndices.size() ) {
throw new UserException(String.format("Must have the same number of BAM/CRAM/SAM paths and indices. Saw %d BAM/CRAM/SAMs but %d indices",
samPaths.size(), samIndices.size()));
}
// Over-size the maps (2x) to avoid rehashing; LinkedHashMap preserves input order.
readers = new LinkedHashMap<>(samPaths.size() * 2);
backingPaths = new LinkedHashMap<>(samPaths.size() * 2);
// Optimistic start; flipped to false below if any reader lacks an index.
indicesAvailable = true;
final SamReaderFactory samReaderFactory =
customSamReaderFactory == null ?
SamReaderFactory.makeDefault().validationStringency(ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY) :
customSamReaderFactory;
int samCount = 0;
for ( final Path samPath : samPaths ) {
// Ensure each file can be read
try {
IOUtil.assertFileIsReadable(samPath);
}
catch ( SAMException|IllegalArgumentException e ) {
throw new UserException.CouldNotReadInputFile(samPath.toString(), e);
}
// Only wrap cloud (gs://) data paths; local paths pass through unchanged.
Function<SeekableByteChannel, SeekableByteChannel> wrapper =
(BucketUtils.isCloudStorageUrl(samPath)
? cloudWrapper
: Function.identity());
// if samIndices==null then we'll guess the index name from the file name.
// If the file's on the cloud, then the search will only consider locations that are also
// in the cloud.
Function<SeekableByteChannel, SeekableByteChannel> indexWrapper =
((samIndices != null && BucketUtils.isCloudStorageUrl(samIndices.get(samCount))
|| (samIndices == null && BucketUtils.isCloudStorageUrl(samPath)))
? cloudIndexWrapper
: Function.identity());
SamReader reader;
if ( samIndices == null ) {
// No explicit index: let the factory locate one next to the data file.
reader = samReaderFactory.open(samPath, wrapper, indexWrapper);
}
else {
// Explicit index: bind data and index into a single resource.
final SamInputResource samResource = SamInputResource.of(samPath, wrapper);
Path indexPath = samIndices.get(samCount);
samResource.index(indexPath, indexWrapper);
reader = samReaderFactory.open(samResource);
}
// Ensure that each file has an index
if ( ! reader.hasIndex() ) {
indicesAvailable = false;
}
// Map value starts null — presumably an iterator slot populated lazily
// elsewhere in the class; confirm against the readers field declaration.
readers.put(reader, null);
backingPaths.put(reader, samPath);
++samCount;
}
// Prepare a header merger only if we have multiple readers
headerMerger = samPaths.size() > 1 ? createHeaderMerger() : null;
}
示例3: loadReadsNonExistentReference
import org.broadinstitute.hellbender.utils.read.ReadConstants; //導入依賴的package包/類
@Test(expectedExceptions = UserException.MissingReference.class)
public void loadReadsNonExistentReference() {
    // Loading this CRAM against a reference FASTA that is guaranteed not to
    // exist must fail with UserException.MissingReference.
    final String bogusReference =
            GATKBaseTest.getSafeNonExistentFile("NonExistentReference.fasta").getAbsolutePath();
    doLoadReads(dir + "valid.cram", bogusReference, ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY);
}
示例4: doLoadReadsTest
import org.broadinstitute.hellbender.utils.read.ReadConstants; //導入依賴的package包/類
/**
 * Convenience overload of {@code doLoadReads} that applies the library-wide
 * default read validation stringency.
 */
private void doLoadReadsTest(final String bam, final String referencePath) {
    doLoadReads(bam, referencePath, ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY);
}
示例5: readsSparkSourceUnknownHostTest
import org.broadinstitute.hellbender.utils.read.ReadConstants; //導入依賴的package包/類
@Test(expectedExceptions = UserException.class, expectedExceptionsMessageRegExp = ".*Failed to read bam header from hdfs://bogus/path.bam.*")
public void readsSparkSourceUnknownHostTest() {
    // Reading from an unreachable HDFS host must surface a UserException whose
    // message names the offending path (matched by the regexp above).
    final ReadsSparkSource readSource = new ReadsSparkSource(
            SparkContextFactory.getTestSparkContext(),
            ReadConstants.DEFAULT_READ_VALIDATION_STRINGENCY);
    readSource.getParallelReads("hdfs://bogus/path.bam", null);
}