本文整理汇总了Java中org.apache.spark.broadcast.Broadcast.value方法的典型用法代码示例。如果您正苦于以下问题:Java Broadcast.value方法的具体用法?Java Broadcast.value怎么用?Java Broadcast.value使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.spark.broadcast.Broadcast
的用法示例。
在下文中一共展示了Broadcast.value方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: sliceOperations
import org.apache.spark.broadcast.Broadcast; //导入方法依赖的package包/类
/**
 * Slices the index range [rl,ru] x [cl,cu] out of every broadcast partition
 * and merges the partial results into a single output block.
 *
 * @param rl row lower bound (inclusive)
 * @param ru row upper bound (inclusive)
 * @param cl column lower bound (inclusive)
 * @param cu column upper bound (inclusive)
 * @param block target block passed through to the per-partition slice call
 * @return the merged slice, or null if there are no broadcast partitions
 * @throws DMLRuntimeException on invalid slice ranges or merge failures
 */
public T sliceOperations(long rl, long ru, long cl, long cu, T block)
	throws DMLRuntimeException
{
	T result = null;
	for( Broadcast<PartitionedBlock<T>> partition : _pbc ) {
		T slice = partition.value().sliceOperations(rl, ru, cl, cu, block);
		if( result == null )
			result = slice; // first partition seeds the result
		else
			result.merge(slice, false);
	}
	return result;
}
示例2: shardsToAssemblyRegions
import org.apache.spark.broadcast.Broadcast; //导入方法依赖的package包/类
/**
 * @return an RDD of {@link Tuple2<AssemblyRegion, SimpleInterval>} pairing each AssemblyRegion with the
 * interval it was generated in
 */
private static FlatMapFunction<Iterator<Shard<GATKRead>>, Tuple2<AssemblyRegion, SimpleInterval>> shardsToAssemblyRegions(
		final Broadcast<ReferenceMultiSource> reference,
		final Broadcast<HaplotypeCallerArgumentCollection> hcArgsBroadcast,
		final ShardingArgumentCollection assemblyArgs,
		final SAMFileHeader header,
		final Broadcast<VariantAnnotatorEngine> annotatorEngineBroadcast) {
	return shards -> {
		// Materialize broadcast values and build the engine once per partition;
		// HaplotypeCallerEngine is expensive to construct.
		final ReferenceMultiSourceAdapter referenceSource =
				new ReferenceMultiSourceAdapter(reference.value());
		final HaplotypeCallerEngine hcEngine = new HaplotypeCallerEngine(
				hcArgsBroadcast.value(), false, false, header, referenceSource, annotatorEngineBroadcast.getValue());
		// Positional downsampling only applies when a positive per-start read cap is configured.
		final ReadsDownsampler readsDownsampler;
		if (assemblyArgs.maxReadsPerAlignmentStart > 0) {
			readsDownsampler = new PositionalDownsampler(assemblyArgs.maxReadsPerAlignmentStart, header);
		} else {
			readsDownsampler = null;
		}
		return Utils.stream(shards)
				//TODO we've hacked multi interval shards here with a shim, but we should investigate as smarter approach https://github.com/broadinstitute/gatk/issues/4299
				.map(shard -> new ShardToMultiIntervalShardAdapter<>(
						new DownsampleableSparkReadShard(
								new ShardBoundary(shard.getInterval(), shard.getPaddedInterval()), shard, readsDownsampler)))
				.flatMap(shardToRegion(assemblyArgs, header, referenceSource, hcEngine))
				.iterator();
	};
}
示例3: callVariantsFromAssemblyRegions
import org.apache.spark.broadcast.Broadcast; //导入方法依赖的package包/类
/**
 * Call variants from Tuples of AssemblyRegion and Simple Interval.
 * The interval must be the non-padded shard boundary of the shard the paired
 * AssemblyRegion was created in; it is used to drop redundant variant calls
 * at the edges of shard boundaries.
 */
private static FlatMapFunction<Iterator<Tuple2<AssemblyRegion, SimpleInterval>>, VariantContext> callVariantsFromAssemblyRegions(
		final AuthHolder authHolder,
		final SAMFileHeader header,
		final Broadcast<ReferenceMultiSource> referenceBroadcast,
		final Broadcast<HaplotypeCallerArgumentCollection> hcArgsBroadcast) {
	return regionAndIntervals -> {
		// HaplotypeCallerEngine isn't serializable but is expensive to instantiate,
		// so construct one per partition and reuse it for every region.
		final ReferenceMultiSourceAdapter referenceReader =
				new ReferenceMultiSourceAdapter(referenceBroadcast.getValue(), authHolder);
		final HaplotypeCallerEngine engine =
				new HaplotypeCallerEngine(hcArgsBroadcast.value(), header, referenceReader);
		return iteratorToStream(regionAndIntervals)
				.flatMap(regionToVariants(engine))
				.iterator();
	};
}
示例4: shardsToAssemblyRegions
import org.apache.spark.broadcast.Broadcast; //导入方法依赖的package包/类
/**
 * @return an RDD of {@link Tuple2<AssemblyRegion, SimpleInterval>} pairing each AssemblyRegion with the
 * interval it was generated in
 */
private static FlatMapFunction<Iterator<Shard<GATKRead>>, Tuple2<AssemblyRegion, SimpleInterval>> shardsToAssemblyRegions(
		final AuthHolder authHolder,
		final Broadcast<ReferenceMultiSource> reference,
		final Broadcast<HaplotypeCallerArgumentCollection> hcArgsBroadcast,
		final ShardingArgumentCollection assemblyArgs,
		final SAMFileHeader header) {
	return shards -> {
		// Unpack the broadcast reference and build the (expensive, non-serializable)
		// engine once per partition rather than once per shard.
		final ReferenceMultiSourceAdapter referenceSource =
				new ReferenceMultiSourceAdapter(reference.value(), authHolder);
		final HaplotypeCallerEngine engine =
				new HaplotypeCallerEngine(hcArgsBroadcast.value(), header, referenceSource);
		return iteratorToStream(shards)
				.flatMap(shardToRegion(assemblyArgs, header, referenceSource, engine))
				.iterator();
	};
}
示例5: callVariantsFromAssemblyRegions
import org.apache.spark.broadcast.Broadcast; //导入方法依赖的package包/类
/**
 * Call variants from Tuples of AssemblyRegion and Simple Interval.
 * The interval must be the non-padded shard boundary of the shard the paired
 * AssemblyRegion was created in; it is used to drop redundant variant calls
 * at the edges of shard boundaries.
 */
private static FlatMapFunction<Iterator<Tuple2<AssemblyRegion, SimpleInterval>>, VariantContext> callVariantsFromAssemblyRegions(
		final SAMFileHeader header,
		final Broadcast<ReferenceMultiSource> referenceBroadcast,
		final Broadcast<HaplotypeCallerArgumentCollection> hcArgsBroadcast,
		final Broadcast<VariantAnnotatorEngine> annotatorEngineBroadcast) {
	return regionAndIntervals -> {
		// HaplotypeCallerEngine isn't serializable but is expensive to instantiate,
		// so construct one per partition and reuse it for every region.
		final ReferenceMultiSourceAdapter referenceSource =
				new ReferenceMultiSourceAdapter(referenceBroadcast.value());
		final HaplotypeCallerEngine engine = new HaplotypeCallerEngine(
				hcArgsBroadcast.value(), false, false, header, referenceSource, annotatorEngineBroadcast.getValue());
		return Utils.stream(regionAndIntervals)
				.flatMap(regionToVariants(engine))
				.iterator();
	};
}
示例6: main
import org.apache.spark.broadcast.Broadcast; //导入方法依赖的package包/类
/**
 * Minimal Spark demo: reads back a broadcast variable and sums 1..4 into an
 * accumulator on a local master.
 */
public static void main(String[] args) {
	SparkConf conf = new SparkConf().setAppName("Big Apple").setMaster("local");
	// JavaSparkContext is Closeable; use try-with-resources so the context is
	// always stopped (releasing its threads and UI port), even if a job fails.
	// The original leaked the context and discarded both value() results.
	try (JavaSparkContext sc = new JavaSparkContext(conf)) {
		Broadcast<int[]> broadcastVar = sc.broadcast(new int[] {1, 2, 3});
		System.out.println("broadcast length: " + broadcastVar.value().length);
		Accumulator<Integer> accum = sc.accumulator(0);
		sc.parallelize(Arrays.asList(1, 2, 3, 4)).foreach(x -> accum.add(x));
		System.out.println("accumulated sum: " + accum.value()); // expected: 10
	}
}