

Java Broadcast.destroy Method Code Examples

This article collects typical usage examples of the Java method org.apache.spark.broadcast.Broadcast.destroy. If you are wondering what Broadcast.destroy does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.spark.broadcast.Broadcast.


The following presents 11 code examples of the Broadcast.destroy method, sorted by popularity by default.
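Before the examples, a quick orientation: destroy() permanently removes a broadcast variable from both the executors and the driver, while unpersist() only drops the cached copies on the executors and lets the variable be rebroadcast on next use. The following minimal, self-contained sketch illustrates the lifecycle; the class and variable names are our own illustration, not taken from any of the projects below.

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

public class BroadcastLifecycleSketch {
    public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext(
                new SparkConf().setMaster("local").setAppName("BroadcastLifecycleSketch"));

        Broadcast<String> greeting = jsc.broadcast("Hello Spark");

        // executors read the broadcast value via value()
        long count = jsc.parallelize(Arrays.asList(1, 2, 3))
                .filter(i -> greeting.value().startsWith("Hello"))
                .count();
        System.out.println(count); // 3

        // unpersist() only drops the cached copies on the executors;
        // the variable would be rebroadcast if used again
        greeting.unpersist();

        // destroy() also removes the data and metadata on the driver;
        // any use of the variable afterwards throws a SparkException
        greeting.destroy();

        jsc.stop();
    }
}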

Example 1: main

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
import org.apache.spark.sql.SparkSession;

public static void main(String[] args) {

    // Alternative: broadcast through a JavaSparkContext, which needs no ClassTag:
    //   SparkConf conf = new SparkConf().setMaster("local").setAppName("BroadCasting");
    //   JavaSparkContext jsc = new JavaSparkContext(conf);
    //   Broadcast<String> broadcastVar = jsc.broadcast("Hello Spark");

    SparkSession sparkSession = SparkSession.builder().master("local").appName("My App")
            .config("spark.sql.warehouse.dir", "file:////C:/Users/sgulati/spark-warehouse").getOrCreate();

    // the Scala SparkContext API requires an explicit ClassTag for the broadcast type
    Broadcast<String> broadcastVar = sparkSession.sparkContext()
            .broadcast("Hello Spark", scala.reflect.ClassTag$.MODULE$.apply(String.class));
    System.out.println(broadcastVar.value());

    // unpersist() removes the cached copies on the executors (pass true to block);
    // destroy() then removes all data and metadata on the driver as well
    broadcastVar.unpersist();
    broadcastVar.destroy();
}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines: 20, Source: BroadcastVariable.java

Example 2: find

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
protected boolean find(ColumnCombinationBitset columnCombination) {

    if (this.columnCombinationMap.isEmpty()) {
        return false;
    }

    // broadcast the combination so every executor can compare against it
    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(columnCombination);
    JavaRDD<ColumnCombinationBitset> check =
            this.columnCombinationMap.filter((ColumnCombinationBitset ccb) -> ccb.equals(bCcb.value()));

    // isEmpty() triggers the Spark job; destroy the broadcast only afterwards
    boolean found = !check.isEmpty();
    bCcb.destroy();
    return found;
}
 
Developer: mpoiitis, Project: DUCCspark, Lines: 20, Source: SimplePruningGraph.java

Example 3: getNextParentColumnCombination

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
protected ColumnCombinationBitset getNextParentColumnCombination(ColumnCombinationBitset column) {

    Broadcast<ColumnCombinationBitset> bColumn = Singleton.getSparkContext().broadcast(column);

    // if the minimal positives contain the column, return null
    if (!this.minimalPositives.filter((ColumnCombinationBitset ccb) -> ccb.equals(bColumn.value())).isEmpty()) {
        bColumn.destroy(); // avoid leaking the broadcast on the early return
        return null;
    }
    List<ColumnCombinationBitset> supersets = column.getDirectSupersets(this.bitmaskForNonUniqueColumns);
    JavaRDD<ColumnCombinationBitset> supersetsRdd = Singleton.getSparkContext().parallelize(supersets);

    // destroy broadcast variable
    bColumn.destroy();

    return this.findUnprunedSet(supersetsRdd);
}
 
Developer: mpoiitis, Project: DUCCspark, Lines: 17, Source: SimpleGraphTraverser.java

Example 4: getNextChildColumnCombination

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
protected ColumnCombinationBitset getNextChildColumnCombination(ColumnCombinationBitset column) {
    if (column.size() == 1) {
        return null;
    }
    Broadcast<ColumnCombinationBitset> bColumn = Singleton.getSparkContext().broadcast(column);

    // if the maximal negatives contain the column, return null
    if (!this.maximalNegatives.filter((ColumnCombinationBitset ccb) -> ccb.equals(bColumn.value())).isEmpty()) {
        bColumn.destroy(); // avoid leaking the broadcast on the early return
        return null;
    }

    List<ColumnCombinationBitset> subsets = column.getDirectSubsets();
    JavaRDD<ColumnCombinationBitset> subsetsRdd = Singleton.getSparkContext().parallelize(subsets);

    // destroy broadcast variable
    bColumn.destroy();

    return this.findUnprunedSet(subsetsRdd);
}
 
Developer: mpoiitis, Project: DUCCspark, Lines: 20, Source: SimpleGraphTraverser.java

Example 5: isMinimalPositive

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * Checks whether the given column combination is a minimal unique.
 */
protected boolean isMinimalPositive(ColumnCombinationBitset ccb) {

    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(ccb);
    JavaRDD<ColumnCombinationBitset> rdd = this.minimalPositives.filter(
            (ColumnCombinationBitset c) -> c.equals(bCcb.value()));

    // filter() is lazy, so run the action before destroying the broadcast;
    // destroying first would make isEmpty() fail when executors read value()
    boolean minimal = !rdd.isEmpty();
    bCcb.destroy();
    return minimal;
}
 
Developer: mpoiitis, Project: DUCCspark, Lines: 14, Source: SimpleGraphTraverser.java

Example 6: isMaximalNegative

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * Checks whether the given column combination is a maximal non-unique.
 */
protected boolean isMaximalNegative(ColumnCombinationBitset ccb) {

    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(ccb);
    JavaRDD<ColumnCombinationBitset> rdd = this.maximalNegatives.filter(
            (ColumnCombinationBitset c) -> c.equals(bCcb.value()));

    // as in isMinimalPositive, evaluate the action before destroying the broadcast
    boolean maximal = !rdd.isEmpty();
    bCcb.destroy();
    return maximal;
}
 
Developer: mpoiitis, Project: DUCCspark, Lines: 14, Source: SimpleGraphTraverser.java
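A pitfall worth calling out across Examples 2 through 6: RDD transformations such as filter() are lazy, so a broadcast variable must stay valid until the action that consumes it has actually run. Below is a minimal, self-contained sketch of the safe ordering; all names are our own illustration, not from the DUCCspark project.

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

public class LazyBroadcastPitfallSketch {
    public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext(
                new SparkConf().setMaster("local").setAppName("LazyBroadcastPitfallSketch"));

        JavaRDD<String> candidates = jsc.parallelize(Arrays.asList("a", "b", "c"));
        Broadcast<String> target = jsc.broadcast("b");

        // filter() is a lazy transformation: no job has run yet
        JavaRDD<String> matches = candidates.filter(s -> s.equals(target.value()));

        // run the action while the broadcast is still valid...
        boolean found = !matches.isEmpty();

        // ...and only then destroy it; destroying before isEmpty() would fail
        // the job when the executors try to read value()
        target.destroy();

        System.out.println(found); // true
        jsc.stop();
    }
}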

Example 7: cleanupBroadcastVariable

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * This call destroys a broadcast variable at all executors and the driver.
 * Hence, it is intended to be used on rmvar only. Depending on the
 * ASYNCHRONOUS_VAR_DESTROY configuration, this is asynchronous or not.
 *
 * @param bvar broadcast variable
 */
public static void cleanupBroadcastVariable(Broadcast<?> bvar)
{
	//In comparison to 'unpersist' (which would only delete the broadcast
	//from the executors), this call also deletes related data from the driver.
	if( bvar.isValid() ) {
		bvar.destroy( !ASYNCHRONOUS_VAR_DESTROY );
	}
}
 
Developer: apache, Project: systemml, Lines: 16, Source: SparkExecutionContext.java
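A note on the isValid() guard above: destroy() itself asserts that the variable is still valid, so a second destroy() on the same broadcast raises a SparkException; the guard makes the cleanup idempotent. A small illustrative sketch follows (names are our own; isValid() is private[spark] in the Scala source, but, as the SystemML code above demonstrates, it is callable from Java).

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

public class IsValidGuardSketch {
    public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext(
                new SparkConf().setMaster("local").setAppName("IsValidGuardSketch"));

        Broadcast<String> b = jsc.broadcast("x");
        b.destroy(); // the first destroy succeeds and marks the variable invalid

        if (b.isValid()) {
            b.destroy(); // never reached; without the guard this would throw SparkException
        }

        jsc.stop();
    }
}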

Example 8: destroyBroadcast

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/** Sometimes Spark has trouble destroying a broadcast variable, but we'd like the app to continue anyway. */
public static <T> void destroyBroadcast(final Broadcast<T> broadcast, final String whatBroadcast) {
    try {
        broadcast.destroy();
    } catch ( final Exception e ) {
        logger.warn("Failed to destroy broadcast for " + whatBroadcast, e);
    }
}
 
Developer: broadinstitute, Project: gatk, Lines: 9, Source: SparkUtils.java

Example 9: operate

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
@Override
public INDArray operate(@Nonnull final INDArray W_tl) throws DimensionMismatchException {
    if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents) {
        throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);
    }

    /* Z F W */
    final long startTimeZFW = System.nanoTime();
    final INDArray Z_F_W_tl = Nd4j.create(numTargets, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
            Z_F_W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                    F_tt.operate(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
    Z_F_W_tl.assign(Nd4j.gemm(Z_F_W_tl, Z_ll, false, false));
    final long endTimeZFW = System.nanoTime();

    /* perform a broadcast hash join */
    final long startTimeQW = System.nanoTime();
    final Map<LinearlySpacedIndexBlock, INDArray> W_tl_map =
            CoverageModelSparkUtils.partitionINDArrayToMap(targetSpaceBlocks, W_tl);
    final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_tl_bc = ctx.broadcast(W_tl_map);
    final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(
            computeRDD.mapValues(cb -> {
                final INDArray W_tl_chunk = W_tl_bc.value().get(cb.getTargetSpaceBlock());
                final INDArray Q_tll_chunk = cb.getINDArrayFromCache(CoverageModelEMComputeBlock.CoverageModelICGCacheNode.Q_tll);
                final Collection<INDArray> W_Q_chunk = IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
                        .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti))
                                .mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
                        .collect(Collectors.toList());
                return Nd4j.vstack(W_Q_chunk);
            }), 0);
    // the assembly above has already run the Spark job, so the broadcast can be destroyed
    W_tl_bc.destroy();
    // Alternative implementation based on an RDD join instead of a broadcast:
    //    final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_tl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_tl,
    //            targetSpaceBlocks, ctx, true);
    //    final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocks(
    //            computeRDD.join(W_tl_RDD).mapValues(p -> {
    //                final CoverageModelEMComputeBlock cb = p._1;
    //                final INDArray W_tl_chunk = p._2;
    //                final INDArray Q_tll_chunk = cb.getINDArrayFromCache("Q_tll");
    //                return Nd4j.vstack(IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
    //                        .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti)).mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
    //                        .collect(Collectors.toList()));
    //            }), false);
    //    W_tl_RDD.unpersist();
    final long endTimeQW = System.nanoTime();

    logger.debug("Local [Z] [F] [W] timing: " + (endTimeZFW - startTimeZFW) / 1000000 + " ms");
    logger.debug("Spark [Q] [W] timing: " + (endTimeQW - startTimeQW) / 1000000 + " ms");

    return Q_W_tl.addi(Z_F_W_tl);
}
 
Developer: broadinstitute, Project: gatk-protected, Lines: 49, Source: CoverageModelWLinearOperatorSpark.java
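Example 9 labels its pattern a broadcast hash join: the small side of the join is broadcast once to every executor, and the large, distributed side probes the local copy, avoiding the shuffle a regular RDD join would incur. A stripped-down, self-contained sketch of the same idea follows; the data and names are our own illustration, not from the GATK code.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

import scala.Tuple2;

public class BroadcastHashJoinSketch {
    public static void main(String[] args) {
        JavaSparkContext jsc = new JavaSparkContext(
                new SparkConf().setMaster("local").setAppName("BroadcastHashJoinSketch"));

        // small side of the join, broadcast once to every executor
        Map<Integer, String> lookup = new HashMap<>();
        lookup.put(1, "one");
        lookup.put(2, "two");
        Broadcast<Map<Integer, String>> lookupBc = jsc.broadcast(lookup);

        // the large side stays distributed; each partition probes the local
        // map, so no shuffle is needed
        JavaPairRDD<Integer, String> joined = jsc.parallelize(Arrays.asList(1, 2, 1))
                .mapToPair(k -> new Tuple2<>(k, lookupBc.value().get(k)));

        System.out.println(joined.collect()); // [(1,one), (2,two), (1,one)]

        lookupBc.destroy(); // collect() has run, so the broadcast is no longer needed
        jsc.stop();
    }
}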

Example 10: operate

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
@Override
public INDArray operate(@Nonnull final INDArray W_tl) throws DimensionMismatchException {
    if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents) {
        throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);
    }
    long startTimeRFFT = System.nanoTime();
    /* forward rfft */
    final INDArray W_kl = Nd4j.create(fftSize, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
            W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                    Nd4j.create(F_tt.getForwardFFT(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li))),
                            new int[]{fftSize, 1})));
    long endTimeRFFT = System.nanoTime();

    /* apply the preconditioner in the Fourier space */
    long startTimePrecond = System.nanoTime();
    final Map<LinearlySpacedIndexBlock, INDArray> W_kl_map =
            CoverageModelSparkUtils.partitionINDArrayToMap(fourierSpaceBlocks, W_kl);
    final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_kl_bc = ctx.broadcast(W_kl_map);
    final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> preconditionedWRDD = linOpPairRDD
            .mapToPair(p -> {
                final INDArray W_kl_chunk = W_kl_bc.value().get(p._1);
                final INDArray linOp_chunk = p._2;
                final int blockSize = linOp_chunk.shape()[0];
                final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel()
                        .mapToObj(k -> CoverageModelEMWorkspaceMathUtils.linsolve(linOp_chunk.get(NDArrayIndex.point(k)),
                                W_kl_chunk.get(NDArrayIndex.point(k))))
                        .collect(Collectors.toList());
                return new Tuple2<>(p._1, Nd4j.vstack(linOpWList));
            });
    W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(preconditionedWRDD, 0));
    // the assign above has materialized the RDD, so the broadcast can be destroyed
    W_kl_bc.destroy();
    // Alternative implementation based on an RDD join instead of a broadcast:
    //    final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_kl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_kl,
    //            fourierSpaceBlocks, ctx, true);
    //    W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocks(linOpPairRDD.join((W_kl_RDD))
    //            .mapValues(p -> {
    //                final INDArray linOp = p._1;
    //                final INDArray W = p._2;
    //                final int blockSize = linOp.shape()[0];
    //                final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel().mapToObj(k ->
    //                        CoverageModelEMWorkspaceMathUtils.linsolve(linOp.get(NDArrayIndex.point(k)),
    //                                W.get(NDArrayIndex.point(k))))
    //                        .collect(Collectors.toList());
    //                return Nd4j.vstack(linOpWList);
    //            }), false));
    //    W_kl_RDD.unpersist();
    long endTimePrecond = System.nanoTime();

    /* irfft */
    long startTimeIRFFT = System.nanoTime();
    final INDArray res = Nd4j.create(numTargets, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
            res.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                    F_tt.getInverseFFT(W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
    long endTimeIRFFT = System.nanoTime();

    logger.debug("Local FFT timing: " + (endTimeRFFT - startTimeRFFT + endTimeIRFFT - startTimeIRFFT) / 1000000 + " ms");
    logger.debug("Spark preconditioner application timing: " + (endTimePrecond - startTimePrecond) / 1000000 + " ms");

    return res;
}
 
Developer: broadinstitute, Project: gatk-protected, Lines: 62, Source: CoverageModelWPreconditionerSpark.java

Example 11: popUdf

import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * Pops a BroadcastableValueSets from the user-defined function stack.
 *
 * @param spark the spark session
 * @return true if there is still a registered in_valueset UDF, false otherwise
 */
public static synchronized boolean popUdf(SparkSession spark) {

  if (valueSetStack.isEmpty()) {

    return false;

  } else {

    // Cleanup the previously broadcast valuesets
    Broadcast<BroadcastableValueSets> old = valueSetStack.pop();

    old.destroy();

    if (valueSetStack.isEmpty()) {

      return false;

    } else {

      // Re-apply the previous function.
      Broadcast<BroadcastableValueSets> current = valueSetStack.peek();

      spark.udf()
          .register("in_valueset",
              new InValuesetUdf(current),
              DataTypes.BooleanType);

      return true;
    }
  }
}
 
Developer: cerner, Project: bunsen, Lines: 38, Source: ValueSetUdfs.java
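The stack discipline in Example 11 generalizes beyond UDFs: each broadcast stays alive while it may still be referenced, and destroy() is called exactly once, at the moment a value is popped and becomes unreachable. A minimal sketch of the same pattern with hypothetical names (this is not Bunsen's API):

import java.util.ArrayDeque;
import java.util.Deque;

import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

public class BroadcastStackSketch {
    private static final Deque<Broadcast<String>> stack = new ArrayDeque<>();

    public static synchronized void push(JavaSparkContext jsc, String value) {
        stack.push(jsc.broadcast(value));
    }

    /** @return true if a broadcast is still active after the pop, false otherwise */
    public static synchronized boolean pop() {
        if (stack.isEmpty()) {
            return false;
        }
        // the popped value is no longer reachable, so destroy it exactly once
        stack.pop().destroy();
        return !stack.isEmpty();
    }
}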


Note: The org.apache.spark.broadcast.Broadcast.destroy method examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's License before redistributing or using the code, and do not reproduce this article without permission.