Java Broadcast.destroy Method Code Examples

This article collects typical usage examples of the Java method org.apache.spark.broadcast.Broadcast.destroy. If you are asking yourself what Broadcast.destroy does, how to use it, or where to find examples of it, the curated method examples below should help. You can also explore further usage of the enclosing class, org.apache.spark.broadcast.Broadcast.


The following shows 11 code examples of Broadcast.destroy, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
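Before the examples, here is a minimal, self-contained sketch of the broadcast lifecycle that all of them assume: create a broadcast on the driver, read it on executors via value(), and call destroy() only once no further job will touch it. This sketch is illustrative and not taken from any of the projects below; the class and variable names are made up.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.broadcast.Broadcast;

public class BroadcastLifecycle {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[*]").setAppName("BroadcastLifecycle");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        Map<Integer, String> lookup = new HashMap<>();
        lookup.put(1, "one");
        lookup.put(2, "two");

        // Ship the lookup table to every executor once.
        Broadcast<Map<Integer, String>> bLookup = jsc.broadcast(lookup);

        // Tasks read the broadcast through value(), never the driver-side map.
        jsc.parallelize(Arrays.asList(1, 2, 1))
           .map(k -> bLookup.value().getOrDefault(k, "?"))
           .collect()
           .forEach(System.out::println);

        // destroy() removes the data from both driver and executors;
        // the variable must not be used by any job after this point.
        bLookup.destroy();
        jsc.stop();
    }
}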

Example 1: main

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
public static void main(String[] args) {

//  SparkConf conf = new SparkConf().setMaster("local").setAppName("BroadCasting");
//  JavaSparkContext jsc = new JavaSparkContext(conf);
//
//  Broadcast<String> broadcastVar = jsc.broadcast("Hello Spark");

    SparkSession sparkSession = SparkSession.builder().master("local").appName("My App")
            .config("spark.sql.warehouse.dir", "file:///C:/Users/sgulati/spark-warehouse").getOrCreate();

    // Broadcasting through the Scala SparkContext requires an explicit ClassTag.
    Broadcast<String> broadcastVar = sparkSession.sparkContext().broadcast("Hello Spark",
            scala.reflect.ClassTag$.MODULE$.apply(String.class));
    System.out.println(broadcastVar.value());

    // Drop the executor-side copies, then remove the variable everywhere for good.
    broadcastVar.unpersist();
    // broadcastVar.unpersist(true);
    broadcastVar.destroy();
}
 
Developer: PacktPublishing | Project: Apache-Spark-2x-for-Java-Developers | Lines: 20 | Source file: BroadcastVariable.java
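The explicit ClassTag in this example is needed because it broadcasts through the underlying Scala SparkContext; the JavaSparkContext route (shown commented out above) supplies the ClassTag internally. A minimal sketch of that simpler route, assuming the same sparkSession is in scope:

JavaSparkContext jsc = JavaSparkContext.fromSparkContext(sparkSession.sparkContext());
Broadcast<String> broadcastVar = jsc.broadcast("Hello Spark"); // no ClassTag needed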

Example 2: find

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
protected boolean find(ColumnCombinationBitset columnCombination) {

    if (this.columnCombinationMap.isEmpty()) {
        return false;
    }

    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(columnCombination);
    JavaRDD<ColumnCombinationBitset> check =
            this.columnCombinationMap.filter((ColumnCombinationBitset ccb) -> ccb.equals(bCcb.value()));

    // isEmpty() triggers the job; only afterwards may the broadcast be destroyed.
    boolean found = !check.isEmpty();
    bCcb.destroy();
    return found;
}
 
Developer: mpoiitis | Project: DUCCspark | Lines: 20 | Source file: SimplePruningGraph.java
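Examples 2 through 6 all follow the same lookup pattern: broadcast the probe combination, filter the distributed set for an equal element, force the job with isEmpty(), and only then destroy the broadcast. The order matters because Spark filters are lazy; the tasks read the broadcast when the action runs, so destroying it before the action would make those tasks fail.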

Example 3: getNextParentColumnCombination

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
protected ColumnCombinationBitset getNextParentColumnCombination(ColumnCombinationBitset column) {

    Broadcast<ColumnCombinationBitset> bColumn = Singleton.getSparkContext().broadcast(column);

    // If the minimal positives contain this column, there is no parent to visit.
    if (!this.minimalPositives.filter((ColumnCombinationBitset ccb) -> ccb.equals(bColumn.value())).isEmpty()) {
        bColumn.destroy(); // destroy on the early return too, so the broadcast does not leak
        return null;
    }
    List<ColumnCombinationBitset> supersets = column.getDirectSupersets(this.bitmaskForNonUniqueColumns);
    JavaRDD<ColumnCombinationBitset> supersetsRdd = Singleton.getSparkContext().parallelize(supersets);

    // Destroy the broadcast variable; the supersets RDD no longer references it.
    bColumn.destroy();

    return this.findUnprunedSet(supersetsRdd);
}
 
Developer: mpoiitis | Project: DUCCspark | Lines: 17 | Source file: SimpleGraphTraverser.java

Example 4: getNextChildColumnCombination

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
protected ColumnCombinationBitset getNextChildColumnCombination(ColumnCombinationBitset column) {
    if (column.size() == 1) {
        return null;
    }
    Broadcast<ColumnCombinationBitset> bColumn = Singleton.getSparkContext().broadcast(column);

    // If the maximal negatives contain this column, there is no child to visit.
    if (!this.maximalNegatives.filter((ColumnCombinationBitset ccb) -> ccb.equals(bColumn.value())).isEmpty()) {
        bColumn.destroy(); // destroy on the early return too, so the broadcast does not leak
        return null;
    }

    List<ColumnCombinationBitset> subsets = column.getDirectSubsets();
    JavaRDD<ColumnCombinationBitset> subsetsRdd = Singleton.getSparkContext().parallelize(subsets);

    // Destroy the broadcast variable; the subsets RDD no longer references it.
    bColumn.destroy();

    return this.findUnprunedSet(subsetsRdd);
}
 
Developer: mpoiitis | Project: DUCCspark | Lines: 20 | Source file: SimpleGraphTraverser.java

Example 5: isMinimalPositive

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
/**
 * Check whether the given column combination is a minimal unique.
 */
protected boolean isMinimalPositive(ColumnCombinationBitset ccb) {

    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(ccb);
    JavaRDD<ColumnCombinationBitset> rdd = this.minimalPositives.filter((ColumnCombinationBitset c) -> {
        return c.equals(bCcb.value());
    });

    // Force the job before destroying the broadcast: filter() is lazy, and the
    // tasks read bCcb when isEmpty() runs, not when filter() is declared.
    boolean empty = rdd.isEmpty();
    bCcb.destroy();
    return !empty;
}
 
Developer: mpoiitis | Project: DUCCspark | Lines: 14 | Source file: SimpleGraphTraverser.java

Example 6: isMaximalNegative

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
/**
 * Check whether the given column combination is a maximal non-unique.
 */
protected boolean isMaximalNegative(ColumnCombinationBitset ccb) {

    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(ccb);
    JavaRDD<ColumnCombinationBitset> rdd = this.maximalNegatives.filter((ColumnCombinationBitset c) -> {
        return c.equals(bCcb.value());
    });

    // Same as isMinimalPositive: run the job before the broadcast is destroyed.
    boolean empty = rdd.isEmpty();
    bCcb.destroy();
    return !empty;
}
 
Developer: mpoiitis | Project: DUCCspark | Lines: 14 | Source file: SimpleGraphTraverser.java

Example 7: cleanupBroadcastVariable

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
/**
 * This call destroys a broadcast variable at all executors and the driver.
 * Hence, it is intended to be used on rmvar only. Depending on the
 * ASYNCHRONOUS_VAR_DESTROY configuration, this is asynchronous or not.
 *
 * @param bvar broadcast variable
 */
public static void cleanupBroadcastVariable(Broadcast<?> bvar)
{
	//In comparison to 'unpersist' (which would only delete the broadcast
	//from the executors), this call also deletes related data from the driver.
	if( bvar.isValid() ) {
		bvar.destroy( !ASYNCHRONOUS_VAR_DESTROY );
	}
}
 
Developer: apache | Project: systemml | Lines: 16 | Source file: SparkExecutionContext.java
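The boolean passed above controls blocking semantics: bvar.destroy(!ASYNCHRONOUS_VAR_DESTROY) blocks until every executor has dropped its blocks when asynchronous destruction is disabled, and returns immediately (cleanup happens in the background) when it is enabled. Note that in current Spark releases the one-argument destroy(boolean) is marked private[spark]; Java callers like this one can still reach it because Scala package-private members compile to public bytecode, but the no-argument destroy() is the stable public entry point.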

Example 8: destroyBroadcast

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
/** Sometimes Spark has trouble destroying a broadcast variable, but we'd like the app to continue anyway. */
public static <T> void destroyBroadcast(final Broadcast<T> broadcast, final String whatBroadcast) {
    try {
        broadcast.destroy();
    } catch ( final Exception e ) {
        logger.warn("Failed to destroy broadcast for " + whatBroadcast, e);
    }
}
 
Developer: broadinstitute | Project: gatk | Lines: 9 | Source file: SparkUtils.java
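A hypothetical call site for this helper; the broadcast handle and the label are illustrative, not taken from the GATK sources:

// Best-effort cleanup: a failure is logged as a warning instead of aborting the run.
SparkUtils.destroyBroadcast(referenceBroadcast, "reference metadata");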

Example 9: operate

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
    @Override
    public INDArray operate(@Nonnull final INDArray W_tl) throws DimensionMismatchException {
        if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents)
            throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);

        /* Z F W */
        final long startTimeZFW = System.nanoTime();
        final INDArray Z_F_W_tl = Nd4j.create(numTargets, numLatents);
        IntStream.range(0, numLatents).parallel().forEach(li ->
                Z_F_W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                        F_tt.operate(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
        Z_F_W_tl.assign(Nd4j.gemm(Z_F_W_tl, Z_ll, false, false));
        final long endTimeZFW = System.nanoTime();

        /* perform a broadcast hash join */
        final long startTimeQW = System.nanoTime();
        final Map<LinearlySpacedIndexBlock, INDArray> W_tl_map = CoverageModelSparkUtils.partitionINDArrayToMap(targetSpaceBlocks, W_tl);
        final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_tl_bc = ctx.broadcast(W_tl_map);
        final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(
                computeRDD.mapValues(cb -> {
                    final INDArray W_tl_chunk = W_tl_bc.value().get(cb.getTargetSpaceBlock());
                    final INDArray Q_tll_chunk = cb.getINDArrayFromCache(CoverageModelEMComputeBlock.CoverageModelICGCacheNode.Q_tll);
                    final Collection<INDArray> W_Q_chunk = IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
                            .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti))
                                    .mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
                            .collect(Collectors.toList());
                    return Nd4j.vstack(W_Q_chunk);
                }), 0);
        W_tl_bc.destroy();
//        final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_tl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_tl,
//                targetSpaceBlocks, ctx, true);
//        final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocks(
//                computeRDD.join(W_tl_RDD).mapValues(p -> {
//                    final CoverageModelEMComputeBlock cb = p._1;
//                    final INDArray W_tl_chunk = p._2;
//                    final INDArray Q_tll_chunk = cb.getINDArrayFromCache("Q_tll");
//                    return Nd4j.vstack(IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
//                            .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti)).mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
//                            .collect(Collectors.toList()));
//                }), false);
//        W_tl_RDD.unpersist();
        final long endTimeQW = System.nanoTime();

        logger.debug("Local [Z] [F] [W] timing: " + (endTimeZFW - startTimeZFW)/1000000 + " ms");
        logger.debug("Spark [Q] [W] timing: " + (endTimeQW - startTimeQW)/1000000 + " ms");

        return Q_W_tl.addi(Z_F_W_tl);
    }
 
Developer: broadinstitute | Project: gatk-protected | Lines: 49 | Source file: CoverageModelWLinearOperatorSpark.java
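Note the ordering in the broadcast hash join above: assembleINDArrayBlocksFromRDD evidently materializes the mapped RDD (an action) before W_tl_bc.destroy() runs. Destroying the broadcast first would break the tasks that read it through W_tl_bc.value(), the same laziness pitfall fixed in Examples 5 and 6.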

Example 10: operate

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
    @Override
    public INDArray operate(@Nonnull final INDArray W_tl)
            throws DimensionMismatchException {
        if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents) {
            throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);
        }
        long startTimeRFFT = System.nanoTime();
        /* forward rfft */
        final INDArray W_kl = Nd4j.create(fftSize, numLatents);
        IntStream.range(0, numLatents).parallel().forEach(li ->
                W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                        Nd4j.create(F_tt.getForwardFFT(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li))),
                                new int[]{fftSize, 1})));
        long endTimeRFFT = System.nanoTime();

        /* apply the preconditioner in the Fourier space */
        long startTimePrecond = System.nanoTime();
        final Map<LinearlySpacedIndexBlock, INDArray> W_kl_map = CoverageModelSparkUtils.partitionINDArrayToMap(fourierSpaceBlocks, W_kl);
        final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_kl_bc = ctx.broadcast(W_kl_map);
        final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> preconditionedWRDD = linOpPairRDD
                .mapToPair(p -> {
                    final INDArray W_kl_chunk = W_kl_bc.value().get(p._1);
                    final INDArray linOp_chunk = p._2;
                    final int blockSize = linOp_chunk.shape()[0];
                    final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel()
                            .mapToObj(k -> CoverageModelEMWorkspaceMathUtils.linsolve(linOp_chunk.get(NDArrayIndex.point(k)),
                                    W_kl_chunk.get(NDArrayIndex.point(k))))
                            .collect(Collectors.toList());
                    return new Tuple2<>(p._1, Nd4j.vstack(linOpWList));
                });
        W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(preconditionedWRDD, 0));
        W_kl_bc.destroy();
//        final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_kl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_kl,
//                fourierSpaceBlocks, ctx, true);
//        W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocks(linOpPairRDD.join((W_kl_RDD))
//                .mapValues(p -> {
//                    final INDArray linOp = p._1;
//                    final INDArray W = p._2;
//                    final int blockSize = linOp.shape()[0];
//                    final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel().mapToObj(k ->
//                            CoverageModelEMWorkspaceMathUtils.linsolve(linOp.get(NDArrayIndex.point(k)),
//                                    W.get(NDArrayIndex.point(k))))
//                            .collect(Collectors.toList());
//                    return Nd4j.vstack(linOpWList);
//                }), false));
//        W_kl_RDD.unpersist();
        long endTimePrecond = System.nanoTime();

        /* irfft */
        long startTimeIRFFT = System.nanoTime();
        final INDArray res = Nd4j.create(numTargets, numLatents);
        IntStream.range(0, numLatents).parallel().forEach(li ->
                res.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                        F_tt.getInverseFFT(W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
        long endTimeIRFFT = System.nanoTime();

        logger.debug("Local FFT timing: " + (endTimeRFFT - startTimeRFFT + endTimeIRFFT - startTimeIRFFT)/1000000 + " ms");
        logger.debug("Spark preconditioner application timing: " + (endTimePrecond - startTimePrecond)/1000000 + " ms");

        return res;
    }
 
Developer: broadinstitute | Project: gatk-protected | Lines: 62 | Source file: CoverageModelWPreconditionerSpark.java

Example 11: popUdf

import org.apache.spark.broadcast.Broadcast; //import the package/class this method depends on
/**
 * Pops a BroadcastableValueSets from the user-defined function stack.
 *
 * @param spark the spark session
 * @return true if there is still a registered in_valueset UDF, false otherwise
 */
public static synchronized boolean popUdf(SparkSession spark) {

  if (valueSetStack.isEmpty()) {

    return false;

  } else {

    // Cleanup the previously broadcast valuesets
    Broadcast<BroadcastableValueSets> old = valueSetStack.pop();

    old.destroy();

    if (valueSetStack.isEmpty()) {

      return false;

    } else {

      // Re-apply the previous function.
      Broadcast<BroadcastableValueSets> current = valueSetStack.peek();

      spark.udf()
          .register("in_valueset",
              new InValuesetUdf(current),
              DataTypes.BooleanType);

      return true;
    }
  }
}
 
Developer: cerner | Project: bunsen | Lines: 38 | Source file: ValueSetUdfs.java
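The stack discipline is what makes the destroy() call safe here: only the popped broadcast is destroyed, while the previously pushed one obtained via peek() is still valid when it is re-registered behind the in_valueset UDF, so queries planned against the re-applied function keep working.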


Note: The org.apache.spark.broadcast.Broadcast.destroy method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors. Consult each project's license before using or redistributing the code; please do not repost without permission.