This article collects typical usage examples of the Java method org.apache.spark.broadcast.Broadcast.destroy. If you are wondering what Broadcast.destroy does, how to call it, or want to see it in working code, the hand-picked examples below may help. You can also read further about the enclosing class, org.apache.spark.broadcast.Broadcast.
Listed below are 11 code examples of Broadcast.destroy, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
Example 1: main
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
public static void main(String[] args) {
    // SparkConf conf = new SparkConf().setMaster("local").setAppName("BroadCasting");
    // JavaSparkContext jsc = new JavaSparkContext(conf);
    //
    // Broadcast<String> broadcastVar = jsc.broadcast("Hello Spark");
    //
    SparkSession sparkSession = SparkSession.builder().master("local").appName("My App")
            .config("spark.sql.warehouse.dir", "file:////C:/Users/sgulati/spark-warehouse").getOrCreate();
    Broadcast<String> broadcastVar = sparkSession.sparkContext().broadcast("Hello Spark",
            scala.reflect.ClassTag$.MODULE$.apply(String.class));
    System.out.println(broadcastVar.getValue());
    broadcastVar.unpersist();
    // broadcastVar.unpersist(true);
    broadcastVar.destroy();
}
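The explicit scala.reflect.ClassTag above is only needed because SparkSession.sparkContext() returns the Scala SparkContext. As the commented-out lines hint, the Java-friendly route is JavaSparkContext, whose broadcast method needs no ClassTag. A minimal sketch, reusing the sparkSession from Example 1 (the variable names are illustrative):

import org.apache.spark.api.java.JavaSparkContext;

JavaSparkContext jsc = JavaSparkContext.fromSparkContext(sparkSession.sparkContext());
Broadcast<String> greeting = jsc.broadcast("Hello Spark"); // no ClassTag required
System.out.println(greeting.value());
greeting.destroy(); // removes the broadcast data from the driver and all executors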
Example 2: find
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
protected boolean find(ColumnCombinationBitset columnCombination) {
    if (this.columnCombinationMap.isEmpty()) {
        return false;
    } else {
        Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(columnCombination);
        JavaRDD<ColumnCombinationBitset> check =
                this.columnCombinationMap.filter((ColumnCombinationBitset ccb) -> ccb.equals((Object) bCcb.value()));
        if (check.isEmpty()) {
            bCcb.destroy();
            return false;
        } else {
            bCcb.destroy();
            return true;
        }
    }
}
Example 3: getNextParentColumnCombination
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
protected ColumnCombinationBitset getNextParentColumnCombination(ColumnCombinationBitset column) {
    Broadcast<ColumnCombinationBitset> bColumn = Singleton.getSparkContext().broadcast(column);
    // if the minimal positives contain the column, return null
    if (!this.minimalPositives.filter((ColumnCombinationBitset ccb) -> ccb.equals(bColumn.value())).isEmpty()) {
        return null;
    }
    List<ColumnCombinationBitset> supersets = column.getDirectSupersets(this.bitmaskForNonUniqueColumns);
    JavaRDD<ColumnCombinationBitset> supersetsRdd = Singleton.getSparkContext().parallelize(supersets);
    // destroy the broadcast variable
    bColumn.destroy();
    return this.findUnprunedSet(supersetsRdd);
}
Example 4: getNextChildColumnCombination
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
protected ColumnCombinationBitset getNextChildColumnCombination(ColumnCombinationBitset column) {
    if (column.size() == 1) {
        return null;
    }
    Broadcast<ColumnCombinationBitset> bColumn = Singleton.getSparkContext().broadcast(column);
    // if the maximal negatives contain the column, return null
    if (!this.maximalNegatives.filter((ColumnCombinationBitset ccb) -> ccb.equals(bColumn.value())).isEmpty()) {
        return null;
    }
    List<ColumnCombinationBitset> subsets = column.getDirectSubsets();
    JavaRDD<ColumnCombinationBitset> subsetsRdd = Singleton.getSparkContext().parallelize(subsets);
    // destroy the broadcast variable
    bColumn.destroy();
    return this.findUnprunedSet(subsetsRdd);
}
Example 5: isMinimalPositive
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * Check whether the given column combination is a minimal unique.
 */
protected boolean isMinimalPositive(ColumnCombinationBitset ccb) {
    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(ccb);
    JavaRDD<ColumnCombinationBitset> rdd = this.minimalPositives.filter((ColumnCombinationBitset c) -> {
        return c.equals(bCcb.value());
    });
    // Evaluate the lazy filter before destroying the broadcast it references.
    boolean contained = !rdd.isEmpty();
    bCcb.destroy();
    return contained;
}
Example 6: isMaximalNegative
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * Check whether the given column combination is a maximal non-unique.
 */
protected boolean isMaximalNegative(ColumnCombinationBitset ccb) {
    Broadcast<ColumnCombinationBitset> bCcb = Singleton.getSparkContext().broadcast(ccb);
    JavaRDD<ColumnCombinationBitset> rdd = this.maximalNegatives.filter((ColumnCombinationBitset c) -> {
        return c.equals(bCcb.value());
    });
    // Evaluate the lazy filter before destroying the broadcast it references.
    boolean contained = !rdd.isEmpty();
    bCcb.destroy();
    return contained;
}
Example 7: cleanupBroadcastVariable
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * This call destroys a broadcast variable at all executors and the driver.
 * Hence, it is intended to be used on rmvar only. Depending on the
 * ASYNCHRONOUS_VAR_DESTROY configuration, this is asynchronous or not.
 *
 * @param bvar broadcast variable
 */
public static void cleanupBroadcastVariable(Broadcast<?> bvar)
{
    // In comparison to 'unpersist' (which would only delete the broadcast
    // from the executors), this call also deletes related data from the driver.
    if( bvar.isValid() ) {
        bvar.destroy( !ASYNCHRONOUS_VAR_DESTROY );
    }
}
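For context on the comment above: unpersist() only removes the cached copies on the executors, and the broadcast can still be read afterwards (Spark re-sends the data on demand), whereas destroy() also removes the driver-side data, so any later read fails. A minimal sketch of the difference, assuming a JavaSparkContext named jsc (both the context and the variable are illustrative, not part of this example):

Broadcast<int[]> lookup = jsc.broadcast(new int[]{1, 2, 3});
lookup.unpersist();             // executors drop their copies; the variable stays usable
int first = lookup.value()[0];  // still works: the data is re-broadcast when needed
lookup.destroy();               // removes data and metadata on the driver and all executors
// lookup.value();              // would now throw org.apache.spark.SparkException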
Example 8: destroyBroadcast
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/** Sometimes Spark has trouble destroying a broadcast variable, but we'd like the app to continue anyway. */
public static <T> void destroyBroadcast(final Broadcast<T> broadcast, final String whatBroadcast) {
    try {
        broadcast.destroy();
    } catch (final Exception e) {
        logger.warn("Failed to destroy broadcast for " + whatBroadcast, e);
    }
}
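A short usage sketch of this helper; the broadcast variable, its contents, and buildContigIndexMap() are hypothetical placeholders:

final Broadcast<Map<String, Integer>> contigIndex = ctx.broadcast(buildContigIndexMap()); // buildContigIndexMap() is hypothetical
// ... use contigIndex.value() inside Spark transformations ...
destroyBroadcast(contigIndex, "contig index map"); // on failure this logs a warning instead of aborting the app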
Example 9: operate
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
@Override
public INDArray operate(@Nonnull final INDArray W_tl) throws DimensionMismatchException {
    if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents)
        throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);
    /* Z F W */
    final long startTimeZFW = System.nanoTime();
    final INDArray Z_F_W_tl = Nd4j.create(numTargets, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
            Z_F_W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                    F_tt.operate(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
    Z_F_W_tl.assign(Nd4j.gemm(Z_F_W_tl, Z_ll, false, false));
    final long endTimeZFW = System.nanoTime();
    /* perform a broadcast hash join */
    final long startTimeQW = System.nanoTime();
    final Map<LinearlySpacedIndexBlock, INDArray> W_tl_map =
            CoverageModelSparkUtils.partitionINDArrayToMap(targetSpaceBlocks, W_tl);
    final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_tl_bc = ctx.broadcast(W_tl_map);
    final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(
            computeRDD.mapValues(cb -> {
                final INDArray W_tl_chunk = W_tl_bc.value().get(cb.getTargetSpaceBlock());
                final INDArray Q_tll_chunk = cb.getINDArrayFromCache(CoverageModelEMComputeBlock.CoverageModelICGCacheNode.Q_tll);
                final Collection<INDArray> W_Q_chunk = IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
                        .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti))
                                .mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
                        .collect(Collectors.toList());
                return Nd4j.vstack(W_Q_chunk);
            }), 0);
    W_tl_bc.destroy();
    // final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_tl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_tl,
    //         targetSpaceBlocks, ctx, true);
    // final INDArray Q_W_tl = CoverageModelSparkUtils.assembleINDArrayBlocks(
    //         computeRDD.join(W_tl_RDD).mapValues(p -> {
    //             final CoverageModelEMComputeBlock cb = p._1;
    //             final INDArray W_tl_chunk = p._2;
    //             final INDArray Q_tll_chunk = cb.getINDArrayFromCache("Q_tll");
    //             return Nd4j.vstack(IntStream.range(0, cb.getTargetSpaceBlock().getNumElements()).parallel()
    //                     .mapToObj(ti -> Q_tll_chunk.get(NDArrayIndex.point(ti)).mmul(W_tl_chunk.get(NDArrayIndex.point(ti)).transpose()))
    //                     .collect(Collectors.toList()));
    //         }), false);
    // W_tl_RDD.unpersist();
    final long endTimeQW = System.nanoTime();
    logger.debug("Local [Z] [F] [W] timing: " + (endTimeZFW - startTimeZFW)/1000000 + " ms");
    logger.debug("Spark [Q] [W] timing: " + (endTimeQW - startTimeQW)/1000000 + " ms");
    return Q_W_tl.addi(Z_F_W_tl);
}
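As the in-code comment notes, this example follows the broadcast hash join pattern: the partitioned W_tl matrix is broadcast to every executor as a map keyed by target-space block, so each compute block looks up its own chunk locally inside mapValues instead of going through the shuffle-based join kept in the commented-out alternative; once the result has been assembled on the driver, the broadcast is destroyed.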
Example 10: operate
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
@Override
public INDArray operate(@Nonnull final INDArray W_tl)
        throws DimensionMismatchException {
    if (W_tl.rank() != 2 || W_tl.shape()[0] != numTargets || W_tl.shape()[1] != numLatents) {
        throw new DimensionMismatchException(W_tl.length(), numTargets * numLatents);
    }
    long startTimeRFFT = System.nanoTime();
    /* forward rfft */
    final INDArray W_kl = Nd4j.create(fftSize, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
            W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                    Nd4j.create(F_tt.getForwardFFT(W_tl.get(NDArrayIndex.all(), NDArrayIndex.point(li))),
                            new int[]{fftSize, 1})));
    long endTimeRFFT = System.nanoTime();
    /* apply the preconditioner in the Fourier space */
    long startTimePrecond = System.nanoTime();
    final Map<LinearlySpacedIndexBlock, INDArray> W_kl_map =
            CoverageModelSparkUtils.partitionINDArrayToMap(fourierSpaceBlocks, W_kl);
    final Broadcast<Map<LinearlySpacedIndexBlock, INDArray>> W_kl_bc = ctx.broadcast(W_kl_map);
    final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> preconditionedWRDD = linOpPairRDD
            .mapToPair(p -> {
                final INDArray W_kl_chunk = W_kl_bc.value().get(p._1);
                final INDArray linOp_chunk = p._2;
                final int blockSize = linOp_chunk.shape()[0];
                final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel()
                        .mapToObj(k -> CoverageModelEMWorkspaceMathUtils.linsolve(linOp_chunk.get(NDArrayIndex.point(k)),
                                W_kl_chunk.get(NDArrayIndex.point(k))))
                        .collect(Collectors.toList());
                return new Tuple2<>(p._1, Nd4j.vstack(linOpWList));
            });
    W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocksFromRDD(preconditionedWRDD, 0));
    W_kl_bc.destroy();
    // final JavaPairRDD<LinearlySpacedIndexBlock, INDArray> W_kl_RDD = CoverageModelSparkUtils.rddFromINDArray(W_kl,
    //         fourierSpaceBlocks, ctx, true);
    // W_kl.assign(CoverageModelSparkUtils.assembleINDArrayBlocks(linOpPairRDD.join((W_kl_RDD))
    //         .mapValues(p -> {
    //             final INDArray linOp = p._1;
    //             final INDArray W = p._2;
    //             final int blockSize = linOp.shape()[0];
    //             final List<INDArray> linOpWList = IntStream.range(0, blockSize).parallel().mapToObj(k ->
    //                     CoverageModelEMWorkspaceMathUtils.linsolve(linOp.get(NDArrayIndex.point(k)),
    //                             W.get(NDArrayIndex.point(k))))
    //                     .collect(Collectors.toList());
    //             return Nd4j.vstack(linOpWList);
    //         }), false));
    // W_kl_RDD.unpersist();
    long endTimePrecond = System.nanoTime();
    /* irfft */
    long startTimeIRFFT = System.nanoTime();
    final INDArray res = Nd4j.create(numTargets, numLatents);
    IntStream.range(0, numLatents).parallel().forEach(li ->
            res.get(NDArrayIndex.all(), NDArrayIndex.point(li)).assign(
                    F_tt.getInverseFFT(W_kl.get(NDArrayIndex.all(), NDArrayIndex.point(li)))));
    long endTimeIRFFT = System.nanoTime();
    logger.debug("Local FFT timing: " + (endTimeRFFT - startTimeRFFT + endTimeIRFFT - startTimeIRFFT)/1000000 + " ms");
    logger.debug("Spark preconditioner application timing: " + (endTimePrecond - startTimePrecond)/1000000 + " ms");
    return res;
}
Example 11: popUdf
import org.apache.spark.broadcast.Broadcast; // import the package/class the method depends on
/**
 * Pops a BroadcastableValueSets from the user-defined function stack.
 *
 * @param spark the spark session
 * @return true if there is still a registered in_valueset UDF, false otherwise
 */
public static synchronized boolean popUdf(SparkSession spark) {
    if (valueSetStack.isEmpty()) {
        return false;
    } else {
        // Cleanup the previously broadcast valuesets
        Broadcast<BroadcastableValueSets> old = valueSetStack.pop();
        old.destroy();
        if (valueSetStack.isEmpty()) {
            return false;
        } else {
            // Re-apply the previous function.
            Broadcast<BroadcastableValueSets> current = valueSetStack.peek();
            spark.udf()
                    .register("in_valueset",
                            new InValuesetUdf(current),
                            DataTypes.BooleanType);
            return true;
        }
    }
}
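The matching push side of this stack does not appear in this listing. A rough sketch of what it might look like, assuming the same valueSetStack field and InValuesetUdf class and deriving a JavaSparkContext from the session (all of this is an assumption for illustration, not the library's actual implementation):

public static synchronized void pushUdf(SparkSession spark, BroadcastableValueSets valueSets) {
    JavaSparkContext jsc = JavaSparkContext.fromSparkContext(spark.sparkContext());
    // Broadcast the valuesets and remember them so a later popUdf can destroy them.
    Broadcast<BroadcastableValueSets> broadcast = jsc.broadcast(valueSets);
    valueSetStack.push(broadcast);
    // Register (or overwrite) the in_valueset UDF backed by the new broadcast.
    spark.udf().register("in_valueset", new InValuesetUdf(broadcast), DataTypes.BooleanType);
}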