This article collects typical usage examples of the Java method reactor.core.scheduler.Scheduler.dispose. If you are wondering what Scheduler.dispose does, how to use it, or where to find examples of it, the curated code samples below may help. You can also read further about its declaring class, reactor.core.scheduler.Scheduler.
The following shows 8 code examples of Scheduler.dispose, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
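Before the examples, here is a minimal, hypothetical sketch of the typical Scheduler lifecycle (the class name SchedulerDisposeSketch and the demo pipeline are illustrative, not taken from the samples below): create a scheduler, run work on it, then call dispose() so its backing threads are released.

import java.time.Duration;

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class SchedulerDisposeSketch {
    public static void main(String[] args) {
        // Create a dedicated single-threaded Scheduler (the name "demo" is arbitrary).
        Scheduler scheduler = Schedulers.newSingle("demo");
        try {
            // Run a small pipeline on that scheduler.
            Flux.range(1, 5)
                .publishOn(scheduler)
                .doOnNext(i -> System.out.println(Thread.currentThread().getName() + " -> " + i))
                .blockLast(Duration.ofSeconds(5));
        }
        finally {
            // Release the scheduler's resources; new work submitted afterwards is rejected.
            scheduler.dispose();
            System.out.println("disposed: " + scheduler.isDisposed());
        }
    }
}

Once dispose() has been called, any attempt to schedule new work is rejected with a RejectedExecutionException, which Example 4 below verifies explicitly.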
Example 1: sampleZipTest3
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void sampleZipTest3() throws Exception {
    int elements = 1;
    CountDownLatch latch = new CountDownLatch(elements + 1);

    EmitterProcessor<SensorData> sensorDataProcessor = EmitterProcessor.create();
    Scheduler scheduler = Schedulers.single();

    sensorDataProcessor.publishOn(scheduler)
                       .subscribe(d -> latch.countDown(), null, latch::countDown);

    Flux.zip(Flux.just(new SensorData(2L, 12.0f)), Flux.just(new SensorData(1L, 14.0f)), this::computeMin)
        .log("zip3")
        .subscribe(sensorDataProcessor);

    awaitLatch(null, latch);
    // release the scheduler once the zipped values have been delivered
    scheduler.dispose();
}
Example 2: testSubmitSession
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void testSubmitSession() throws Exception {
    FluxProcessor<Integer, Integer> processor = EmitterProcessor.create();
    AtomicInteger count = new AtomicInteger();
    CountDownLatch latch = new CountDownLatch(1);
    Scheduler scheduler = Schedulers.parallel();

    processor.publishOn(scheduler)
             .delaySubscription(Duration.ofMillis(1000))
             .limitRate(1)
             .subscribe(d -> {
                 count.incrementAndGet();
                 latch.countDown();
             });

    FluxSink<Integer> session = processor.sink();
    session.next(1);
    //System.out.println(emission);
    session.complete();

    latch.await(5, TimeUnit.SECONDS);
    Assert.assertTrue("latch : " + count, count.get() == 1);
    // dispose the scheduler once the assertion has run
    scheduler.dispose();
}
Example 3: consistentMultithreadingWithPartition
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void consistentMultithreadingWithPartition() throws InterruptedException {
    Scheduler supplier1 = Schedulers.newParallel("groupByPool", 2);
    Scheduler supplier2 = Schedulers.newParallel("partitionPool", 5);

    CountDownLatch latch = new CountDownLatch(10);

    /*Disposable c = */Flux.range(1, 10)
                           .groupBy(n -> n % 2 == 0)
                           .flatMap(stream -> stream.publishOn(supplier1)
                                                    .log("groupBy-" + stream.key()))
                           .parallel(5)
                           .runOn(supplier2)
                           .sequential()
                           .publishOn(asyncGroup)
                           .log("join")
                           .subscribe(t -> {
                               latch.countDown();
                           });

    latch.await(30, TimeUnit.SECONDS);
    assertThat("Not totally dispatched: " + latch.getCount(), latch.getCount() == 0);
    // dispose both dedicated parallel schedulers once all values have been dispatched
    supplier1.dispose();
    supplier2.dispose();
}
Example 4: onNextOnDisposedSchedulerThrows
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void onNextOnDisposedSchedulerThrows() {
    Scheduler scheduler = Schedulers.newSingle("onNextOnDisposedSchedulerThrows");
    // dispose up front so that any later scheduling attempt is rejected
    scheduler.dispose();
    Mono<String> source = Mono.just("foo").hide();

    try {
        StepVerifier.create(new MonoDelayElement<>(source, 2, TimeUnit.SECONDS, scheduler))
                    .expectSubscription()
                    .verifyComplete(); //complete not relevant
        fail("expected exception here");
    }
    catch (Throwable e) {
        Throwable t = Exceptions.unwrap(e);

        assertThat(t).isEqualTo(e)
                     .isInstanceOf(RejectedExecutionException.class)
                     .hasMessage("Scheduler unavailable");

        assertThat(e).satisfies(Exceptions::isBubbling);
    }
}
Example 5: whenProcessorIsStreamed
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void whenProcessorIsStreamed() {
    // "When a processor is streamed"
    // given: "a source composable and an async downstream"
    ReplayProcessor<Integer> source = ReplayProcessor.create();
    Scheduler scheduler = Schedulers.newParallel("test", 2);

    try {
        Mono<List<Integer>> res = source.subscribeOn(scheduler)
                                        .delaySubscription(Duration.ofMillis(1L))
                                        .log("streamed")
                                        .map(it -> it * 2)
                                        .buffer()
                                        .publishNext();
        res.subscribe();

        // when: "the source accepts a value"
        source.onNext(1);
        source.onNext(2);
        source.onNext(3);
        source.onNext(4);
        source.onComplete();

        // then: "the res is passed on"
        assertThat(res.block()).containsExactly(2, 4, 6, 8);
    }
    finally {
        // always dispose the dedicated scheduler, even if the assertion fails
        scheduler.dispose();
    }
}
Example 6: gh507
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void gh507() {
    Scheduler s = Schedulers.newSingle("subscribe");
    Scheduler s2 = Schedulers.newParallel("receive");

    Flux.from((Publisher<String>) subscriber -> {
        subscriber.onSubscribe(new Subscription() {
            private int totalCount;

            @Override
            public void request(long n) {
                for (int i = 0; i < n; i++) {
                    if (totalCount++ < 317) {
                        subscriber.onNext(String.valueOf(totalCount));
                    }
                    else {
                        subscriber.onComplete();
                    }
                }
            }

            @Override
            public void cancel() {
                // do nothing
            }
        });
    })
        .subscribeOn(s)
        .limitRate(10)
        .doOnNext(d -> {
            Mono.fromCallable(() -> d)
                .subscribeOn(s2)
                .block();
        })
        .blockLast();

    // dispose the "subscribe" scheduler once the flux has terminated
    s.dispose();
}
Example 7: multiplexUsingDispatchersAndSplit
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
/**
 * <pre>
 *                  forkStream
 *                 /          \      < - - - int
 *                v            v
 *  persistenceStream        computationStream
 *                 \          /      < - - - List< String >
 *                  v        v
 *                   joinStream      < - - - String
 *                  splitStream
 *             observedSplitStream
 * </pre>
 * @throws Exception for convenience
 */
@Test(timeout = TIMEOUT)
public void multiplexUsingDispatchersAndSplit() throws Exception {
    final EmitterProcessor<Integer> forkEmitterProcessor = EmitterProcessor.create();
    final EmitterProcessor<Integer> computationEmitterProcessor = EmitterProcessor.create(false);

    Scheduler computation = Schedulers.newSingle("computation");
    Scheduler persistence = Schedulers.newSingle("persistence");
    Scheduler forkJoin = Schedulers.newParallel("forkJoin", 2);

    final Flux<List<String>> computationStream =
            computationEmitterProcessor.publishOn(computation)
                                       .map(i -> {
                                           final List<String> list = new ArrayList<>(i);
                                           for (int j = 0; j < i; j++) {
                                               list.add("i" + j);
                                           }
                                           return list;
                                       })
                                       .doOnNext(ls -> println("Computed: ", ls))
                                       .log("computation");

    final EmitterProcessor<Integer> persistenceEmitterProcessor = EmitterProcessor.create(false);
    final Flux<List<String>> persistenceStream =
            persistenceEmitterProcessor.publishOn(persistence)
                                       .doOnNext(i -> println("Persisted: ", i))
                                       .map(i -> Collections.singletonList("done" + i))
                                       .log("persistence");

    Flux<Integer> forkStream = forkEmitterProcessor.publishOn(forkJoin)
                                                   .log("fork");

    forkStream.subscribe(computationEmitterProcessor);
    forkStream.subscribe(persistenceEmitterProcessor);

    final Flux<List<String>> joinStream =
            Flux.zip(computationStream, persistenceStream, (a, b) -> Arrays.asList(a, b))
                .publishOn(forkJoin)
                .map(listOfLists -> {
                    listOfLists.get(0)
                               .addAll(listOfLists.get(1));
                    return listOfLists.get(0);
                })
                .log("join");

    final Semaphore doneSemaphore = new Semaphore(0);

    final MonoProcessor<List<String>> listPromise = joinStream.flatMap(Flux::fromIterable)
                                                              .log("resultStream")
                                                              .collectList()
                                                              .doOnTerminate(doneSemaphore::release)
                                                              .toProcessor();
    listPromise.subscribe();

    forkEmitterProcessor.onNext(1);
    forkEmitterProcessor.onNext(2);
    forkEmitterProcessor.onNext(3);
    forkEmitterProcessor.onComplete();

    List<String> res = listPromise.block(Duration.ofSeconds(5));
    assertEquals(Arrays.asList("i0", "done1", "i0", "i1", "done2", "i0", "i1", "i2", "done3"), res);

    // release all three schedulers once the joined result has been asserted
    forkJoin.dispose();
    persistence.dispose();
    computation.dispose();
}
Example 8: parallelModeFused
import reactor.core.scheduler.Scheduler; // import the package/class this method depends on
@Test
public void parallelModeFused() {
    Hooks.onOperatorDebug();
    Hooks.onEachOperator(p -> {
        System.out.println(Scannable.from(p).operatorName());
        return p;
    });

    Flux<Integer> source = Mono.just(1)
                               .flux()
                               .repeat(1000)
                               .publish()
                               .autoConnect();
    int ncpu = Math.max(8, Runtime.getRuntime().availableProcessors());

    Scheduler scheduler = Schedulers.newParallel("test", ncpu);
    try {
        Flux<Integer> result = ParallelFlux.from(source, ncpu)
                                           .runOn(scheduler)
                                           .map(v -> v + 1)
                                           .log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE)
                                           .sequential();

        AssertSubscriber<Integer> ts = AssertSubscriber.create();
        result.subscribe(ts);
        ts.await(Duration.ofSeconds(10));
        ts.assertSubscribed()
          .assertValueCount(1000)
          .assertComplete()
          .assertNoError();
    }
    finally {
        // dispose the parallel scheduler regardless of the test outcome
        scheduler.dispose();
    }
}