This page collects typical usage examples of the Java method reactor.core.scheduler.Schedulers.newParallel. If you are wondering what Schedulers.newParallel does, how to call it, or what real-world usages look like, the curated code examples below should help. You can also explore further usage examples of its declaring class, reactor.core.scheduler.Schedulers.
A total of 11 code examples of Schedulers.newParallel are shown below, sorted by popularity by default.
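Before the examples, here is a minimal, self-contained sketch of the basic pattern (the class NewParallelSketch, the scheduler name "demo-parallel" and the Flux.range source are illustrative, not taken from the examples below): Schedulers.newParallel(name, parallelism) creates a fixed-size pool of worker threads that is typically combined with publishOn or runOn, and that must be disposed explicitly once it is no longer needed.

import java.time.Duration;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class NewParallelSketch {
    public static void main(String[] args) {
        // Create a capped pool of 4 workers whose threads are named "demo-parallel-N".
        Scheduler demoScheduler = Schedulers.newParallel("demo-parallel", 4);
        try {
            Flux.range(1, 8)
                .publishOn(demoScheduler)        // downstream operators now run on the pool
                .map(i -> i + " handled on " + Thread.currentThread().getName())
                .doOnNext(System.out::println)
                .blockLast(Duration.ofSeconds(5));
        }
        finally {
            demoScheduler.dispose();             // schedulers created via newXxx are caller-managed
        }
    }
}

The examples below exercise the same building blocks inside larger Reactor test scenarios.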
Example 1: testParallelWithJava8StreamsInput
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
/**
 * https://gist.github.com/nithril/444d8373ce67f0a8b853 Contribution by Nicolas Labrot
 * @throws InterruptedException on interrupt
 */
@Test
public void testParallelWithJava8StreamsInput() throws InterruptedException {
    Scheduler supplier = Schedulers.newParallel("test-p", 2);
    int max = ThreadLocalRandom.current()
                               .nextInt(100, 300);
    CountDownLatch countDownLatch = new CountDownLatch(max);
    Flux<Integer> worker = Flux.range(0, max)
                               .publishOn(asyncGroup);
    worker.parallel(2)
          .runOn(supplier)
          .map(v -> v)
          .subscribe(v -> countDownLatch.countDown());
    countDownLatch.await(10, TimeUnit.SECONDS);
    Assert.assertEquals(0, countDownLatch.getCount());
}
Example 2: consistentMultithreadingWithPartition
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Test
public void consistentMultithreadingWithPartition() throws InterruptedException {
    Scheduler supplier1 = Schedulers.newParallel("groupByPool", 2);
    Scheduler supplier2 = Schedulers.newParallel("partitionPool", 5);
    CountDownLatch latch = new CountDownLatch(10);
    /*Disposable c = */Flux.range(1, 10)
            .groupBy(n -> n % 2 == 0)
            .flatMap(stream -> stream.publishOn(supplier1)
                    .log("groupBy-" + stream.key()))
            .parallel(5)
            .runOn(supplier2)
            .sequential()
            .publishOn(asyncGroup)
            .log("join")
            .subscribe(t -> {
                latch.countDown();
            });
    latch.await(30, TimeUnit.SECONDS);
    assertThat("Not totally dispatched: " + latch.getCount(), latch.getCount() == 0);
    supplier1.dispose();
    supplier2.dispose();
}
Example 3: transformFlux
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Override
Flux<Integer> transformFlux(Flux<Integer> f) {
    Flux<String> otherStream = Flux.just("test", "test2", "test3");
    // System.out.println("Providing new downstream");
    Scheduler asyncGroup = Schedulers.newParallel("flux-p-tck", 2);
    BiFunction<Integer, String, Integer> combinator = (t1, t2) -> t1;
    return f.publishOn(sharedGroup)
            .parallel(2)
            .groups()
            .flatMap(stream -> stream.publishOn(asyncGroup)
                    .doOnNext(this::monitorThreadUse)
                    .scan((prev, next) -> next)
                    .map(integer -> -integer)
                    .filter(integer -> integer <= 0)
                    .map(integer -> -integer)
                    .bufferTimeout(batch, Duration.ofMillis(50))
                    .flatMap(Flux::fromIterable)
                    .flatMap(i -> Flux.zip(Flux.just(i), otherStream, combinator))
            )
            .publishOn(sharedGroup)
            .doAfterTerminate(asyncGroup::dispose)
            .doOnError(Throwable::printStackTrace);
}
Example 4: createSchedGroupPub
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Override
public Flux<String> createSchedGroupPub() {
    Scheduler subWorker = Schedulers.newSingle("sub-thread");
    Scheduler parallelGrp = Schedulers.newParallel("pub-grp", 8);
    Function<Employee, String> allCapsNames = (emp) -> emp.getFirstName().toUpperCase() + " " + emp.getLastName().toUpperCase();
    // publishOn moves the downstream flatMap onto the "pub-grp" parallel workers,
    // while each inner Mono is subscribed on the single "sub-thread" scheduler.
    Flux<String> grpFlux = Flux.fromIterable(employeeDaoImpl.getEmployees())
            .publishOn(parallelGrp)
            .flatMap((emp) -> {
                System.out.println("flatMap thread: " + Thread.currentThread().getName());
                return Mono.just(emp).map(allCapsNames).subscribeOn(subWorker);
            });
    return grpFlux;
}
Example 5: createSchedGroupSub
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Override
public Flux<String> createSchedGroupSub() {
    Scheduler pubWorker = Schedulers.newSingle("pub-thread");
    Scheduler parallelGrp = Schedulers.newParallel("sub-grp", 8);
    Function<Employee, String> allCapsNames = (emp) -> emp.getFirstName().toUpperCase() + " " + emp.getLastName().toUpperCase();
    // Mirror image of the previous example: the outer pipeline is published on the single
    // "pub-thread", and each inner Mono is subscribed on the "sub-grp" parallel pool.
    Flux<String> strFlux = Flux.fromIterable(employeeDaoImpl.getEmployees())
            .publishOn(pubWorker)
            .flatMap((str) -> {
                System.out.println("flatMap thread: " + Thread.currentThread().getName());
                return Mono.just(str).map(allCapsNames).subscribeOn(parallelGrp);
            });
    return strFlux;
}
Example 6: whenProcessorIsStreamed
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Test
public void whenProcessorIsStreamed() {
    // "When a processor is streamed"
    // given: "a source composable and an async downstream"
    ReplayProcessor<Integer> source = ReplayProcessor.create();
    Scheduler scheduler = Schedulers.newParallel("test", 2);
    try {
        Mono<List<Integer>> res = source.subscribeOn(scheduler)
                .delaySubscription(Duration.ofMillis(1L))
                .log("streamed")
                .map(it -> it * 2)
                .buffer()
                .publishNext();
        res.subscribe();
        // when: "the source accepts a value"
        source.onNext(1);
        source.onNext(2);
        source.onNext(3);
        source.onNext(4);
        source.onComplete();
        // then: "the res is passed on"
        assertThat(res.block()).containsExactly(2, 4, 6, 8);
    }
    finally {
        scheduler.dispose();
    }
}
Example 7: gh507
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Test
public void gh507() {
    Scheduler s = Schedulers.newSingle("subscribe");
    Scheduler s2 = Schedulers.newParallel("receive");
    Flux.from((Publisher<String>) subscriber -> {
        subscriber.onSubscribe(new Subscription() {
            private int totalCount;

            @Override
            public void request(long n) {
                for (int i = 0; i < n; i++) {
                    if (totalCount++ < 317) {
                        subscriber.onNext(String.valueOf(totalCount));
                    }
                    else {
                        subscriber.onComplete();
                    }
                }
            }

            @Override
            public void cancel() {
                // do nothing
            }
        });
    })
        .subscribeOn(s)
        .limitRate(10)
        .doOnNext(d -> {
            Mono.fromCallable(() -> d)
                .subscribeOn(s2)
                .block();
        })
        .blockLast();
    s.dispose();
}
Example 8: loadEnv
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@BeforeClass
public static void loadEnv() {
    ioGroup = Schedulers.newElastic("work");
    asyncGroup = Schedulers.newParallel("parallel", 4);
}
Example 9: multiplexUsingDispatchersAndSplit
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
/**
 * <pre>
 *                 forkStream
 *                 /        \      < - - - int
 *                v          v
 * persistenceStream        computationStream
 *                 \        /      < - - - List< String >
 *                  v      v
 *                 joinStream      < - - - String
 *                 splitStream
 *             observedSplitStream
 * </pre>
 * @throws Exception for convenience
 */
@Test(timeout = TIMEOUT)
public void multiplexUsingDispatchersAndSplit() throws Exception {
    final EmitterProcessor<Integer> forkEmitterProcessor = EmitterProcessor.create();
    final EmitterProcessor<Integer> computationEmitterProcessor = EmitterProcessor.create(false);

    Scheduler computation = Schedulers.newSingle("computation");
    Scheduler persistence = Schedulers.newSingle("persistence");
    Scheduler forkJoin = Schedulers.newParallel("forkJoin", 2);

    final Flux<List<String>> computationStream =
            computationEmitterProcessor.publishOn(computation)
                    .map(i -> {
                        final List<String> list = new ArrayList<>(i);
                        for (int j = 0; j < i; j++) {
                            list.add("i" + j);
                        }
                        return list;
                    })
                    .doOnNext(ls -> println("Computed: ", ls))
                    .log("computation");

    final EmitterProcessor<Integer> persistenceEmitterProcessor = EmitterProcessor.create(false);
    final Flux<List<String>> persistenceStream =
            persistenceEmitterProcessor.publishOn(persistence)
                    .doOnNext(i -> println("Persisted: ", i))
                    .map(i -> Collections.singletonList("done" + i))
                    .log("persistence");

    Flux<Integer> forkStream = forkEmitterProcessor.publishOn(forkJoin)
            .log("fork");

    forkStream.subscribe(computationEmitterProcessor);
    forkStream.subscribe(persistenceEmitterProcessor);

    final Flux<List<String>> joinStream = Flux.zip(computationStream, persistenceStream, (a, b) -> Arrays.asList(a, b))
            .publishOn(forkJoin)
            .map(listOfLists -> {
                listOfLists.get(0)
                        .addAll(listOfLists.get(1));
                return listOfLists.get(0);
            })
            .log("join");

    final Semaphore doneSemaphore = new Semaphore(0);

    final MonoProcessor<List<String>> listPromise = joinStream.flatMap(Flux::fromIterable)
            .log("resultStream")
            .collectList()
            .doOnTerminate(doneSemaphore::release)
            .toProcessor();
    listPromise.subscribe();

    forkEmitterProcessor.onNext(1);
    forkEmitterProcessor.onNext(2);
    forkEmitterProcessor.onNext(3);
    forkEmitterProcessor.onComplete();

    List<String> res = listPromise.block(Duration.ofSeconds(5));
    assertEquals(Arrays.asList("i0", "done1", "i0", "i1", "done2", "i0", "i1", "i2", "done3"), res);

    forkJoin.dispose();
    persistence.dispose();
    computation.dispose();
}
Example 10: init
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@BeforeMethod
public void init() {
    sharedGroup = Schedulers.newParallel("fluxion-tck", 2);
}
Example 11: parallelModeFused
import reactor.core.scheduler.Schedulers; // import of the class this method depends on
@Test
public void parallelModeFused() {
    Hooks.onOperatorDebug();
    Hooks.onEachOperator(p -> {
        System.out.println(Scannable.from(p).operatorName());
        return p;
    });

    Flux<Integer> source = Mono.just(1)
            .flux()
            .repeat(1000)
            .publish()
            .autoConnect();

    int ncpu = Math.max(8,
            Runtime.getRuntime()
                   .availableProcessors());
    Scheduler scheduler = Schedulers.newParallel("test", ncpu);

    try {
        Flux<Integer> result = ParallelFlux.from(source, ncpu)
                .runOn(scheduler)
                .map(v -> v + 1)
                .log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE)
                .sequential();

        AssertSubscriber<Integer> ts = AssertSubscriber.create();
        result.subscribe(ts);
        ts.await(Duration.ofSeconds(10));
        ts.assertSubscribed()
          .assertValueCount(1000)
          .assertComplete()
          .assertNoError();
    }
    finally {
        scheduler.dispose();
    }
}