This article collects typical usage examples of the Java method com.hazelcast.jet.JetInstance.shutdown. If you are asking yourself how exactly JetInstance.shutdown is used in Java, how it works, or what real examples of JetInstance.shutdown look like, the hand-picked code examples below may help. You can also browse further usage examples of the enclosing class, com.hazelcast.jet.JetInstance.
Below, 7 code examples of the JetInstance.shutdown method are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
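A pattern shared by nearly all of the examples below is calling shutdown() in a finally block, so the instance's threads and network connections are released even when the job fails or is cancelled. Here is a minimal sketch of that pattern (the class name ShutdownPattern is made up for illustration, and Jet.newJetClient() assumes a reachable Jet cluster):

import com.hazelcast.jet.Jet;
import com.hazelcast.jet.JetInstance;

public class ShutdownPattern {
    public static void main(String[] args) {
        // Connect as a client; Jet.newJetInstance() would start an embedded member instead.
        JetInstance jet = Jet.newJetClient();
        try {
            // ... build a DAG and submit jobs here ...
        } finally {
            // Always release the instance's resources, even if job submission failed.
            jet.shutdown();
        }
    }
}

To stop every Jet instance created in the current JVM at once (handy in tests), Jet.shutdownAll() can be used instead of shutting instances down one by one.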
Example 1: main
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
public static void main(String[] args) throws Exception {
    if (args.length != 3) { // all three arguments are required; the original == 0 check would let args[1] throw
        System.out.println("Usage: hdfs-to-map <name> <input path> <parallelism>");
        return;
    }
    String name = args[0];
    String inputPath = args[1];
    int parallelism = Integer.parseInt(args[2]);

    JetInstance client = Jet.newJetClient();
    IStreamMap<Long, String> map = client.getMap(name);
    map.clear();

    try {
        // Load the HDFS input into the map and report the elapsed time.
        long begin = System.currentTimeMillis();
        fillMap(client, name, inputPath, parallelism);
        long elapsed = System.currentTimeMillis() - begin;
        System.out.println("Time=" + elapsed);
    } finally {
        // Shut the client down whether or not the load succeeded.
        client.shutdown();
    }
}
Example 2: when_writeBufferedJobFailed_then_bufferDisposed
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
@Test
public void when_writeBufferedJobFailed_then_bufferDisposed() throws Exception {
    JetInstance instance = createJetMember();
    try {
        DAG dag = new DAG();
        Vertex source = dag.newVertex("source", StuckForeverSourceP::new);
        Vertex sink = dag.newVertex("sink", getLoggingBufferedWriter()).localParallelism(1);
        dag.edge(Edge.between(source, sink));

        Job job = instance.newJob(dag);
        // wait for the job to initialize
        Thread.sleep(5000);

        job.cancel();
        assertTrueEventually(() -> assertTrue("No \"dispose\", only: " + events, events.contains("dispose")), 60);
        System.out.println(events);
    } finally {
        instance.shutdown();
    }
}
Example 3: when_addAndRemoveNodeDuringExecution_then_completeSuccessfully
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
@Test
public void when_addAndRemoveNodeDuringExecution_then_completeSuccessfully() throws Throwable {
    // Given
    DAG dag = new DAG().vertex(new Vertex("test", new MockPS(StuckProcessor::new, nodeCount)));

    // When
    Job job = instances[0].newJob(dag);
    StuckProcessor.executionStarted.await();
    JetInstance instance = factory.newMember();
    instance.shutdown();
    StuckProcessor.proceedLatch.countDown();
    job.join();

    // Then
    assertEquals(nodeCount, MockPS.initCount.get());
    assertTrueEventually(() -> {
        assertEquals(nodeCount, MockPS.completeCount.get());
        assertThat(MockPS.completeErrors, empty());
    });
}
Example 4: when_shutdown_then_jobFuturesCanceled
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
@Test
public void when_shutdown_then_jobFuturesCanceled() throws Exception {
    JetInstance jet = newInstance();
    DAG dag = new DAG();
    dag.newVertex("blocking", new CloseableProcessorSupplier(BlockingProcessor::new)).localParallelism(1);
    jet.newJob(dag);
    assertTrueEventually(() -> assertTrue(BlockingProcessor.hasStarted), 3);
    jet.shutdown();
    assertBlockingProcessorEventuallyNotRunning();
}
Example 5: main
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
public static void main(String[] args) throws Exception {
    if (args.length != 9) {
        System.err.println("Usage:");
        System.err.println(" " + JetTradeMonitor.class.getSimpleName() +
                " <bootstrap.servers> <topic> <offset-reset> <maxLagMs> <windowSizeMs> <slideByMs> <snapshotIntervalMs> <snapshotMode> <outputPath>");
        System.err.println();
        System.err.println("<snapshotMode> - \"exactly-once\" or \"at-least-once\"");
        System.exit(1);
    }
    System.setProperty("hazelcast.logging.type", "log4j");

    String brokerUri = args[0];
    String topic = args[1];
    String offsetReset = args[2];
    int lagMs = Integer.parseInt(args[3]);
    int windowSize = Integer.parseInt(args[4]);
    int slideBy = Integer.parseInt(args[5]);
    int snapshotInterval = Integer.parseInt(args[6]);
    ProcessingGuarantee guarantee = ProcessingGuarantee.valueOf(args[7].toUpperCase().replace('-', '_'));
    String outputPath = args[8];

    Properties kafkaProps = getKafkaProperties(brokerUri, offsetReset);
    WindowDefinition windowDef = slidingWindowDef(windowSize, slideBy);
    AggregateOperation1<Object, LongAccumulator, Long> counting = AggregateOperations.counting();

    DAG dag = new DAG();
    Vertex readKafka = dag.newVertex("read-kafka", streamKafkaP(kafkaProps, topic))
            .localParallelism(1);
    Vertex extractTrade = dag.newVertex("extract-trade", mapP(entryValue()));
    Vertex insertWm = dag.newVertex("insert-wm",
            insertWatermarksP(Trade::getTime, withFixedLag(lagMs), emitByFrame(windowDef)));
    Vertex accumulateByF = dag.newVertex("accumulate-by-frame",
            accumulateByFrameP(Trade::getTicker, Trade::getTime, TimestampKind.EVENT, windowDef, counting));
    Vertex slidingW = dag.newVertex("sliding-window", combineToSlidingWindowP(windowDef, counting));
    Vertex formatOutput = dag.newVertex("format-output",
            mapP((TimestampedEntry entry) -> {
                long timeMs = currentTimeMillis();
                long latencyMs = timeMs - entry.getTimestamp();
                return Instant.ofEpochMilli(entry.getTimestamp()).atZone(ZoneId.systemDefault()).toLocalTime().toString()
                        + "," + entry.getKey()
                        + "," + entry.getValue()
                        + "," + timeMs
                        + "," + (latencyMs - lagMs);
            }));
    Vertex fileSink = dag.newVertex("write-file", writeFileP(outputPath))
            .localParallelism(1);
    dag
       .edge(between(readKafka, extractTrade).isolated())
       .edge(between(extractTrade, insertWm).isolated())
       .edge(between(insertWm, accumulateByF).partitioned(Trade::getTicker, HASH_CODE))
       .edge(between(accumulateByF, slidingW).partitioned(entryKey())
               .distributed())
       .edge(between(slidingW, formatOutput).isolated())
       .edge(between(formatOutput, fileSink));

    // Jet.newJetInstance();
    // JetBootstrap supplies the instance when the JAR is submitted to a running cluster via
    // the Jet submit script; the commented-out line above would start an embedded member instead.
    JetInstance jet = JetBootstrap.getInstance();

    System.out.println("Executing job...");
    JobConfig config = new JobConfig();
    config.setSnapshotIntervalMillis(snapshotInterval);
    config.setProcessingGuarantee(guarantee);
    Future<Void> future = jet.newJob(dag, config).getFuture();

    System.in.read();

    System.out.println("Cancelling job...");
    future.cancel(true);
    Thread.sleep(1000);
    jet.shutdown();
}
Example 6: main
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
public static void main(String[] args) throws Exception {
    JetInstance client = Jet.newJetClient();
    String sourceMap = args[0];
    String sinkMap = args[1];

    DAG dag = new DAG();
    Vertex producer = dag.newVertex("reader", readMapP(sourceMap)).localParallelism(3);
    Vertex tokenizer = dag.newVertex("tokenizer",
            flatMapP((Map.Entry<?, String> entry) -> {
                StringTokenizer s = new StringTokenizer(entry.getValue());
                return () -> s.hasMoreTokens() ? s.nextToken() : null;
            })
    );
    // word -> (word, count)
    Vertex accumulate = dag.newVertex("accumulate", accumulateByKeyP(wholeItem(), counting()));
    // (word, count) -> (word, count)
    Vertex combine = dag.newVertex("combine", combineByKeyP(counting()));
    Vertex consumer = dag.newVertex("writer", writeMapP(sinkMap)).localParallelism(1);

    dag.edge(between(producer, tokenizer))
       .edge(between(tokenizer, accumulate)
               .partitioned(wholeItem(), HASH_CODE))
       .edge(between(accumulate, combine)
               .distributed()
               .partitioned(entryKey()))
       .edge(between(combine, consumer));

    JobConfig config = new JobConfig();
    config.addClass(JetMapWordCount.class);

    try {
        long start = System.currentTimeMillis();
        client.newJob(dag, config).join();
        System.out.println("Time=" + (System.currentTimeMillis() - start));
    } finally {
        client.shutdown();
    }
}
Example 7: main
import com.hazelcast.jet.JetInstance; // import the package/class required by this method
public static void main(String[] args) throws Exception {
    JetInstance client = Jet.newJetClient();
    String inputPath = args[0];
    String outputPath = args[1] + "_" + System.currentTimeMillis();

    DAG dag = new DAG();
    JobConf conf = new JobConf();
    conf.setOutputFormat(TextOutputFormat.class);
    conf.setInputFormat(TextInputFormat.class);
    TextInputFormat.addInputPath(conf, new Path(inputPath));
    TextOutputFormat.setOutputPath(conf, new Path(outputPath));

    Vertex producer = dag.newVertex("reader", readHdfsP(conf,
            (k, v) -> v.toString())).localParallelism(3);
    Vertex tokenizer = dag.newVertex("tokenizer",
            flatMapP((String line) -> {
                StringTokenizer s = new StringTokenizer(line);
                return () -> s.hasMoreTokens() ? s.nextToken() : null;
            })
    );
    // word -> (word, count)
    Vertex accumulate = dag.newVertex("accumulate", accumulateByKeyP(wholeItem(), counting()));
    // (word, count) -> (word, count)
    Vertex combine = dag.newVertex("combine", combineByKeyP(counting()));
    Vertex consumer = dag.newVertex("writer", writeHdfsP(conf, entryKey(), entryValue())).localParallelism(1);

    dag.edge(between(producer, tokenizer))
       .edge(between(tokenizer, accumulate)
               .partitioned(wholeItem(), HASH_CODE))
       .edge(between(accumulate, combine)
               .distributed()
               .partitioned(entryKey()))
       .edge(between(combine, consumer));

    JobConfig config = new JobConfig();
    config.addClass(JetWordCount.class);

    try {
        long start = System.currentTimeMillis();
        client.newJob(dag, config).join();
        System.out.println("Time=" + (System.currentTimeMillis() - start));
    } finally {
        client.shutdown();
    }
}