This article collects typical usage examples of the Java method com.datatorrent.api.LocalMode.getDAG. If you are unsure what LocalMode.getDAG does, how to use it, or where to find examples of it, the hand-picked code samples below may help. You can also read more about the containing class, com.datatorrent.api.LocalMode.
Fifteen code examples of LocalMode.getDAG are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
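Before the individual examples, here is a minimal sketch of the pattern almost all of them share: create a LocalMode instance, populate the DAG returned by getDAG, then drive the embedded cluster through a Controller. The populateDag helper below is hypothetical and stands in for whatever operators and streams an application actually wires up:

import com.datatorrent.api.DAG;
import com.datatorrent.api.LocalMode;

public class LocalModeGetDagSketch
{
  public static void main(String[] args) throws Exception
  {
    LocalMode lma = LocalMode.newInstance();        // embedded, in-JVM cluster
    DAG dag = lma.getDAG();                         // obtain the DAG to populate
    populateDag(dag);                               // hypothetical: add operators and streams
    LocalMode.Controller lc = lma.getController();
    lc.run(10000);                                  // run the local cluster for ~10 seconds
  }

  private static void populateDag(DAG dag)
  {
    // hypothetical placeholder: dag.addOperator(...) and dag.addStream(...) calls go here
  }
}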
Example 1: runEmbedded
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Override
public void runEmbedded(boolean async, long duration, Callable<Boolean> exitCondition)
{
  LocalMode lma = LocalMode.newInstance();
  populateDag(lma.getDAG());
  DAG dag = lma.getDAG();
  LocalMode.Controller lc = lma.getController();
  if (lc instanceof StramLocalCluster) {
    ((StramLocalCluster)lc).setExitCondition(exitCondition);
  }
  if (async) {
    lc.runAsync();
  } else {
    if (duration >= 0) {
      lc.run(duration);
    } else {
      lc.run();
    }
  }
}
Example 2: testWindowDataManager
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testWindowDataManager() throws Exception
{
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  KinesisStringInputOperator inputOperator = dag.addOperator("KinesisInput", new KinesisStringInputOperator()
  {
    @Override
    public void deactivate()
    {
    }

    @Override
    public void teardown()
    {
    }
  });
  testMeta.operator = inputOperator;
  Assert.assertTrue("Default behaviour of WindowDataManager changed",
      (inputOperator.getWindowDataManager() instanceof WindowDataManager.NoopWindowDataManager));
}
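For contrast with the no-op default asserted above, here is a short fragment (hedged: it assumes the setWindowDataManager setter that pairs with the getter used in this test) showing how the operator could instead persist per-window state with the FSWindowDataManager that Example 15 below applies to its output operator:

import org.apache.apex.malhar.lib.wal.FSWindowDataManager;

// Inside a DAG-populating method such as the tests in this article:
// replace the NoopWindowDataManager default so window state survives recovery.
KinesisStringInputOperator inputOperator = dag.addOperator("KinesisInput", new KinesisStringInputOperator());
inputOperator.setWindowDataManager(new FSWindowDataManager());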
Example 3: testMapOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testMapOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  NumberGenerator numGen = dag.addOperator("numGen", new NumberGenerator());
  FunctionOperator.MapFunctionOperator<Integer, Integer> mapper
      = dag.addOperator("mapper", new FunctionOperator.MapFunctionOperator<Integer, Integer>(new Square()));
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());
  dag.addStream("raw numbers", numGen.output, mapper.input);
  dag.addStream("mapped results", mapper.output, collector.input);
  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == NumTuples;
    }
  });
  lc.run(5000);
  Assert.assertEquals(285, sum);
}
Example 4: testFlatMapOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testFlatMapOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  NumberListGenerator numGen = dag.addOperator("numGen", new NumberListGenerator());
  FunctionOperator.FlatMapFunctionOperator<List<Integer>, Integer> fm
      = dag.addOperator("flatmap", new FunctionOperator.FlatMapFunctionOperator<>(new FmFunction()));
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());
  dag.addStream("raw numbers", numGen.output, fm.input);
  dag.addStream("flatmap results", fm.output, collector.input);
  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == 13;
    }
  });
  lc.run(5000);
  Assert.assertEquals(39555, sum);
}
Example 5: testFilterOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testFilterOperator() throws Exception
{
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  FunctionOperator.FilterFunctionOperator<Integer> filter0
      = new FunctionOperator.FilterFunctionOperator<Integer>(new Function.FilterFunction<Integer>()
      {
        @Override
        public boolean f(Integer in)
        {
          return in % divider == 0;
        }
      });
  NumberGenerator numGen = dag.addOperator("numGen", new NumberGenerator());
  FunctionOperator.FilterFunctionOperator<Integer> filter = dag.addOperator("filter", filter0);
  ResultCollector collector = dag.addOperator("collector", new ResultCollector());
  dag.addStream("raw numbers", numGen.output, filter.input);
  dag.addStream("filtered results", filter.output, collector.input);
  // Create local cluster
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return TupleCount == NumTuples / divider;
    }
  });
  lc.run(5000);
  Assert.assertEquals(20, sum);
}
Example 6: testInputOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

public void testInputOperator() throws Exception
{
  testStore.connect();
  testStore.put("test_abc", "789");
  testStore.put("test_def", "456");
  testStore.put("test_ghi", "123");
  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();
    @SuppressWarnings("unchecked")
    InputOperator<S> inputOperator = dag.addOperator("input", new InputOperator<S>());
    CollectorModule<Object> collector = dag.addOperator("collector", new CollectorModule<Object>());
    inputOperator.addKey("test_abc");
    inputOperator.addKey("test_def");
    inputOperator.addKey("test_ghi");
    inputOperator.setStore(operatorStore);
    dag.addStream("stream", inputOperator.outputPort, collector.inputPort);
    final LocalMode.Controller lc = lma.getController();
    lc.run(3000);
    lc.shutdown();
    Assert.assertEquals("789", CollectorModule.resultMap.get("test_abc"));
    Assert.assertEquals("456", CollectorModule.resultMap.get("test_def"));
    Assert.assertEquals("123", CollectorModule.resultMap.get("test_ghi"));
  } finally {
    testStore.remove("test_abc");
    testStore.remove("test_def");
    testStore.remove("test_ghi");
    testStore.disconnect();
  }
}
Example 7: ReadFromKafka
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

private List<Person> ReadFromKafka()
{
  tupleCollection.clear();
  // Configure consumer properties
  Properties props = new Properties();
  props.put(BOOTSTRAP_SERVERS_CONFIG, getClusterConfig());
  props.put(KEY_DESERIALIZER_CLASS_CONFIG, KafkaSinglePortExactlyOnceOutputOperator.KEY_DESERIALIZER);
  props.put(VALUE_DESERIALIZER_CLASS_CONFIG, VALUE_DESERIALIZER);
  props.put(GROUP_ID_CONFIG, "KafkaTest");
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  // Create KafkaSinglePortInputOperator
  KafkaSinglePortInputOperator node = dag.addOperator("Kafka input", KafkaSinglePortInputOperator.class);
  node.setConsumerProps(props);
  node.setInitialPartitionCount(1);
  // set topic
  node.setTopics(testName);
  node.setInitialOffset(AbstractKafkaInputOperator.InitialOffset.EARLIEST.name());
  node.setClusters(getClusterConfig());
  node.setStrategy("one_to_one");
  // Create test tuple collector
  CollectorModule collector1 = dag.addOperator("collector", new CollectorModule());
  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector1.inputPort);
  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.run(30000);
  return tupleCollection;
}
Example 8: testApplication
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testApplication()
{
  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();
    XmlDataEmitterOperator input = dag.addOperator("data", new XmlDataEmitterOperator());
    XmlParser parser = dag.addOperator("xmlparser", new XmlParser());
    ResultCollector rc = dag.addOperator("rc", new ResultCollector());
    dag.getMeta(parser).getMeta(parser.out).getAttributes().put(Context.PortContext.TUPLE_CLASS,
        org.apache.apex.malhar.lib.parser.XmlParserTest.EmployeeBean.class);
    ConsoleOutputOperator xmlObjectOp = dag.addOperator("xmlObjectOp", new ConsoleOutputOperator());
    xmlObjectOp.setDebug(true);
    dag.addStream("input", input.output, parser.in);
    dag.addStream("output", parser.parsedOutput, xmlObjectOp.input);
    dag.addStream("pojo", parser.out, rc.input);
    LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);
    ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
    {
      @Override
      public Boolean call() throws Exception
      {
        return TupleCount == 1;
      }
    });
    lc.run(10000); // runs for 10 seconds and quits
    Assert.assertEquals(1, TupleCount);
    Assert.assertEquals("john", obj.getName());
  } catch (ConstraintViolationException e) {
    Assert.fail("constraint violations: " + e.getConstraintViolations());
  }
}
Example 9: testPartitionableInputOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

public void testPartitionableInputOperator(KinesisConsumer consumer) throws Exception
{
  // Set to 2 because we want to make sure the END_TUPLE from both partitions is received
  latch = new CountDownLatch(2);
  int totalCount = 100;
  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName, true);
  p.setSendCount(totalCount);
  new Thread(p).start();
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  // Create KinesisSinglePortStringInputOperator
  KinesisStringInputOperator node = dag.addOperator("Kinesis consumer", KinesisStringInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  node.setStreamName(streamName);
  // set the stream name on the consumer
  consumer.setStreamName(streamName);
  node.setConsumer(consumer);
  // Create test tuple collector
  CollectorModule<String> collector = dag.addOperator("RecordsCollector", new CollectorModule<String>());
  // Connect ports
  dag.addStream("Kinesis stream", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();
  // Wait up to 15s for the consumer to finish consuming all the records
  latch.await(15000, TimeUnit.MILLISECONDS);
  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));
  //p.close();
  lc.shutdown();
}
Example 10: testPartitionableInputOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

public void testPartitionableInputOperator(KinesisConsumer consumer) throws Exception
{
  // Set to 3 to make sure all the tuples from both partitions are received and the offsets have been updated to 102
  latch = new CountDownLatch(3);
  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName, true);
  p.setSendCount(totalCount);
  // Wait for the producer to send all the records
  p.run();
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  // Create KinesisSinglePortStringInputOperator
  KinesisStringInputOperator node = dag.addOperator("Kinesis consumer", KinesisStringInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  node.setStreamName(streamName);
  TestShardManager tfm = new TestShardManager();
  tfm.setFilename(streamName + OFFSET_FILE);
  node.setShardManager(tfm);
  node.setStrategy(AbstractKinesisInputOperator.PartitionStrategy.MANY_TO_ONE.toString());
  node.setRepartitionInterval(-1);
  // set the stream name on the consumer
  consumer.setStreamName(streamName);
  // start consuming from the earliest offset in each shard
  consumer.setInitialOffset("earliest");
  node.setConsumer(consumer);
  // Create test tuple collector
  CollectorModule collector = dag.addOperator("RecordCollector", new CollectorModule());
  // Connect ports
  dag.addStream("Kinesis Records", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);
  lc.runAsync();
  // Wait up to 15s for the consumer to finish consuming all the records
  latch.await(15000, TimeUnit.MILLISECONDS);
  // Check results
  assertEquals("Tuple count", totalCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d -> %d", collectedTuples.size(), totalCount));
  lc.shutdown();
}
Example 11: testShardManager
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testShardManager() throws Exception
{
  // Set to 3 to make sure all the tuples from both partitions are received and the offsets have been updated to 102
  latch = new CountDownLatch(3);
  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName, true);
  p.setSendCount(totalCount);
  // Wait for the producer to send all the records
  p.run();
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  KinesisUtil.getInstance().setClient(client);
  // Create KinesisSinglePortStringInputOperator
  KinesisStringInputOperator node = dag.addOperator("Kinesis consumer", KinesisStringInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  node.setStreamName(streamName);
  ShardManager tfm = new ShardManager();
  node.setShardManager(tfm);
  node.setStrategy(AbstractKinesisInputOperator.PartitionStrategy.MANY_TO_ONE.toString());
  node.setRepartitionInterval(-1);
  KinesisConsumer consumer = new KinesisConsumer();
  // set the stream name on the consumer
  consumer.setStreamName(streamName);
  // start consuming from the earliest offset in each shard
  consumer.setInitialOffset("earliest");
  node.setConsumer(consumer);
  // Create test tuple collector
  CollectorModule collector = dag.addOperator("RecordCollector", new CollectorModule());
  // Connect ports
  dag.addStream("Kinesis Records", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);
  lc.runAsync();
  // Wait up to 10s for the consumer to finish consuming all the records
  latch.await(10000, TimeUnit.MILLISECONDS);
  assertEquals("ShardPos Size", 2, node.getShardManager().loadInitialShardPositions().size());
  Iterator ite = node.getShardManager().loadInitialShardPositions().entrySet().iterator();
  Entry e = (Entry)ite.next();
  assertNotEquals("Record Seq No in Shard Id 1", "", e.getValue());
  e = (Entry)ite.next();
  assertNotEquals("Record Seq No in Shard Id 2", "", e.getValue());
  // Check results
  assertEquals("Tuple count", totalCount, collectedTuples.size());
  logger.debug(String.format("Number of emitted tuples: %d -> %d", collectedTuples.size(), totalCount));
  lc.shutdown();
}
Example 12: testKinesisInputOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

/**
 * Test AbstractKinesisSinglePortInputOperator (i.e. an input adapter for
 * Kinesis, consumer). This module receives data from an outside test
 * generator through the Kinesis message bus and feeds that data into the
 * Malhar streaming platform.
 *
 * [Generate messages and send them to the Kinesis message bus] ==> [Receive
 * the messages through the Kinesis input adapter (i.e. consumer) and emit
 * them on the output port via the emitTuples() interface during the
 * onMessage call]
 *
 * @throws Exception
 */
@Test
public void testKinesisInputOperator() throws Exception
{
  int totalCount = 100;
  // Initialize the latch for this test
  latch = new CountDownLatch(1);
  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName);
  p.setSendCount(totalCount);
  p.setBatchSize(500);
  new Thread(p).start();
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  // Create KinesisSinglePortStringInputOperator
  KinesisStringInputOperator node = dag.addOperator("Kinesis message consumer", KinesisStringInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  KinesisConsumer consumer = new KinesisConsumer();
  consumer.setStreamName(streamName);
  consumer.setRecordsLimit(totalCount);
  node.setConsumer(consumer);
  // Create test tuple collector
  CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());
  // Connect ports
  dag.addStream("Kinesis message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();
  // Wait up to 45s for the consumer to finish consuming all the messages
  latch.await(45000, TimeUnit.MILLISECONDS);
  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));
  lc.shutdown();
}
Example 13: testKinesisByteArrayInputOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testKinesisByteArrayInputOperator() throws Exception
{
  int totalCount = 10;
  // Initialize the latch for this test
  latch = new CountDownLatch(1);
  // Start producer
  KinesisTestProducer p = new KinesisTestProducer(streamName);
  p.setSendCount(totalCount);
  p.setBatchSize(9);
  new Thread(p).start();
  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  // Create KinesisByteArrayInputOperator and set some consumer properties on it
  KinesisByteArrayInputOperator node = dag.addOperator("Kinesis message consumer", KinesisByteArrayInputOperator.class);
  node.setAccessKey(credentials.getCredentials().getAWSAccessKeyId());
  node.setSecretKey(credentials.getCredentials().getAWSSecretKey());
  KinesisConsumer consumer = new KinesisConsumer();
  consumer.setStreamName(streamName);
  consumer.setRecordsLimit(totalCount);
  node.setConsumer(consumer);
  // Create test tuple collector
  CollectorModule<byte[]> collector = dag.addOperator("TestMessageCollector", new CollectorModule<byte[]>());
  // Connect ports
  dag.addStream("Kinesis message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();
  // Wait up to 45s for the consumer to finish consuming all the messages
  latch.await(45000, TimeUnit.MILLISECONDS);
  // Check results
  Assert.assertEquals("Collections size", 1, collections.size());
  Assert.assertEquals("Tuple count", totalCount, collections.get(collector.inputPort.id).size());
  logger.debug(String.format("Number of emitted tuples: %d", collections.get(collector.inputPort.id).size()));
  lc.shutdown();
}
Example 14: testIntputOperator
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

@Test
public void testIntputOperator() throws IOException
{
  this.operatorStore = new RedisStore();
  this.testStore = new RedisStore();
  testStore.connect();
  ScanParams params = new ScanParams();
  params.count(1);
  testStore.put("test_abc", "789");
  testStore.put("test_def", "456");
  testStore.put("test_ghi", "123");
  try {
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();
    RedisKeyValueInputOperator inputOperator = dag.addOperator("input", new RedisKeyValueInputOperator());
    final CollectorModule collector = dag.addOperator("collector", new CollectorModule());
    inputOperator.setStore(operatorStore);
    dag.addStream("stream", inputOperator.outputPort, collector.inputPort);
    final LocalMode.Controller lc = lma.getController();
    new Thread("LocalClusterController")
    {
      @Override
      public void run()
      {
        long startTms = System.currentTimeMillis();
        long timeout = 50000L;
        try {
          Thread.sleep(1000);
          while (System.currentTimeMillis() - startTms < timeout) {
            if (CollectorModule.resultMap.size() < 3) {
              Thread.sleep(10);
            } else {
              break;
            }
          }
        } catch (InterruptedException ex) {
          //
        }
        lc.shutdown();
      }
    }.start();
    lc.run();
    Assert.assertTrue(CollectorModule.resultMap.contains(new KeyValPair<String, String>("test_abc", "789")));
    Assert.assertTrue(CollectorModule.resultMap.contains(new KeyValPair<String, String>("test_def", "456")));
    Assert.assertTrue(CollectorModule.resultMap.contains(new KeyValPair<String, String>("test_ghi", "123")));
  } finally {
    for (KeyValPair<String, String> entry : CollectorModule.resultMap) {
      testStore.remove(entry.getKey());
    }
    testStore.disconnect();
  }
}
Example 15: runTest
import com.datatorrent.api.LocalMode; // import the package/class this method depends on

protected void runTest(int testNum) throws IOException
{
  RabbitMQMessageReceiver receiver = new RabbitMQMessageReceiver();
  receiver.setup();
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();
  SourceModule source = dag.addOperator("source", new SourceModule());
  source.setTestNum(testNum);
  RabbitMQOutputOperator collector = dag.addOperator("generator", new RabbitMQOutputOperator());
  collector.setWindowDataManager(new FSWindowDataManager());
  collector.setExchange("testEx");
  dag.addStream("Stream", source.outPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();
  try {
    Thread.sleep(1000);
    long timeout = 10000L;
    long startTms = System.currentTimeMillis();
    while ((receiver.count < testNum * 3) && (System.currentTimeMillis() - startTms < timeout)) {
      Thread.sleep(100);
    }
  } catch (InterruptedException ex) {
    Assert.fail(ex.getMessage());
  } finally {
    lc.shutdown();
  }
  Assert.assertEquals("emitted value for testNum was ", testNum * 3, receiver.count);
  for (Map.Entry<String, Integer> e : receiver.dataMap.entrySet()) {
    if (e.getKey().equals("a")) {
      Assert.assertEquals("emitted value for 'a' was ", new Integer(2), e.getValue());
    } else if (e.getKey().equals("b")) {
      Assert.assertEquals("emitted value for 'b' was ", new Integer(20), e.getValue());
    } else if (e.getKey().equals("c")) {
      Assert.assertEquals("emitted value for 'c' was ", new Integer(1000), e.getValue());
    }
  }
}