This article collects typical usage examples of the Java class com.datatorrent.api.DAG. If you have been wondering what the DAG class is for, how to use it, or where to find examples of it, the curated snippets below should help.
The DAG class belongs to the com.datatorrent.api package. Fifteen code examples are presented below, ordered by popularity by default.
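
Every populateDAG method shown below is the entry point of an Apache Apex StreamingApplication: the platform hands the application an empty DAG, and the application adds operators and connects their ports with streams. For orientation, here is a minimal skeleton (the class and application names are illustrative, not taken from any example on this page):

import org.apache.hadoop.conf.Configuration;
import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;
import com.datatorrent.api.annotation.ApplicationAnnotation;

// Hypothetical application class; the platform finds it by the annotation name.
@ApplicationAnnotation(name = "MyApplication")
public class MyApplication implements StreamingApplication
{
  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    // Add operators with dag.addOperator(...) or dag.addModule(...) and
    // connect their ports with dag.addStream(...), as the examples below show.
  }
}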
Example 1: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration configuration) {
  LogLevelProperties props = new LogLevelProperties(configuration);
  //dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, props.getWindowMillis());

  // create the operator to receive data from NiFi
  WindowDataManager inManager = new WindowDataManager.NoopWindowDataManager();
  NiFiSinglePortInputOperator nifiInput = getNiFiInput(dag, props, inManager);

  // create the operator to count log levels over a window
  String attributeName = props.getLogLevelAttribute();
  LogLevelWindowCount count = dag.addOperator("count", new LogLevelWindowCount(attributeName));
  dag.setAttribute(count, Context.OperatorContext.APPLICATION_WINDOW_COUNT, props.getAppWindowCount());

  // create the operator to send data back to NiFi
  WindowDataManager outManager = new WindowDataManager.NoopWindowDataManager();
  NiFiSinglePortOutputOperator nifiOutput = getNiFiOutput(dag, props, outManager);

  // wire the DAG: nifi-in -> count -> nifi-out
  dag.addStream("nifi-in-count", nifiInput.outputPort, count.input);
  dag.addStream("count-nifi-out", count.output, nifiOutput.inputPort);
}
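
Any of these populateDAG implementations can be exercised in-process before deploying to a cluster. Below is a minimal sketch using Apex's embedded LocalMode, assuming the hypothetical MyApplication class from the skeleton above; the run duration is arbitrary:

import org.apache.hadoop.conf.Configuration;
import com.datatorrent.api.LocalMode;

LocalMode lma = LocalMode.newInstance();
Configuration conf = new Configuration(false); // real apps usually load a properties file
lma.prepareDAG(new MyApplication(), conf);     // throws Exception on validation failure
LocalMode.Controller lc = lma.getController();
lc.run(10000);                                 // run the DAG for ~10 seconds, then shut down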
Example 2: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration conf)
{
  /*
   * Define HDFS and S3 as the input and output modules, respectively.
   */
  FSInputModule inputModule = dag.addModule("HDFSInputModule", new FSInputModule());
  S3OutputModule outputModule = dag.addModule("S3OutputModule", new S3OutputModule());

  /*
   * Create a stream for metadata blocks from the HDFS input to the S3 output module.
   * Note: stream locality is set to CONTAINER_LOCAL for performance, as it
   * avoids serialization/deserialization of the objects.
   */
  dag.addStream("FileMetaData", inputModule.filesMetadataOutput, outputModule.filesMetadataInput);
  dag.addStream("BlocksMetaData", inputModule.blocksMetadataOutput, outputModule.blocksMetadataInput)
      .setLocality(DAG.Locality.CONTAINER_LOCAL);

  /*
   * Create a stream for data blocks from the HDFS input to the S3 output module.
   */
  dag.addStream("BlocksData", inputModule.messages, outputModule.blockData).setLocality(DAG.Locality.CONTAINER_LOCAL);
}
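
Example 2 relies on stream locality for throughput. For reference, DAG.Locality in com.datatorrent.api defines four levels; the trade-off notes here are a summary, not part of the original example:

// THREAD_LOCAL    - both operators share one thread: no queue hand-off, but
//                   they also share that thread's processing capacity
// CONTAINER_LOCAL - same JVM/container: avoids network serialization
// NODE_LOCAL      - same machine: data moves over loopback
// RACK_LOCAL      - same rack: locality is a hint the engine tries to honor
dag.addStream("BlocksData", inputModule.messages, outputModule.blockData)
    .setLocality(DAG.Locality.CONTAINER_LOCAL);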
Example 3: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // Add S3 as the input module and Redshift as the output module of the DAG
  S3RecordReaderModule inputModule = dag.addModule("S3Input", new S3RecordReaderModule());
  setS3FilesToInput(inputModule, conf);
  CsvParser csvParser = dag.addOperator("csvParser", CsvParser.class);
  TransformOperator transform = dag.addOperator("transform", new TransformOperator());
  Map<String, String> expMap = Maps.newHashMap();
  expMap.put("name", "{$.name}.toUpperCase()");
  transform.setExpressionMap(expMap);
  CsvFormatter formatter = dag.addOperator("formatter", new CsvFormatter());
  StringToByteArrayConverterOperator converterOp = dag.addOperator("converter", new StringToByteArrayConverterOperator());
  RedshiftOutputModule redshiftOutput = dag.addModule("RedshiftOutput", new RedshiftOutputModule());

  // Create the streams: S3 -> parse -> transform -> format -> convert -> Redshift
  dag.addStream("data", inputModule.records, csvParser.in);
  dag.addStream("pojo", csvParser.out, transform.input);
  dag.addStream("transformed", transform.output, formatter.in);
  dag.addStream("string", formatter.out, converterOp.input).setLocality(DAG.Locality.THREAD_LOCAL);
  dag.addStream("writeToJDBC", converterOp.output, redshiftOutput.input);
}
Example 4: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

public void populateDAG(DAG dag, Configuration conf)
{
  // Pipeline: Kafka -> JSON parse -> filter -> transform -> JSON format -> file
  KafkaSinglePortInputOperator kafkaInputOperator = dag.addOperator("kafkaInput", KafkaSinglePortInputOperator.class);
  JsonParser parser = dag.addOperator("parser", JsonParser.class);
  TransformOperator transform = dag.addOperator("transform", new TransformOperator());
  FilterOperator filterOperator = dag.addOperator("filter", new FilterOperator());
  JsonFormatter formatter = dag.addOperator("formatter", JsonFormatter.class);
  StringFileOutputOperator fileOutput = dag.addOperator("fileOutput", new StringFileOutputOperator());

  dag.addStream("data", kafkaInputOperator.outputPort, parser.in);
  dag.addStream("pojo", parser.out, filterOperator.input);
  dag.addStream("filtered", filterOperator.truePort, transform.input);
  dag.addStream("transformed", transform.output, formatter.in);
  dag.addStream("string", formatter.out, fileOutput.input);
}
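
Example 4 registers the Kafka operator by class, leaving its connection settings to external configuration. A hedged sketch of the programmatic equivalent, assuming the Kafka 0.9+ input operator from apex-malhar (the topic and broker values are made up):

// Hypothetical values; normally supplied in the application configuration,
// e.g. via the dt.operator.kafkaInput.prop.* properties.
kafkaInputOperator.setTopics("transactions");
kafkaInputOperator.setClusters("localhost:9092");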
Example 5: copyShallow

import com.datatorrent.api.DAG; // import the dependent package/class

private static void copyShallow(DAG from, DAG to) {
  checkArgument(from.getClass() == to.getClass(), "must be same class %s %s",
      from.getClass(), to.getClass());
  // copy every non-static field declared directly on the concrete DAG class
  Field[] fields = from.getClass().getDeclaredFields();
  AccessibleObject.setAccessible(fields, true);
  for (Field field : fields) {
    if (!java.lang.reflect.Modifier.isStatic(field.getModifiers())) {
      try {
        field.set(to, field.get(from));
      } catch (IllegalArgumentException | IllegalAccessException e) {
        throw new RuntimeException(e);
      }
    }
  }
}
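
Two caveats when reusing this pattern: getDeclaredFields() returns only the fields declared directly on the concrete class (inherited fields are not copied), and the copy is shallow, so both DAG instances end up sharing any mutable field values. A hypothetical call site:

// Both instances must be the exact same implementation class, otherwise the
// checkArgument above fails.
DAG source = embeddedLauncher.getDAG();                                // assumes an EmbeddedAppLauncher
DAG target = source.getClass().getDeclaredConstructor().newInstance(); // assumes a no-arg constructor
copyShallow(source, target);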
Example 6: getApexLauncher

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
protected Launcher<?> getApexLauncher() {
  return new Launcher<AppHandle>() {
    @Override
    public AppHandle launchApp(StreamingApplication application,
        Configuration configuration, AttributeMap launchParameters)
        throws org.apache.apex.api.Launcher.LauncherException {
      // build the DAG with an embedded launcher instead of actually deploying it
      EmbeddedAppLauncher<?> embeddedLauncher = Launcher.getLauncher(LaunchMode.EMBEDDED);
      DAG dag = embeddedLauncher.getDAG();
      application.populateDAG(dag, new Configuration(false));
      String appName = dag.getValue(DAGContext.APPLICATION_NAME);
      Assert.assertEquals("DummyApp", appName);
      // return a handle that reports the (never launched) app as finished
      return new AppHandle() {
        @Override
        public boolean isFinished() {
          return true;
        }

        @Override
        public void shutdown(org.apache.apex.api.Launcher.ShutdownMode arg0) {
        }
      };
    }
  };
}
Example 7: testParDoChaining

import com.datatorrent.api.DAG; // import the dependent package/class

@Test
public void testParDoChaining() throws Exception {
  Pipeline p = Pipeline.create();
  long numElements = 1000;
  PCollection<Long> input = p.apply(GenerateSequence.from(0).to(numElements));
  PAssert.thatSingleton(input.apply("Count", Count.<Long>globally())).isEqualTo(numElements);

  ApexPipelineOptions options = PipelineOptionsFactory.as(ApexPipelineOptions.class);
  DAG dag = TestApexRunner.translate(p, options);

  // collect the upstream operator name of every THREAD_LOCAL stream in the
  // translated DAG and check that the expected fused stage is among them
  String[] expectedThreadLocal = { "/CreateActual/FilterActuals/Window.Assign" };
  Set<String> actualThreadLocal = Sets.newHashSet();
  for (DAG.StreamMeta sm : dag.getAllStreamsMeta()) {
    DAG.OutputPortMeta opm = sm.getSource();
    if (sm.getLocality() == Locality.THREAD_LOCAL) {
      String name = opm.getOperatorMeta().getName();
      String prefix = "PAssert$";
      if (name.startsWith(prefix)) {
        // strip the nondeterministic prefix
        name = name.substring(prefix.length() + 1);
      }
      actualThreadLocal.add(name);
    }
  }
  Assert.assertThat(actualThreadLocal, Matchers.hasItems(expectedThreadLocal));
}
Example 8: getFilteredApacheAggregationCountOper

import com.datatorrent.api.DAG; // import the dependent package/class

private MultiWindowDimensionAggregation getFilteredApacheAggregationCountOper(String name, DAG dag)
{
  MultiWindowDimensionAggregation oper = dag.addOperator(name, MultiWindowDimensionAggregation.class);
  oper.setWindowSize(3);

  List<int[]> dimensionArrayList = new ArrayList<int[]>();
  int[] dimensionArray1 = {0};
  int[] dimensionArray2 = {1};
  dimensionArrayList.add(dimensionArray1);
  dimensionArrayList.add(dimensionArray2);
  oper.setDimensionArray(dimensionArrayList);

  oper.setTimeBucket(TIME_BUCKETS.m.name());
  oper.setDimensionKeyVal("0"); // aggregate on count
  oper.setWindowSize(2); // 1 sec window; note this overrides the setWindowSize(3) above
  return oper;
}
Example 9: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // Set up the operator that injects the Twitter sample stream into the system.
  TwitterSampleInput twitterFeed = new TwitterSampleInput();
  twitterFeed = dag.addOperator("TweetSampler", twitterFeed);

  // Set up an operator to count the unique hashtags within a window.
  UniqueCounter<String> uniqueCounter = dag.addOperator("UniqueHashtagCounter", new UniqueCounter<String>());

  // Aggregate the hashtag counts over the last 5 minutes
  // (600 sliding windows of the default 500 ms streaming window).
  WindowedTopCounter<String> topCounts = dag.addOperator("TopCounter", new WindowedTopCounter<String>());
  topCounts.setTopCount(10);
  topCounts.setSlidingWindowWidth(600);
  topCounts.setDagWindowWidth(1);

  dag.addStream("TwittedHashtags", twitterFeed.hashtag, uniqueCounter.data).setLocality(locality);
  // Count unique hashtags
  dag.addStream("UniqueHashtagCounts", uniqueCounter.count, topCounts.input);
  TwitterTopCounterApplication.consoleOutput(dag, "topHashtags", topCounts.output, SNAPSHOT_SCHEMA, "hashtag");
}
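
The consoleOutput(...) call above is a helper from the Twitter demo that publishes the results through a snapshot output. If plain stdout is enough, the same wiring can be done directly (a minimal sketch; the operator and stream names are made up):

// ConsoleOutputOperator simply prints every tuple it receives.
ConsoleOutputOperator console = dag.addOperator("console", new ConsoleOutputOperator());
dag.addStream("TopHashtags", topCounts.output, console.input);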
Example 10: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration configuration)
{
  WordGenerator inputOperator = new WordGenerator();

  // keyed windowed operator: sum per key over 1-minute time windows
  KeyedWindowedOperatorImpl<String, Long, MutableLong, Long> windowedOperator = new KeyedWindowedOperatorImpl<>();
  Accumulation<Long, MutableLong, Long> sum = new SumAccumulation();
  windowedOperator.setAccumulation(sum);
  windowedOperator.setDataStorage(new InMemoryWindowedKeyedStorage<String, MutableLong>());
  windowedOperator.setRetractionStorage(new InMemoryWindowedKeyedStorage<String, Long>());
  windowedOperator.setWindowStateStorage(new InMemoryWindowedStorage<WindowState>());
  windowedOperator.setWindowOption(new WindowOption.TimeWindows(Duration.standardMinutes(1)));
  // fire early results every second and retract them when the final pane fires
  windowedOperator.setTriggerOption(TriggerOption.AtWatermark().withEarlyFiringsAtEvery(Duration.millis(1000)).accumulatingAndRetractingFiredPanes());
  //windowedOperator.setAllowedLateness(Duration.millis(14000));

  ConsoleOutputOperator outputOperator = new ConsoleOutputOperator();
  dag.addOperator("inputOperator", inputOperator);
  dag.addOperator("windowedOperator", windowedOperator);
  dag.addOperator("outputOperator", outputOperator);
  dag.addStream("input_windowed", inputOperator.output, windowedOperator.input);
  dag.addStream("windowed_output", windowedOperator.output, outputOperator.input);
}
Example 11: buildDataset

import com.datatorrent.api.DAG; // import the dependent package/class

public void buildDataset()
{
  // map POJO properties to table columns, in "property:table.column:TYPE" form
  hashMapping1[0] = "prop1:t1.col1:INT";
  hashMapping1[1] = "prop3:t1.col3:STRING";
  hashMapping1[2] = "prop2:t1.col2:DATE";
  hashMapping1[3] = "prop4:t2.col1:STRING";
  hashMapping1[4] = "prop5:t2.col2:INT";

  // positional mapping, in "table.column:TYPE" form
  arrayMapping1[0] = "t1.col1:INT";
  arrayMapping1[1] = "t1.col3:STRING";
  arrayMapping1[2] = "t1.col2:DATE";
  arrayMapping1[3] = "t2.col2:STRING";
  arrayMapping1[4] = "t2.col1:INT";

  attrmap.put(DAG.APPLICATION_ID, "myMongoDBOouputOperatorAppId");
}
Example 12: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration entries)
{
  /* Generate random key-value pairs */
  RandomDataGenerator randGen = dag.addOperator("randomgen", new RandomDataGenerator());

  /* Count unique values per window (non-cumulative) */
  UniqueCounter<KeyValPair<String, Object>> uniqCount =
      dag.addOperator("uniqevalue", new UniqueCounter<KeyValPair<String, Object>>());
  MapToKeyHashValuePairConverter<KeyValPair<String, Object>, Integer> converter = dag.addOperator("converter", new MapToKeyHashValuePairConverter());
  uniqCount.setCumulative(false);

  /* Initialize with three partitions to start with */
  dag.setAttribute(randGen, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<UniqueCounter<KeyValPair<String, Object>>>(3));

  ConsoleOutputOperator output = dag.addOperator("output", new ConsoleOutputOperator());
  dag.addStream("datain", randGen.outPort, uniqCount.data);
  dag.addStream("convert", uniqCount.count, converter.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("consoutput", converter.output, output.input);
}
Example 13: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration conf)
{
  CustomRandomEventGenerator randomEventGenerator = dag.addOperator(
      "randomEventGenerator", new CustomRandomEventGenerator());
  randomEventGenerator.setMaxCountOfWindows(MAX_WINDOW_COUNT);
  randomEventGenerator.setTuplesBlastIntervalMillis(TUPLE_BLAST_MILLIS);
  randomEventGenerator.setTuplesBlast(TUPLE_BLAST);

  LOG.debug("Before making output operator");
  MemsqlPOJOOutputOperator memsqlOutputOperator = dag.addOperator("memsqlOutputOperator",
      new MemsqlPOJOOutputOperator());
  LOG.debug("After making output operator");
  memsqlOutputOperator.setBatchSize(DEFAULT_BATCH_SIZE);

  dag.addStream("memsqlConnector",
      randomEventGenerator.integer_data,
      memsqlOutputOperator.input);
}
Example 14: populateDAG

import com.datatorrent.api.DAG; // import the dependent package/class

@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // Create the operators for each step;
  // their settings are applied by the platform from the configuration file.
  KafkaSinglePortStringInputOperator kafkaInput = dag.addOperator("kafkaInput", new KafkaSinglePortStringInputOperator());
  DeserializeJSON deserializeJSON = dag.addOperator("deserialize", new DeserializeJSON());
  FilterTuples filterTuples = dag.addOperator("filterTuples", new FilterTuples());
  FilterFields filterFields = dag.addOperator("filterFields", new FilterFields());
  RedisJoin redisJoin = dag.addOperator("redisJoin", new RedisJoin());
  CampaignProcessor campaignProcessor = dag.addOperator("campaignProcessor", new CampaignProcessor());

  // Connect the operator ports
  dag.addStream("deserialize", kafkaInput.outputPort, deserializeJSON.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("filterTuples", deserializeJSON.output, filterTuples.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("filterFields", filterTuples.output, filterFields.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("redisJoin", filterFields.output, redisJoin.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("output", redisJoin.output, campaignProcessor.input);

  // PARTITION_PARALLEL gives each downstream operator the same number of
  // partitions as its upstream, forming independent parallel pipelines.
  dag.setInputPortAttribute(deserializeJSON.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(filterTuples.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(filterFields.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(redisJoin.input, Context.PortContext.PARTITION_PARALLEL, true);
}
Example 15: testJdbcInputOperator

import com.datatorrent.api.DAG; // import the dependent package/class

@Test
public void testJdbcInputOperator()
{
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);

  // a mock operator context carrying the application id attribute
  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

  TestInputOperator inputOperator = new TestInputOperator();
  inputOperator.setStore(store);
  insertEventsInTable(10);

  CollectorTestSink<Object> sink = new CollectorTestSink<>();
  inputOperator.outputPort.setSink(sink);

  // drive one window manually: setup -> beginWindow -> emitTuples -> endWindow
  inputOperator.setup(context);
  inputOperator.beginWindow(0);
  inputOperator.emitTuples();
  inputOperator.endWindow();
  Assert.assertEquals("rows from db", 10, sink.collectedTuples.size());
}