

Java DAG.addStream Method Code Examples

This article collects typical usage examples of the DAG.addStream method from the Java class com.datatorrent.api.DAG. If you are unsure what DAG.addStream does, how to use it, or what its usage looks like in practice, the curated examples below may help. You can also explore other usage examples of the enclosing class, com.datatorrent.api.DAG.


The following presents 15 code examples of the DAG.addStream method, ordered by default from most to least popular.
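Before the examples, here is a minimal sketch of the basic pattern. This is a hypothetical application: it assumes the Apache Apex Malhar operators RandomEventGenerator and ConsoleOutputOperator are on the classpath, and the class, operator, and stream names are purely illustrative. addStream takes a unique stream name, exactly one upstream output port as the source, and one or more downstream input ports as sinks, and returns a StreamMeta handle:

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;
import com.datatorrent.api.annotation.ApplicationAnnotation;
import com.datatorrent.lib.io.ConsoleOutputOperator;
import com.datatorrent.lib.testbench.RandomEventGenerator;

// Illustrative application name; not taken from any example below.
@ApplicationAnnotation(name = "AddStreamSketch")
public class AddStreamSketch implements StreamingApplication
{
  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    // Register two operators with the DAG under unique names.
    RandomEventGenerator gen = dag.addOperator("randomGen", new RandomEventGenerator());
    ConsoleOutputOperator console = dag.addOperator("console", new ConsoleOutputOperator());

    // Wire the generator's integer output port to the console's input port.
    // addStream returns a StreamMeta, so locality hints can be chained onto it.
    dag.addStream("randomData", gen.integer_data, console.input);
  }
}

As several examples below illustrate, the returned StreamMeta is typically used to chain setLocality(...) with DAG.Locality.CONTAINER_LOCAL or THREAD_LOCAL, which avoids serialization between co-located operators at the cost of resource isolation.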

Example 1: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration configuration) {
    LogLevelProperties props = new LogLevelProperties(configuration);

    //dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, props.getWindowMillis());

    // create the operator to receive data from NiFi
    WindowDataManager inManager = new WindowDataManager.NoopWindowDataManager();
    NiFiSinglePortInputOperator nifiInput = getNiFiInput(dag, props, inManager);

    // create the operator to count log levels over a window
    String attributeName = props.getLogLevelAttribute();
    LogLevelWindowCount count = dag.addOperator("count", new LogLevelWindowCount(attributeName));
    dag.setAttribute(count, Context.OperatorContext.APPLICATION_WINDOW_COUNT, props.getAppWindowCount());

    // create the operator to send data back to NiFi
    WindowDataManager outManager = new WindowDataManager.NoopWindowDataManager();
    NiFiSinglePortOutputOperator nifiOutput = getNiFiOutput(dag, props, outManager);

    // wire the dag as nifi-in -> count -> nifi-out
    dag.addStream("nifi-in-count", nifiInput.outputPort, count.input);
    dag.addStream("count-nifi-out", count.output, nifiOutput.inputPort);
}
 
Developer: bbende, Project: nifi-streaming-examples, Lines: 24, Source: LogLevelApplication.java

Example 2: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{

  /*
   * Define HDFS and S3 as input and output module operators respectively.
   */
  FSInputModule inputModule = dag.addModule("HDFSInputModule", new FSInputModule());
  S3OutputModule outputModule = dag.addModule("S3OutputModule", new S3OutputModule());

  /*
   * Create a stream for Metadata blocks from HDFS to S3 output modules.
   * Note: DAG locality is set to CONTAINER_LOCAL for performance benefits by
   * avoiding any serialization/deserialization of objects.
   */
  dag.addStream("FileMetaData", inputModule.filesMetadataOutput, outputModule.filesMetadataInput);
  dag.addStream("BlocksMetaData", inputModule.blocksMetadataOutput, outputModule.blocksMetadataInput)
          .setLocality(DAG.Locality.CONTAINER_LOCAL);

  /*
   * Create a stream for Data blocks from HDFS to S3 output modules.
   */
  dag.addStream("BlocksData", inputModule.messages, outputModule.blockData).setLocality(DAG.Locality.CONTAINER_LOCAL);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 25, Source: Application.java

Example 3: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  //Add S3 as input and redshift as output operators to DAG
  S3RecordReaderModule inputModule = dag.addModule("S3Input", new S3RecordReaderModule());
  setS3FilesToInput(inputModule, conf);

  CsvParser csvParser = dag.addOperator("csvParser", CsvParser.class);
  TransformOperator transform = dag.addOperator("transform", new TransformOperator());
  Map<String, String> expMap = Maps.newHashMap();
  expMap.put("name", "{$.name}.toUpperCase()");
  transform.setExpressionMap(expMap);
  CsvFormatter formatter = dag.addOperator("formatter", new CsvFormatter());
  StringToByteArrayConverterOperator converterOp = dag.addOperator("converter", new StringToByteArrayConverterOperator());
  RedshiftOutputModule redshiftOutput = dag.addModule("RedshiftOutput", new RedshiftOutputModule());

  //Create streams
  dag.addStream("data", inputModule.records, csvParser.in);
  dag.addStream("pojo", csvParser.out, transform.input);
  dag.addStream("transformed", transform.output, formatter.in);
  dag.addStream("string", formatter.out, converterOp.input).setLocality(DAG.Locality.THREAD_LOCAL);
  dag.addStream("writeToJDBC", converterOp.output, redshiftOutput.input);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 24, Source: Application.java

Example 4: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
public void populateDAG(DAG dag, Configuration conf)
{
  KafkaSinglePortInputOperator kafkaInputOperator = dag.addOperator("kafkaInput", KafkaSinglePortInputOperator.class);
  JsonParser parser = dag.addOperator("parser", JsonParser.class);
  TransformOperator transform = dag.addOperator("transform", new TransformOperator());
  FilterOperator filterOperator = dag.addOperator("filter", new FilterOperator());
  JsonFormatter formatter = dag.addOperator("formatter", JsonFormatter.class);

  StringFileOutputOperator fileOutput = dag.addOperator("fileOutput", new StringFileOutputOperator());
  
  dag.addStream("data", kafkaInputOperator.outputPort, parser.in);
  dag.addStream("pojo", parser.out, filterOperator.input);
  dag.addStream("filtered", filterOperator.truePort, transform.input);
  dag.addStream("transformed", transform.output, formatter.in);
  dag.addStream("string", formatter.out, fileOutput.input);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 17, Source: Application.java

Example 5: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  JsonGenerator generator = dag.addOperator("JsonGenerator", JsonGenerator.class);
  JsonParser jsonParser = dag.addOperator("jsonParser", JsonParser.class);

  CsvFormatter formatter = dag.addOperator("formatter", CsvFormatter.class);
  formatter.setSchema(SchemaUtils.jarResourceFileToString(filename));
  dag.setInputPortAttribute(formatter.in, PortContext.TUPLE_CLASS, PojoEvent.class);

  HDFSOutputOperator<String> hdfsOutput = dag.addOperator("HDFSOutputOperator", HDFSOutputOperator.class);
  hdfsOutput.setLineDelimiter("");

  dag.addStream("parserStream", generator.out, jsonParser.in);
  dag.addStream("formatterStream", jsonParser.out, formatter.in);
  dag.addStream("outputStream", formatter.out, hdfsOutput.input);

}
 
Developer: apache, Project: apex-malhar, Lines: 19, Source: Application.java

Example 6: populateInputDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public RelInfo populateInputDAG(DAG dag, JavaTypeFactory typeFactory)
{
  KafkaSinglePortInputOperator kafkaInput = dag.addOperator(OperatorUtils.getUniqueOperatorName("KafkaInput"),
      KafkaSinglePortInputOperator.class);
  kafkaInput.setTopics((String)operands.get(KAFKA_TOPICS));
  kafkaInput.setInitialOffset("EARLIEST");

  Properties props = new Properties();
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, operands.get(KAFKA_SERVERS));
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KEY_DESERIALIZER);
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, VALUE_DESERIALIZER);
  kafkaInput.setConsumerProps(props);

  kafkaInput.setClusters((String)operands.get(KAFKA_SERVERS));

  RelInfo spec = messageFormat.populateInputDAG(dag, typeFactory);
  dag.addStream(OperatorUtils.getUniqueStreamName("Kafka", "Parser"), kafkaInput.outputPort,
      spec.getInputPorts().get(0));
  return new RelInfo("Input", Lists.<Operator.InputPort>newArrayList(), spec.getOperator(), spec.getOutPort(),
      messageFormat.getRowType(typeFactory));
}
 
Developer: apache, Project: apex-malhar, Lines: 23, Source: KafkaEndpoint.java

Example 7: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  CustomRandomEventGenerator randomEventGenerator = dag.addOperator(
      "randomEventGenerator", new CustomRandomEventGenerator());
  randomEventGenerator.setMaxCountOfWindows(MAX_WINDOW_COUNT);
  randomEventGenerator.setTuplesBlastIntervalMillis(TUPLE_BLAST_MILLIS);
  randomEventGenerator.setTuplesBlast(TUPLE_BLAST);

  LOG.debug("Before making output operator");
  MemsqlPOJOOutputOperator memsqlOutputOperator = dag.addOperator("memsqlOutputOperator",
      new MemsqlPOJOOutputOperator());
  LOG.debug("After making output operator");

  memsqlOutputOperator.setBatchSize(DEFAULT_BATCH_SIZE);

  dag.addStream("memsqlConnector",
      randomEventGenerator.integer_data,
      memsqlOutputOperator.input);
}
 
Developer: apache, Project: apex-malhar, Lines: 21, Source: MemsqlOutputBenchmark.java

Example 8: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration configuration)
{
  // create operators
  FileReader fileReader = dag.addOperator("FileReader", FileReader.class);
  CsvParser csvParser = dag.addOperator("CsvParser", CsvParser.class);
  JdbcPOJOInsertOutputOperator jdbcOutputOperator = dag.addOperator("JdbcOutput", JdbcPOJOInsertOutputOperator.class);

  // configure operators
  String pojoSchema = SchemaUtils.jarResourceFileToString("schema.json");
  csvParser.setSchema(pojoSchema);

  jdbcOutputOperator.setFieldInfos(addFieldInfos());
  JdbcTransactionalStore outputStore = new JdbcTransactionalStore();
  jdbcOutputOperator.setStore(outputStore);

  // add stream
  dag.addStream("Bytes", fileReader.byteOutput, csvParser.in);
  dag.addStream("POJOs", csvParser.out, jdbcOutputOperator.input);
}
 
Developer: apache, Project: apex-malhar, Lines: 21, Source: FileToJdbcCsvParser.java

Example 9: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestGenerator gen = dag.addOperator("Generator", new TestGenerator());

  dedup = dag.addOperator("Deduper", new TestDeduper());
  dedup.setKeyExpression("id");
  dedup.setTimeExpression("eventTime.getTime()");
  dedup.setBucketSpan(60);
  dedup.setExpireBefore(600);

  ConsoleOutputOperator console = dag.addOperator("Console", new ConsoleOutputOperator());
  dag.addStream("Generator to Dedup", gen.output, dedup.input);
  dag.addStream("Dedup to Console", dedup.unique, console.input);
  dag.setInputPortAttribute(dedup.input, Context.PortContext.TUPLE_CLASS, TestEvent.class);
  dag.setOutputPortAttribute(dedup.unique, Context.PortContext.TUPLE_CLASS, TestEvent.class);
  dag.setAttribute(dedup, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<TimeBasedDedupOperator>(NUM_DEDUP_PARTITIONS));
}
 
Developer: apache, Project: apex-malhar, Lines: 20, Source: DeduperPartitioningTest.java

Example 10: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
public void populateDAG(DAG dag, Configuration conf)
{
  // This Kafka input operator reads from the specified Kafka brokers.
  KafkaSinglePortInputOperator kafkaInputOperator = dag.addOperator("kafkaInput", KafkaSinglePortInputOperator.class);

  // Parses a JSON string tuple against a specified JSON schema and emits a JSONObject.
  JsonParser jsonParser = dag.addOperator("JsonParser", new JsonParser());

  // Filters tuples according to a user-specified condition.
  FilterOperator filterOperator = dag.addOperator("filter", new FilterOperator());

  // Transforms tuple values using user-defined expressions; the logic below may be modified.
  TransformOperator transform = dag.addOperator("transform", new TransformOperator());
  Map<String, String> expMap = Maps.newHashMap();
  expMap.put("name", "{$.name}.toUpperCase()");
  transform.setExpressionMap(expMap);

  // Writes the POJOs to the Cassandra database.
  CassandraTransactionalStore transactionalStore = new CassandraTransactionalStore();
  CassandraPOJOOutputOperator cassandraOutputOperator = dag.addOperator("CassandraOutput", new CassandraPOJOOutputOperator());
  cassandraOutputOperator.setStore(transactionalStore);

  // Create the streams that complete the application logic of the dag.
  dag.addStream("KafkaToJsonParser", kafkaInputOperator.outputPort, jsonParser.in);
  dag.addStream("JsonParserToFilter", jsonParser.out, filterOperator.input);
  dag.addStream("FilterToTransform", filterOperator.truePort, transform.input);
  dag.addStream("TransformToCassandraDB", transform.output, cassandraOutputOperator.input);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 29, Source: Application.java

Example 11: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{

  S3InputModule inputModule = dag.addModule("S3InputModule", new S3InputModule());
  HDFSFileCopyModule outputModule = dag.addModule("HDFSFileCopyModule", new HDFSFileCopyModule());

  dag.addStream("FileMetaData", inputModule.filesMetadataOutput, outputModule.filesMetadataInput);
  dag.addStream("BlocksMetaData", inputModule.blocksMetadataOutput, outputModule.blocksMetadataInput)
      .setLocality(Locality.THREAD_LOCAL);
  dag.addStream("BlocksData", inputModule.messages, outputModule.blockData).setLocality(Locality.THREAD_LOCAL);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 13, Source: Application.java

Example 12: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // Add FSInputModule as input and PartFileWriter as output operators to dag.
  FSInputModule input = dag.addModule("HDFSInputModule", new FSInputModule());
  PartFileWriter output = dag.addOperator("PartFileCopy", new PartFileWriter());

  dag.setInputPortAttribute(output.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(output.blockMetadataInput, Context.PortContext.PARTITION_PARALLEL, true);

  // Create a stream for blockData, fileMetadata, blockMetadata from Input to PartFileWriter
  dag.addStream("BlocksMetaData", input.blocksMetadataOutput, output.blockMetadataInput).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("BlocksData", input.messages, output.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("FileMetaData", input.filesMetadataOutput, output.fileMetadataInput);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 16, Source: Application.java

Example 13: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
public void populateDAG(DAG dag, Configuration conf)
{
  // This Kafka input operator reads from the specified Kafka brokers.
  KafkaSinglePortInputOperator kafkaInputOperator = dag.addOperator("kafkaInput", KafkaSinglePortInputOperator.class);

  // Parses a JSON string tuple against a specified JSON schema and emits a JSONObject.
  JsonParser jsonParser = dag.addOperator("JsonParser", JsonParser.class);

  // Filters tuples according to a user-specified condition.
  FilterOperator filterOperator = dag.addOperator("filter", new FilterOperator());

  // Transforms tuple values using user-defined expressions; the logic below may be modified.
  TransformOperator transform = dag.addOperator("transform", new TransformOperator());
  Map<String, String> expMap = Maps.newHashMap();
  expMap.put("name", "{$.name}.toUpperCase()");
  transform.setExpressionMap(expMap);

  // Formats the transformed tuples into JSON.
  JsonFormatter jsonFormatter = dag.addOperator("JsonFormatter", JsonFormatter.class);

  // Publishes the data to Kafka consumers.
  KafkaSinglePortOutputOperator kafkaOutput = dag.addOperator("kafkaOutput", KafkaSinglePortOutputOperator.class);

  // Create the streams that complete the application logic of the dag.
  dag.addStream("KafkaToJsonParser", kafkaInputOperator.outputPort, jsonParser.in);
  dag.addStream("JsonParserToFilter", jsonParser.out, filterOperator.input);
  dag.addStream("FilterToTransform", filterOperator.truePort, transform.input);
  dag.addStream("TransformToJsonFormatter", transform.output, jsonFormatter.in);
  dag.addStream("JsonFormatterToKafka", jsonFormatter.out, kafkaOutput.inputPort);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 31, Source: Application.java

Example 14: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{

  FSInputModule inputModule = dag.addModule("HDFSInputModule", new FSInputModule());
  HDFSFileCopyModule outputModule = dag.addModule("HDFSFileCopyModule", new HDFSFileCopyModule());

  dag.addStream("FileMetaData", inputModule.filesMetadataOutput, outputModule.filesMetadataInput);
  dag.addStream("BlocksMetaData", inputModule.blocksMetadataOutput, outputModule.blocksMetadataInput)
      .setLocality(Locality.THREAD_LOCAL);
  dag.addStream("BlocksData", inputModule.messages, outputModule.blockData).setLocality(Locality.THREAD_LOCAL);
}
 
Developer: DataTorrent, Project: app-templates, Lines: 13, Source: Application.java

Example 15: populateDAG

import com.datatorrent.api.DAG; //import the package/class the method depends on
public void populateDAG(DAG dag, Configuration conf)
{
  FSRecordReaderModule recordReader = dag.addModule("lineInput", FSRecordReaderModule.class);
  S3BytesOutputModule s3BytesOutputModule = dag.addModule("s3TupleOutput", S3BytesOutputModule.class);
  dag.addStream("data", recordReader.records, s3BytesOutputModule.input);
}
 
Developer: apache, Project: apex-malhar, Lines: 8, Source: Application.java


Note: The com.datatorrent.api.DAG.addStream method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's License. Do not reproduce without permission.