

Java DAG.setAttribute Method Code Examples

This article collects typical usage examples of the Java method com.datatorrent.api.DAG.setAttribute. If you have been asking yourself what DAG.setAttribute does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the containing class, com.datatorrent.api.DAG.


The following 15 code examples of DAG.setAttribute are listed below, ordered by popularity.
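Before the examples, a quick orientation. DAG.setAttribute has two arities: called without an operator it sets a DAG-scope attribute for the whole application, and called with an operator as the first argument it sets an operator-scope attribute; port-scope attributes use the sibling methods setInputPortAttribute/setOutputPortAttribute instead. The minimal sketch below illustrates the three scopes; AttributeScopesApp and NoOpOperator are hypothetical names invented for this sketch, not part of the Apex API.

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.Context;
import com.datatorrent.api.DAG;
import com.datatorrent.api.DefaultInputPort;
import com.datatorrent.api.StreamingApplication;
import com.datatorrent.common.util.BaseOperator;

public class AttributeScopesApp implements StreamingApplication
{
  /** Minimal pass-through operator, defined only to make the sketch self-contained. */
  public static class NoOpOperator extends BaseOperator
  {
    public final transient DefaultInputPort<Object> input = new DefaultInputPort<Object>()
    {
      @Override
      public void process(Object tuple)
      {
        // discard the tuple
      }
    };
  }

  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    // DAG scope: no operator argument; applies to the whole application.
    dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, 500);

    // Operator scope: the first argument selects the operator the attribute applies to.
    NoOpOperator sink = dag.addOperator("sink", new NoOpOperator());
    dag.setAttribute(sink, Context.OperatorContext.APPLICATION_WINDOW_COUNT, 4);

    // Port scope: ports have dedicated setters rather than a setAttribute overload.
    // (In a real application this port would also be connected with dag.addStream.)
    dag.setInputPortAttribute(sink.input, Context.PortContext.PARTITION_PARALLEL, true);
  }
}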

Example 1: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration configuration) {
    LogLevelProperties props = new LogLevelProperties(configuration);

    //dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, props.getWindowMillis());

    // create the operator to receive data from NiFi
    WindowDataManager inManager = new WindowDataManager.NoopWindowDataManager();
    NiFiSinglePortInputOperator nifiInput = getNiFiInput(dag, props, inManager);

    // create the operator to count log levels over a window
    String attributeName = props.getLogLevelAttribute();
    LogLevelWindowCount count = dag.addOperator("count", new LogLevelWindowCount(attributeName));
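    // APPLICATION_WINDOW_COUNT groups that many streaming windows into one application window for this operator.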
    dag.setAttribute(count, Context.OperatorContext.APPLICATION_WINDOW_COUNT, props.getAppWindowCount());

    // create the operator to send data back to NiFi
    WindowDataManager outManager = new WindowDataManager.NoopWindowDataManager();
    NiFiSinglePortOutputOperator nifiOutput = getNiFiOutput(dag, props, outManager);

    // configure the dag to get nifi-in -> count -> nifi-out
    dag.addStream("nifi-in-count", nifiInput.outputPort, count.input);
    dag.addStream("count-nifi-out", count.output, nifiOutput.inputPort);
}
 
Developer: bbende, Project: nifi-streaming-examples, Lines: 24, Source: LogLevelApplication.java

Example 2: createStore

import com.datatorrent.api.DAG; // import the package/class this method depends on
protected AppDataSingleSchemaDimensionStoreHDHT createStore(DAG dag, Configuration conf, String eventSchema)
{
  AppDataSingleSchemaDimensionStoreHDHT store = dag.addOperator("Store", ProcessTimeAwareStore.class);
  store.setUpdateEnumValues(true);
  String basePath = Preconditions.checkNotNull(conf.get(PROP_STORE_PATH),
        "base path should be specified in the properties.xml");
  TFileImpl hdsFile = new TFileImpl.DTFileImpl();
  basePath += System.currentTimeMillis();
  hdsFile.setBasePath(basePath);

  store.setFileStore(hdsFile);
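  // COUNTERS_AGGREGATOR merges the per-partition counters of the store into a single aggregate.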
  dag.setAttribute(store, Context.OperatorContext.COUNTERS_AGGREGATOR,
      new BasicCounters.LongAggregator<MutableLong>());
  store.setConfigurationSchemaJSON(eventSchema);
  store.setPartitionCount(storePartitionCount);
  if (storePartitionCount > 1)
  {
    store.setQueryResultUnifier(new DimensionStoreHDHTNonEmptyQueryResultUnifier());
  }
  return store;
}
 
Developer: yahoo, Project: streaming-benchmarks, Lines: 23, Source: ApplicationDimensionComputation.java

Example 3: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  deployCount = 0;
  setupTimes = 0;
  
  String eventSchema = SchemaUtils.jarResourceFileToString("FaultTolerantTestApp.json");

  TestPojoGenerator generator = new TestPojoGenerator();

  dag.addOperator("generator", generator);

  TestDimensionsPOJO dimensions = new TestDimensionsPOJO();
  dag.addOperator("dimensions", dimensions);
  dimensions.setConfigurationSchemaJSON(eventSchema);

  dag.setAttribute(dimensions, Context.OperatorContext.APPLICATION_WINDOW_COUNT, applicationWindowCount);
  dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, checkpointWindowCount);

  DevNull<Object> outputOperator = new DevNull<>();
  dag.addOperator("output", outputOperator);

  dag.addStream("dimensionsStream", generator.outputPort, dimensions.input);
  dag.addStream("outputStream", dimensions.output, outputOperator.data);
}
 
Developer: DataTorrent, Project: Megh, Lines: 26, Source: FaultTolerantTestApp.java

Example 4: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestStatsListener sl = new TestStatsListener();
  sl.adjustRate = conf.getBoolean("dt.ManagedStateBenchmark.adjustRate", false);

  G generator = createGenerator();
  dag.addOperator("Generator", generator);
  //generator.setRange(timeRange);
  dag.setAttribute(generator, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));

  O windowedOperator = createWindowedOperator(conf);
  dag.addOperator("windowedOperator", windowedOperator);
  dag.setAttribute(windowedOperator, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));
  //dag.addStream("Data", generator.data, windowedOperator.input).setLocality(Locality.CONTAINER_LOCAL);
  connectGeneratorToWindowedOperator(dag, generator, windowedOperator);

  //WatermarkGenerator watermarkGenerator = new WatermarkGenerator();
  //dag.addOperator("WatermarkGenerator", watermarkGenerator);
  //dag.addStream("Control", watermarkGenerator.control, windowedOperator.controlInput)
  //  .setLocality(Locality.CONTAINER_LOCAL);

  DevNull output = dag.addOperator("output", new DevNull());
  dag.addStream("output", windowedOperator.output, output.data).setLocality(Locality.CONTAINER_LOCAL);
}
 
Developer: apache, Project: apex-malhar, Lines: 26, Source: AbstractWindowedOperatorBenchmarkApp.java

Example 5: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestStatsListener sl = new TestStatsListener();
  sl.adjustRate = conf.getBoolean("dt.hdsbench.adjustRate", false);
  TestGenerator gen = dag.addOperator("Generator", new TestGenerator());
  dag.setAttribute(gen, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));
  TestStoreOperator store = dag.addOperator("Store", new TestStoreOperator());
  dag.setAttribute(store, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener)sl));
  FileAccessFSImpl hfa = new HFileImpl();
  hfa.setBasePath(this.getClass().getSimpleName());
  store.setFileStore(hfa);
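  // PARTITION_PARALLEL deploys the store partitions 1:1 with the upstream generator partitions, avoiding a shuffle.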
  dag.setInputPortAttribute(store.input, PortContext.PARTITION_PARALLEL, true);
  dag.getOperatorMeta("Store").getAttributes().put(Context.OperatorContext.COUNTERS_AGGREGATOR,
      new HDHTWriter.BucketIOStatAggregator());
  dag.addStream("Events", gen.data, store.input).setLocality(Locality.THREAD_LOCAL);
}
 
Developer: DataTorrent, Project: Megh, Lines: 18, Source: HDHTBenchmarkTest.java

Example 6: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  AccumuloTestHelper.getConnector();
  AccumuloTestHelper.clearTable();
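  // APPLICATION_NAME sets the name under which the engine runs and displays this application.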
  dag.setAttribute(DAG.APPLICATION_NAME, "AccumuloOutputTest");
  AccumuloRowTupleGenerator rtg = dag.addOperator("tuplegenerator", AccumuloRowTupleGenerator.class);
  TestAccumuloOutputOperator taop = dag.addOperator("testaccumulooperator", TestAccumuloOutputOperator.class);
  dag.addStream("ss", rtg.outputPort, taop.input);
  taop.getStore().setTableName("tab1");
  taop.getStore().setZookeeperHost("127.0.0.1");
  taop.getStore().setInstanceName("instance");
  taop.getStore().setUserName("root");
  taop.getStore().setPassword("pass");
}
 
Developer: apache, Project: apex-malhar, Lines: 18, Source: AccumuloApp.java

Example 7: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration entries)
{
  /* Generate random key-value pairs */
  RandomDataGenerator randGen = dag.addOperator("randomgen", new RandomDataGenerator());

  /* Start with three generator partitions; the partitioner's type parameter matches the operator it is set on */
  dag.setAttribute(randGen, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<RandomDataGenerator>(3));

  UniqueCounter<KeyValPair<String, Object>> uniqCount =
      dag.addOperator("uniqevalue", new UniqueCounter<KeyValPair<String, Object>>());
  MapToKeyHashValuePairConverter<KeyValPair<String, Object>, Integer> converter =
      dag.addOperator("converter", new MapToKeyHashValuePairConverter());
  uniqCount.setCumulative(false);

  ConsoleOutputOperator output = dag.addOperator("output", new ConsoleOutputOperator());

  dag.addStream("datain", randGen.outPort, uniqCount.data);
  dag.addStream("convert", uniqCount.count, converter.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("consoutput", converter.output, output.input);
}
 
Developer: apache, Project: apex-malhar, Lines: 20, Source: UniqueKeyValCountExample.java

Example 8: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TestGeneratorInputOperator input = dag.addOperator("Input", new TestGeneratorInputOperator());
  test = dag.addOperator("Test", new DynamicLoader());

  dag.addStream("S1", input.outport, test.input);
  dag.setAttribute(Context.DAGContext.LIBRARY_JARS, generatedJar);
  dag.setInputPortAttribute(test.input, Context.PortContext.TUPLE_CLASS, pojo);
}
 
Developer: apache, Project: apex-core, Lines: 11, Source: StramLocalClusterTest.java

Example 9: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  HiveStore store = new HiveStore();
  store.setDatabaseUrl(conf.get("dt.application.HiveInsertBenchmarkingApp.operator.HiveOperator.store.dbUrl"));
  store.setConnectionProperties(conf.get(
      "dt.application.HiveInsertBenchmarkingApp.operator.HiveOperator.store.connectionProperties"));
  store.setFilepath(conf.get("dt.application.HiveInsertBenchmarkingApp.operator.HiveOperator.store.filepath"));

  try {
    hiveInitializeDatabase(store, conf.get(
        "dt.application.HiveInsertBenchmarkingApp.operator.HiveOperator.tablename"));
  } catch (SQLException ex) {
    LOG.debug(ex.getMessage());
  }

  dag.setAttribute(DAG.STREAMING_WINDOW_SIZE_MILLIS, 1000);
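  // Use 1000 ms streaming windows; the engine default is 500 ms.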
  RandomDateGenerator dateGenerator = dag.addOperator("DateGenerator", new RandomDateGenerator());
  FSRollingTestImpl rollingFsWriter = dag.addOperator("RollingFsWriter", new FSRollingTestImpl());
  rollingFsWriter.setFilePath(store.filepath);
  HiveOperator hiveInsert = dag.addOperator("HiveOperator", new HiveOperator());
  hiveInsert.setStore(store);
  ArrayList<String> hivePartitionColumns = new ArrayList<String>();
  hivePartitionColumns.add("dt");
  hiveInsert.setHivePartitionColumns(hivePartitionColumns);
  dag.addStream("Generator2HDFS", dateGenerator.outputString, rollingFsWriter.input);
  dag.addStream("FsWriter2Hive", rollingFsWriter.outputPort, hiveInsert.input);
}
 
Developer: apache, Project: apex-malhar, Lines: 29, Source: HiveInsertBenchmarkingApp.java

Example 10: setupHasFailureTest

import com.datatorrent.api.DAG; // import the package/class this method depends on
private void setupHasFailureTest(KafkaSinglePortInputOperator operator, DAG dag)
{
  operator.setHoldingBufferSize(5000);
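  // Checkpoint after every streaming window so the failure test replays at most one window.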
  dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
  //dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new FSStorageAgent(
  //  APPLICATION_PATH + "failureck", new Configuration()));
  operator.setMaxTuplesPerWindow(tuplesPerWindow);
}
 
Developer: apache, Project: apex-malhar, Lines: 9, Source: KafkaInputOperatorTest.java

Example 11: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration entries)
{
  dag.setAttribute(DAG.APPLICATION_NAME, "UniqueValueCountDemo");
  dag.setAttribute(DAG.DEBUG, true);

  /* Generate random key-value pairs */
  RandomEventGenerator randGen = dag.addOperator("randomgen", new RandomEventGenerator());
  randGen.setMaxvalue(999999);
  randGen.setTuplesBlastIntervalMillis(50);
  dag.setAttribute(randGen, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<RandomEventGenerator>(3));

  /* Initialize with three partitions to start with */
  UniqueCounter<Integer> uniqCount = dag.addOperator("uniqevalue", new UniqueCounter<Integer>());
  MapToKeyHashValuePairConverter<Integer, Integer> converter =
      dag.addOperator("converter", new MapToKeyHashValuePairConverter());

  dag.setAttribute(uniqCount, Context.OperatorContext.PARTITIONER,
      new StatelessPartitioner<UniqueCounter<Integer>>(3));
  dag.setInputPortAttribute(uniqCount.data, Context.PortContext.PARTITION_PARALLEL, true);
  uniqCount.setCumulative(false);

  Counter counter = dag.addOperator("count", new Counter());
  ConsoleOutputOperator output = dag.addOperator("output", new ConsoleOutputOperator());

  dag.addStream("datain", randGen.integer_data, uniqCount.data);
  dag.addStream("convert", uniqCount.count, converter.input).setLocality(Locality.THREAD_LOCAL);
  dag.addStream("consoutput", converter.output, counter.input);
  dag.addStream("final", counter.output, output.input);
}
 
Developer: apache, Project: apex-malhar, Lines: 32, Source: UniqueValueCountBenchmarkApplication.java

Example 12: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  dag.setAttribute(DAG.APPLICATION_NAME, "KafkaOutputBenchmark");
  BenchmarkPartitionableKafkaOutputOperator bpkoo = dag.addOperator(
      "KafkaBenchmarkProducer", BenchmarkPartitionableKafkaOutputOperator.class);
  bpkoo.setBrokerList(conf.get("kafka.brokerlist"));
  bpkoo.setPartitionCount(2);
}
 
Developer: apache, Project: apex-malhar, Lines: 10, Source: KafkaOutputBenchmark.java

Example 13: setPartitioner

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
void setPartitioner(DAG dag, Configuration conf, TransformOperator transform)
{
  StatelessThroughputBasedPartitioner<TransformOperator> partitioner = new StatelessThroughputBasedPartitioner<>();
  partitioner.setCooldownMillis(conf.getLong(COOL_DOWN_MILLIS, 10000));
  partitioner.setMaximumEvents(conf.getLong(MAX_THROUGHPUT, 30000));
  partitioner.setMinimumEvents(conf.getLong(MIN_THROUGHPUT, 10000));
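  // The throughput-based partitioner also implements StatsListener, so it is registered in both
  // roles: as a stats listener to observe throughput, and as the partitioner that reacts to it.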
  dag.setAttribute(transform, Context.OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[]{partitioner}));
  dag.setAttribute(transform, Context.OperatorContext.PARTITIONER, partitioner);
}
 
Developer: apache, Project: apex-malhar, Lines: 11, Source: DynamicTransformApplication.java

Example 14: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  // Setup the operator to get the data from twitter sample stream injected into the system.
  TwitterSampleInput twitterFeed = new TwitterSampleInput();
  twitterFeed = dag.addOperator("TweetSampler", twitterFeed);

  //  Setup the operator to get the URLs extracted from the twitter statuses
  TwitterStatusURLExtractor urlExtractor = dag.addOperator("URLExtractor", TwitterStatusURLExtractor.class);

  // Setup a node to count the unique urls within a window.
  UniqueCounter<String> uniqueCounter = dag.addOperator("UniqueURLCounter", new UniqueCounter<String>());
  // Get the aggregated url counts and count them over last 5 mins.
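  // 600 application windows x the default 500 ms streaming window = 300 s (5 minutes);
  // SLIDE_BY_WINDOW_COUNT = 1 advances that 5-minute aggregate one streaming window at a time.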
  dag.setAttribute(uniqueCounter, Context.OperatorContext.APPLICATION_WINDOW_COUNT, 600);
  dag.setAttribute(uniqueCounter, Context.OperatorContext.SLIDE_BY_WINDOW_COUNT, 1);


  WindowedTopCounter<String> topCounts = dag.addOperator("TopCounter", new WindowedTopCounter<String>());
  topCounts.setTopCount(10);
  topCounts.setSlidingWindowWidth(1);
  topCounts.setDagWindowWidth(1);

  // Feed the statuses from feed into the input of the url extractor.
  dag.addStream("TweetStream", twitterFeed.status, urlExtractor.input).setLocality(Locality.CONTAINER_LOCAL);
  //  Start counting the urls coming out of URL extractor
  dag.addStream("TwittedURLs", urlExtractor.url, uniqueCounter.data).setLocality(locality);
  // Count unique urls
  dag.addStream("UniqueURLCounts", uniqueCounter.count, topCounts.input);

  consoleOutput(dag, "topURLs", topCounts.output, SNAPSHOT_SCHEMA, "url");
}
 
Developer: apache, Project: apex-malhar, Lines: 32, Source: TwitterTopCounterApplication.java

Example 15: populateDAG

import com.datatorrent.api.DAG; // import the package/class this method depends on
@Override
public void populateDAG(DAG dag, Configuration configuration)
{
  // Create operators for each step.
  // Settings are applied by the platform using the config file.
  JsonGenerator eventGenerator = dag.addOperator("eventGenerator", new JsonGenerator());
  FilterTuples filterTuples = dag.addOperator("filterTuples", new FilterTuples());
  FilterFields filterFields = dag.addOperator("filterFields", new FilterFields());
  RedisJoin redisJoin = dag.addOperator("redisJoin", new RedisJoin());
  CampaignProcessor campaignProcessor = dag.addOperator("campaignProcessor", new CampaignProcessor());

  eventGenerator.setNumAdsPerCampaign(Integer.parseInt(configuration.get("numberOfAds")));
  eventGenerator.setNumCampaigns(Integer.parseInt(configuration.get("numberOfCampaigns")));
  setupRedis(eventGenerator.getCampaigns(), configuration.get("redis"));

  // Connect the Ports in the Operators
  dag.addStream("filterTuples", eventGenerator.out, filterTuples.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("filterFields", filterTuples.output, filterFields.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("redisJoin", filterFields.output, redisJoin.input).setLocality(DAG.Locality.CONTAINER_LOCAL);
  dag.addStream("output", redisJoin.output, campaignProcessor.input);

  dag.setInputPortAttribute(filterTuples.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(filterFields.input, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(redisJoin.input, Context.PortContext.PARTITION_PARALLEL, true);

  // Eight generator partitions fan out into eight parallel generator -> filter -> join pipelines.
  dag.setAttribute(eventGenerator, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<JsonGenerator>(8));
  dag.setAttribute(campaignProcessor, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<CampaignProcessor>(8));
}
 
Developer: yahoo, Project: streaming-benchmarks, Lines: 29, Source: ApplicationWithGenerator.java


Note: The com.datatorrent.api.DAG.setAttribute examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before redistributing or reusing the code; do not republish without permission.