当前位置: 首页>>代码示例>>Java>>正文


Java Tuple类代码示例

本文整理汇总了Java中org.apache.storm.tuple.Tuple的典型用法代码示例。如果您正苦于以下问题:Java Tuple类的具体用法?Java Tuple怎么用?Java Tuple使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


Tuple类属于org.apache.storm.tuple包,在下文中一共展示了Tuple类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple input) {
    // Tick tuples carry no business payload: just acknowledge and return.
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }
    try {
        // Look up the handler registered for this tuple's command and delegate.
        Command cmd = (Command) input.getValueByField(EmitFields.COMMAND);
        BoltCommandHandler handler = handlerManager.getHandler(cmd);
        handler.handle(input);
        this.collector.ack(input);
    } catch (Exception e) {
        // Fail the tuple so Storm can replay it, then surface the error.
        this.collector.fail(input);
        this.collector.reportError(e);
        logger.error("Process Error!", e);
    }
}
 
开发者ID:BriData,项目名称:DBus,代码行数:19,代码来源:DispatcherBolt.java

示例2: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
public void execute(Tuple tuple) {
    final String word = tuple.getStringByField(Constraints.wordFileds);
    Integer count = tuple.getIntegerByField(Constraints.coinCountFileds);

    // Feed this observation into the hot-key predictor.
    predictorHotKeyUtil.PredictorHotKey(word, count);

    // Emit immediately when the word is already considered hot.
    if (predictorHotKeyUtil.isHotKey(word))
        collector.emit(new Values(word, 1));

    // BUG FIX: the callback previously emitted the outer `word`, ignoring the
    // `key` being dumped from the synopsis; emit the evicted key instead.
    predictorHotKeyUtil.SynopsisHashMapRandomDump(new DumpRemoveHandler() {
        @Override
        public void dumpRemove(String key) {
            collector.emit(new Values(key, 1));
        }
    });

    collector.ack(tuple);
}
 
开发者ID:DStream-Storm,项目名称:DStream,代码行数:19,代码来源:PredictorBolt.java

示例3: getResultofMax

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
/**
 * Computes the result of the aggregate function {@code max} over one group.
 *
 * @param isJoin    whether the query contains a join; if so, field names are
 *                  qualified as {@code table.column}, otherwise bare column names
 * @param tupleList the tuples belonging to one group
 * @param parameter the single argument of {@code max(...)}, e.g. {@code max(tab1.A)};
 *                  only one parameter is currently supported
 * @return the maximum integer value of the field over the group, or
 *         {@link Integer#MIN_VALUE} when no tuple carries a usable value
 */
private int getResultofMax(boolean isJoin, List<Tuple> tupleList, TCItem parameter) {
    // The field key does not depend on the tuple, so compute it once.
    String fieldKey = isJoin
            ? parameter.getTableName() + "." + parameter.getColName()
            : parameter.getColName();
    int maxRes = Integer.MIN_VALUE;
    for (Tuple tuple : tupleList) {
        String tValStr = tuple.getStringByField(fieldKey);
        // Guard against null as well as empty: getStringByField may return
        // null for an absent field, which previously caused an NPE.
        if (tValStr != null && !tValStr.isEmpty()) {
            maxRes = Math.max(maxRes, Integer.parseInt(tValStr));
        }
    }
    return maxRes;
}
 
开发者ID:bigdataFlySQL,项目名称:SQLonStorm,代码行数:23,代码来源:GroupByBolt.java

示例4: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple tuple) {
  Object obj = tuple.getValue(0);
  long count = tuple.getLong(1);
  int source = tuple.getSourceTask();
  // One sub-map per key, tracking the latest partial count from each
  // upstream task. computeIfAbsent replaces the manual get/null-check/put.
  Map<Integer, Long> subCounts = counts.computeIfAbsent(obj, k -> new HashMap<Integer, Long>());
  // Update the current count for this object
  subCounts.put(source, count);
  // Output the sum of all the known counts so far for this key
  long sum = 0;
  for (Long val : subCounts.values()) {
    sum += val;
  }
  collector.emit(new Values(obj, sum));
}
 
开发者ID:Paleozoic,项目名称:storm_spring_boot_demo,代码行数:20,代码来源:RollingCountAggBolt.java

示例5: invalidJsonForDiscoveryFilter

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Test
public void invalidJsonForDiscoveryFilter() throws CmdLineException, ConfigurationException {
    // Build the bolt under test from the real topology configuration.
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    OFELinkBolt bolt = new OFELinkBolt(config);

    // Stub just enough of the TopologyContext for TupleImpl construction below.
    TopologyContext context = Mockito.mock(TopologyContext.class);

    Mockito.when(context.getComponentId(TASK_ID_BOLT))
            .thenReturn(COMPONENT_ID_SOURCE);
    Mockito.when(context.getComponentOutputFields(COMPONENT_ID_SOURCE, STREAM_ID_INPUT))
            .thenReturn(KafkaMessage.FORMAT);

    // Spy on the collector delegate so the ack can be verified at the end.
    OutputCollectorMock outputDelegate = Mockito.spy(new OutputCollectorMock());
    OutputCollector output = new OutputCollector(outputDelegate);

    bolt.prepare(stormConfig(), context, output);
    bolt.initState(new InMemoryKeyValueState<>());

    // Feed a deliberately corrupted JSON payload; the bolt is expected to
    // swallow the parse failure and still ack the tuple (no crash, no fail).
    Tuple tuple = new TupleImpl(context, new Values("{\"corrupted-json"), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(tuple);

    Mockito.verify(outputDelegate).ack(tuple);
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:25,代码来源:OFELinkBoltTest.java

示例6: ack

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
/**
 * Do not ack the tuple straight away! wait to get the confirmation that it
 * worked.
 *
 * Tuples are buffered in {@code waitAck}, keyed by the SHA-256 of their URL,
 * until the indexing confirmation arrives elsewhere.
 *
 * @param t   the tuple awaiting acknowledgement
 * @param url the URL the tuple refers to; used to derive the cache key
 **/
public void ack(Tuple t, String url) {
    // All reads/writes of the shared waitAck cache happen under its lock.
    synchronized (waitAck) {
        String sha256hex = org.apache.commons.codec.digest.DigestUtils
                .sha256Hex(url);
        List<Tuple> tt = waitAck.getIfPresent(sha256hex);
        if (tt == null) {
            // check that there has been no removal of the entry since
            Metadata metadata = (Metadata) t.getValueByField("metadata");
            if (metadata.getFirstValue("es.status.skipped.sending") != null) {
                LOG.debug(
                        "Indexing skipped for {} with ID {} but key removed since",
                        url, sha256hex);
                // ack straight away!
                super.ack(t, url);
                return;
            }
            // No pending list yet for this key: start one.
            tt = new LinkedList<>();
        }
        // Queue the tuple until confirmation; re-put refreshes the cache entry.
        tt.add(t);
        waitAck.put(sha256hex, tt);
        LOG.debug("Added to waitAck {} with ID {} total {}", url,
                sha256hex, tt.size());
    }
}
 
开发者ID:eorliac,项目名称:patent-crawler,代码行数:29,代码来源:StatusUpdaterBolt.java

示例7: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
public void execute(Tuple input, BasicOutputCollector collector) {
	// Assemble the "personal" section from the tuple's name fields.
	// "firstName","lastName","companyName")
	Map<String, Object> personal = new HashMap<String, Object>();
	personal.put("firstName", input.getValueByField("firstName"));
	personal.put("lastName", input.getValueByField("lastName"));

	// Assemble the "company" section.
	Map<String, Object> company = new HashMap<String, Object>();
	company.put("companyName", input.getValueByField("companyName"));

	Map<String, Map<String, Object>> record = new HashMap<String, Map<String, Object>>();
	record.put("personal", personal);
	record.put("company", company);

	// call the insert method of HBaseOperations class to insert record into
	// HBase, using a random UUID as the row key
	hbaseOperations.insert(record, UUID.randomUUID().toString());
}
 
开发者ID:PacktPublishing,项目名称:Mastering-Apache-Storm,代码行数:17,代码来源:StormHBaseBolt.java

示例8: process

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@SuppressWarnings("rawtypes")
@Override
protected void process(Tuple tuple) {
    try {
        // Map the tuple into a single row of columns for the batch API.
        List<List<Column>> rows = new ArrayList<List<Column>>();
        rows.add(jdbcMapper.getColumns(tuple));
        // A configured table name means a plain insert; otherwise run the
        // user-supplied insert statement.
        if (!StringUtils.isBlank(tableName)) {
            this.jdbcClient.insert(this.tableName, rows);
        } else {
            this.jdbcClient.executeInsertQuery(this.insertQuery, rows);
        }
        this.collector.ack(tuple);
    } catch (Exception e) {
        // Report the error and fail the tuple so Storm replays it.
        this.collector.reportError(e);
        this.collector.fail(tuple);
    }
}
 
开发者ID:neur0nus,项目名称:storm-clickhouse,代码行数:19,代码来源:ClickhouseInsertBolt.java

示例9: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple tuple) {
    String url = tuple.getStringByField("url");
    byte[] content = tuple.getBinaryByField("content");
    Metadata metadata = (Metadata) tuple.getValueByField("metadata");
    String text = tuple.getStringByField("text");

    Values values = new Values(url, content, metadata, text);

    // A tuple that already carries text needs no re-parsing; anything else
    // is routed to the dedicated "tika" stream for parsing.
    boolean alreadyParsed = StringUtils.isNotBlank(text);
    if (alreadyParsed) {
        collector.emit(tuple, values);
    } else {
        collector.emit("tika", tuple, values);
    }

    collector.ack(tuple);
}
 
开发者ID:eorliac,项目名称:patent-crawler,代码行数:19,代码来源:RedirectionBolt.java

示例10: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple input, BasicOutputCollector collector) {
	if (input.contains(FIELD_SWITCH_ID)) {
		String switchId = (String) input.getValueByField(FIELD_SWITCH_ID);
		// Reuse the value extracted above instead of a second field lookup.
		logger.debug("switchId=" + switchId);

		if (isSwitchConfirmed(switchId)) {
			// Confirmed switches are forwarded downstream.
			collector.emit(new Values(switchId));
		} else {
			logger.warn("could not confirm switch with id " + switchId);
			// TODO - any action here?
		}
	} else {
		// Malformed input: required field is absent.
		logger.error(FIELD_SWITCH_ID + " not found in tuple " + input);
	}
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:17,代码来源:ConfirmationBolt.java

示例11: handle

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void handle(Tuple tuple) {
    // Unpack the heartbeat payload carried in the tuple's DATA field.
    EmitData data = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    List<PairWrapper<String, Object>> wrapperList = data.get(EmitData.MESSAGE);
    if (wrapperList != null && !wrapperList.isEmpty()) {
        for (PairWrapper<String, Object> wrapper : wrapperList) {
            // Rebuild the heartbeat pulse from the wrapper's key/value pairs.
            HeartbeatPulse pulse = HeartbeatPulse.build(wrapper.pairs2map());
            if (logger.isDebugEnabled()) {
                // Debug-only diagnostics: decode the packet to log its
                // origin timestamp alongside the consumer offset.
                Object offset = data.get(EmitData.OFFSET);
                HeartBeatPacket packet = HeartBeatPacket.parse(pulse.getPacket());
                SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
                String groupId = tuple.getStringByField(Constants.EmitFields.GROUP_FIELD);
                logger.debug("[heartbeat] {} offset:{} ts:{}, time:{}", groupId, offset == null ? -1 : offset, packet.getTxtime(), format.format(new Date(packet.getTxtime())));
            }
            // Record the pulse with the heartbeat reporter.
            reporter.mark(pulse);
        }
    }

    // Delegate further processing, then ack through the listener's collector.
    handler.handle(tuple);
    this.listener.getOutputCollector().ack(tuple);
}
 
开发者ID:BriData,项目名称:DBus,代码行数:22,代码来源:HeartbeatHandler.java

示例12: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple tuple) {
    boltstatus++;

    String word = tuple.getStringByField(Constraints.wordFileds);
    if (!word.isEmpty()) {
        // merge() collapses the previous get/null-check/increment/put
        // sequence into a single map operation and returns the new total.
        Long count = counts.merge(word, 1L, Long::sum);
        // Emit anchored on the input tuple so the count is replayable.
        outputCollector.emit(tuple, new Values(word, count));
    }
    outputCollector.ack(tuple);
}
 
开发者ID:DStream-Storm,项目名称:DStream,代码行数:17,代码来源:WordCounterBolt.java

示例13: handleRestoreRequest

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
/**
 * Restores a flow from a {@code FlowRestoreRequest}: recomputes its path,
 * updates or creates the cache entry, and emits the result on the UPDATE stream.
 *
 * @param message the command message wrapping the restore request
 * @param tuple   the input tuple used as the emit anchor
 * @throws IOException if the outgoing flow data cannot be serialized to JSON
 */
private void handleRestoreRequest(CommandMessage message, Tuple tuple) throws IOException {
    ImmutablePair<Flow, Flow> requestedFlow = ((FlowRestoreRequest) message.getData()).getPayload();

    // Recompute the path for the forward flow.
    ImmutablePair<PathInfoData, PathInfoData> path = pathComputer.getPath(requestedFlow.getLeft());
    logger.info("Restored flow path: {}", path);

    // A non-one-switch flow with no computed path cannot be restored.
    if (!flowCache.isOneSwitchFlow(requestedFlow) && pathComputer.isEmpty(path)) {
        throw new MessageException(message.getCorrelationId(), System.currentTimeMillis(),
                ErrorType.CREATION_FAILURE, "Could not restore flow", "Path was not found");
    }

    // Update the cached flow when it exists, otherwise create it.
    ImmutablePair<Flow, Flow> flow;
    if (flowCache.cacheContainsFlow(requestedFlow.getLeft().getFlowId())) {
        flow = flowCache.updateFlow(requestedFlow, path);
    } else {
        flow = flowCache.createFlow(requestedFlow, path);
    }
    logger.info("Restored flow: {}", flow);

    // Serialize the restored flow and emit it, anchored on the input tuple.
    Values topology = new Values(Utils.MAPPER.writeValueAsString(
            new FlowInfoData(requestedFlow.getLeft().getFlowId(), flow,
                    FlowOperation.UPDATE, message.getCorrelationId())));
    outputCollector.emit(StreamType.UPDATE.toString(), tuple, topology);
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:25,代码来源:CrudBolt.java

示例14: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple tuple) {
  // Join key for this tuple, extracted from the configured id fields.
  List<Object> id = tuple.select(_idFields);
  GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
  // One pending map per join key; computeIfAbsent replaces the previous
  // containsKey/put/get sequence.
  Map<GlobalStreamId, Tuple> parts = _pending.computeIfAbsent(id, k -> new HashMap<GlobalStreamId, Tuple>());
  if (parts.containsKey(streamId)) {
    throw new RuntimeException("Received same side of single join twice");
  }
  parts.put(streamId, tuple);
  // Once every source stream has contributed, materialize the joined row.
  if (parts.size() == _numSources) {
    _pending.remove(id);
    List<Object> joinResult = new ArrayList<Object>();
    for (String outField : _outFields) {
      // Pull each output field from whichever side it originated on.
      GlobalStreamId loc = _fieldLocations.get(outField);
      joinResult.add(parts.get(loc).getValueByField(outField));
    }
    // Anchor the emit on all contributing tuples, then ack each one.
    _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);

    for (Tuple part : parts.values()) {
      _collector.ack(part);
    }
  }
}
 
开发者ID:bigdataFlySQL,项目名称:SQLonStorm,代码行数:26,代码来源:SingleJoinBolt.java

示例15: execute

import org.apache.storm.tuple.Tuple; //导入依赖的package包/类
@Override
public void execute(Tuple tuple) {
    final String data = tuple.getString(0);
    // BUG FIX: the format string had no {} placeholder, so the datapoint
    // payload passed as an argument was silently dropped from the log line.
    LOGGER.debug("Processing datapoint {}", data);
    try {
        Datapoint datapoint = MAPPER.readValue(data, Datapoint.class);
        if (isUpdateRequired(datapoint)) {
            addDatapoint(datapoint);

            List<Object> stream = Stream.of(datapoint.getMetric(), datapoint.getTime(), datapoint.getValue(),
                    datapoint.getTags())
                    .collect(Collectors.toList());

            // Parameterized form avoids eager string concatenation when
            // debug logging is disabled.
            LOGGER.debug("emit: {}", stream);
            collector.emit(stream);
        }
    } catch (IOException e) {
        LOGGER.error("Failed read datapoint", e);
    } finally {
        // Always ack: a malformed datapoint is logged, not replayed.
        collector.ack(tuple);
    }
}
 
开发者ID:telstra,项目名称:open-kilda,代码行数:23,代码来源:OpenTSDBFilterBolt.java


注:本文中的org.apache.storm.tuple.Tuple类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。