This article collects typical usage examples of the Java class org.apache.storm.tuple.Tuple. If you are wondering what the Tuple class does, how to use it, or what real code that uses it looks like, the curated examples below should help.
The Tuple class belongs to the org.apache.storm.tuple package. Fifteen code examples are shown below, ordered by popularity.
Example 1: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple input) {
    if (TupleUtils.isTick(input)) {
        collector.ack(input);
        return;
    }
    try {
        Command cmd = (Command) input.getValueByField(EmitFields.COMMAND);
        BoltCommandHandler handler = handlerManager.getHandler(cmd);
        handler.handle(input);
        this.collector.ack(input);
    } catch (Exception e) {
        this.collector.fail(input);
        this.collector.reportError(e);
        logger.error("Process Error!", e);
    }
}
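Tick tuples such as the one filtered out above only arrive if the bolt requests them. A minimal sketch of how the same bolt could ask Storm for a tick every 10 seconds (the frequency value is an assumption chosen for illustration):

import java.util.HashMap;
import java.util.Map;

import org.apache.storm.Config;

// Inside the bolt class: request a tick tuple every 10 seconds.
@Override
public Map<String, Object> getComponentConfiguration() {
    Map<String, Object> conf = new HashMap<>();
    conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 10);
    return conf;
}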
Example 2: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
public void execute(Tuple tuple) {
    final String word = tuple.getStringByField(Constraints.wordFileds);
    Integer count = tuple.getIntegerByField(Constraints.coinCountFileds);
    predictorHotKeyUtil.PredictorHotKey(word, count);
    if (predictorHotKeyUtil.isHotKey(word)) {
        collector.emit(new Values(word, 1));
    }
    predictorHotKeyUtil.SynopsisHashMapRandomDump(new DumpRemoveHandler() {
        @Override
        public void dumpRemove(String key) {
            // emit the key being evicted from the synopsis
            collector.emit(new Values(key, 1));
        }
    });
    collector.ack(tuple);
}
Example 3: getResultofMax
import org.apache.storm.tuple.Tuple; // import the required package/class
/**
 * Computes the result of the aggregate function max.
 *
 * @param isJoin    whether the query contains a join
 * @param tupleList the tuples belonging to one group
 * @param parameter the argument of max(tab1.A); currently only a single argument is supported
 * @return the result of the max function over this group of tuples
 */
private int getResultofMax(boolean isJoin, List<Tuple> tupleList, TCItem parameter) {
    int maxRes = Integer.MIN_VALUE;
    for (Tuple tuple : tupleList) {
        String tempKey = parameter.getTableName() + "." + parameter.getColName();
        if (!isJoin) {
            tempKey = parameter.getColName();
        }
        String tValStr = tuple.getStringByField(tempKey);
        // guard against missing fields as well as empty values
        if (tValStr != null && !tValStr.isEmpty()) {
            int tVal = Integer.parseInt(tValStr);
            maxRes = Math.max(maxRes, tVal);
        }
    }
    return maxRes;
}
Example 4: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple tuple) {
    Object obj = tuple.getValue(0);
    long count = tuple.getLong(1);
    int source = tuple.getSourceTask();
    Map<Integer, Long> subCounts = counts.get(obj);
    if (subCounts == null) {
        subCounts = new HashMap<Integer, Long>();
        counts.put(obj, subCounts);
    }
    // Update the current count for this object
    subCounts.put(source, count);
    // Output the sum of all the counts known so far for this key
    long sum = 0;
    for (Long val : subCounts.values()) {
        sum += val;
    }
    collector.emit(new Values(obj, sum));
}
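The bolt above merges partial counts arriving from several upstream counter tasks, which is why it keys the inner map by getSourceTask(). A minimal wiring sketch that produces that situation; the spout and bolt class names and the parallelism values are hypothetical placeholders:

import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("words", new WordSpout());                  // hypothetical spout emitting ("word")
builder.setBolt("partial-count", new PartialCountBolt(), 4)  // several tasks, each with its own partial count
       .fieldsGrouping("words", new Fields("word"));
builder.setBolt("merge", new MergingTotalsBolt(), 2)         // the bolt shown above
       .fieldsGrouping("partial-count", new Fields("word")); // the same word always reaches the same merger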
Example 5: invalidJsonForDiscoveryFilter
import org.apache.storm.tuple.Tuple; // import the required package/class
@Test
public void invalidJsonForDiscoveryFilter() throws CmdLineException, ConfigurationException {
    OFEventWFMTopology manager = new OFEventWFMTopology(makeLaunchEnvironment());
    TopologyConfig config = manager.getConfig();
    OFELinkBolt bolt = new OFELinkBolt(config);

    TopologyContext context = Mockito.mock(TopologyContext.class);
    Mockito.when(context.getComponentId(TASK_ID_BOLT))
            .thenReturn(COMPONENT_ID_SOURCE);
    Mockito.when(context.getComponentOutputFields(COMPONENT_ID_SOURCE, STREAM_ID_INPUT))
            .thenReturn(KafkaMessage.FORMAT);

    OutputCollectorMock outputDelegate = Mockito.spy(new OutputCollectorMock());
    OutputCollector output = new OutputCollector(outputDelegate);

    bolt.prepare(stormConfig(), context, output);
    bolt.initState(new InMemoryKeyValueState<>());

    Tuple tuple = new TupleImpl(context, new Values("{\"corrupted-json"), TASK_ID_BOLT, STREAM_ID_INPUT);
    bolt.doWork(tuple);

    Mockito.verify(outputDelegate).ack(tuple);
}
Example 6: ack
import org.apache.storm.tuple.Tuple; // import the required package/class
/**
 * Do not ack the tuple straight away! Wait for confirmation that the
 * indexing actually worked.
 **/
public void ack(Tuple t, String url) {
    synchronized (waitAck) {
        String sha256hex = org.apache.commons.codec.digest.DigestUtils
                .sha256Hex(url);
        List<Tuple> tt = waitAck.getIfPresent(sha256hex);
        if (tt == null) {
            // check that there has been no removal of the entry since
            Metadata metadata = (Metadata) t.getValueByField("metadata");
            if (metadata.getFirstValue("es.status.skipped.sending") != null) {
                LOG.debug(
                        "Indexing skipped for {} with ID {} but key removed since",
                        url, sha256hex);
                // ack straight away!
                super.ack(t, url);
                return;
            }
            tt = new LinkedList<>();
        }
        tt.add(t);
        waitAck.put(sha256hex, tt);
        LOG.debug("Added to waitAck {} with ID {} total {}", url,
                sha256hex, tt.size());
    }
}
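waitAck above is accessed through getIfPresent/put, which matches Guava's Cache API. A minimal sketch, assuming Guava and an illustrative 60-second expiry, of how such a cache could be declared so that tuples whose confirmation never arrives eventually come back for failure handling:

import java.util.List;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import org.apache.storm.tuple.Tuple;

// Entries that never get a confirmation from the indexer expire here.
private final Cache<String, List<Tuple>> waitAck = CacheBuilder.newBuilder()
        .expireAfterWrite(60, TimeUnit.SECONDS)
        .removalListener((RemovalListener<String, List<Tuple>>) notification -> {
            if (notification.wasEvicted()) {
                // fail the buffered tuples here so Storm replays the URLs
            }
        })
        .build();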
Example 7: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
public void execute(Tuple input, BasicOutputCollector collector) {
    Map<String, Map<String, Object>> record = new HashMap<String, Map<String, Object>>();
    Map<String, Object> personalMap = new HashMap<String, Object>();
    // input fields: "firstName", "lastName", "companyName"
    personalMap.put("firstName", input.getValueByField("firstName"));
    personalMap.put("lastName", input.getValueByField("lastName"));
    Map<String, Object> companyMap = new HashMap<String, Object>();
    companyMap.put("companyName", input.getValueByField("companyName"));
    record.put("personal", personalMap);
    record.put("company", companyMap);
    // call the insert method of the HBaseOperations class to insert the record into HBase
    hbaseOperations.insert(record, UUID.randomUUID().toString());
}
Example 8: process
import org.apache.storm.tuple.Tuple; // import the required package/class
@SuppressWarnings("rawtypes")
@Override
protected void process(Tuple tuple) {
    try {
        List<Column> columns = jdbcMapper.getColumns(tuple);
        List<List<Column>> columnLists = new ArrayList<List<Column>>();
        columnLists.add(columns);
        if (!StringUtils.isBlank(tableName)) {
            this.jdbcClient.insert(this.tableName, columnLists);
        } else {
            this.jdbcClient.executeInsertQuery(this.insertQuery, columnLists);
        }
        this.collector.ack(tuple);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(tuple);
    }
}
Example 9: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple tuple) {
    String url = tuple.getStringByField("url");
    byte[] content = tuple.getBinaryByField("content");
    Metadata metadata = (Metadata) tuple.getValueByField("metadata");
    String text = tuple.getStringByField("text");
    Values v = new Values(url, content, metadata, text);
    // if there is text already, there is no need to parse the document again
    if (StringUtils.isNotBlank(text)) {
        collector.emit(tuple, v);
    } else {
        collector.emit("tika", tuple, v);
    }
    collector.ack(tuple);
}
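Since this bolt emits on both the default stream and a dedicated "tika" stream, its declareOutputFields has to declare the two streams separately. A minimal sketch, assuming the four field names used in execute:

import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;

@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    Fields fields = new Fields("url", "content", "metadata", "text");
    declarer.declare(fields);               // default stream: text already extracted
    declarer.declareStream("tika", fields); // documents that still need parsing
}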
Example 10: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple input, BasicOutputCollector collector) {
    if (input.contains(FIELD_SWITCH_ID)) {
        String switchId = (String) input.getValueByField(FIELD_SWITCH_ID);
        logger.debug("switchId=" + input.getValueByField(FIELD_SWITCH_ID));
        if (isSwitchConfirmed(switchId)) {
            collector.emit(new Values(switchId));
        } else {
            logger.warn("could not confirm switch with id " + switchId);
            // TODO - any action here?
        }
    } else {
        logger.error(FIELD_SWITCH_ID + " not found in tuple " + input);
    }
}
Example 11: handle
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void handle(Tuple tuple) {
    EmitData data = (EmitData) tuple.getValueByField(Constants.EmitFields.DATA);
    List<PairWrapper<String, Object>> wrapperList = data.get(EmitData.MESSAGE);
    if (wrapperList != null && !wrapperList.isEmpty()) {
        for (PairWrapper<String, Object> wrapper : wrapperList) {
            HeartbeatPulse pulse = HeartbeatPulse.build(wrapper.pairs2map());
            if (logger.isDebugEnabled()) {
                Object offset = data.get(EmitData.OFFSET);
                HeartBeatPacket packet = HeartBeatPacket.parse(pulse.getPacket());
                SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
                String groupId = tuple.getStringByField(Constants.EmitFields.GROUP_FIELD);
                logger.debug("[heartbeat] {} offset:{} ts:{}, time:{}", groupId,
                        offset == null ? -1 : offset, packet.getTxtime(), format.format(new Date(packet.getTxtime())));
            }
            reporter.mark(pulse);
        }
    }
    handler.handle(tuple);
    this.listener.getOutputCollector().ack(tuple);
}
Example 12: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple tuple) {
    boltstatus++;
    String word = tuple.getStringByField(Constraints.wordFileds);
    if (!word.isEmpty()) {
        Long count = counts.get(word);
        if (count == null) {
            count = 0L;
        }
        count++;
        counts.put(word, count);
        outputCollector.emit(tuple, new Values(word, count));
    }
    outputCollector.ack(tuple);
}
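Note that the emit above anchors the output tuple to the input tuple, so a failure downstream causes the word to be replayed from the spout. The matching output declaration would look like this (a sketch, assuming the two fields emitted above):

import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;

@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("word", "count"));
}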
Example 13: handleRestoreRequest
import org.apache.storm.tuple.Tuple; // import the required package/class
private void handleRestoreRequest(CommandMessage message, Tuple tuple) throws IOException {
    ImmutablePair<Flow, Flow> requestedFlow = ((FlowRestoreRequest) message.getData()).getPayload();

    ImmutablePair<PathInfoData, PathInfoData> path = pathComputer.getPath(requestedFlow.getLeft());
    logger.info("Restored flow path: {}", path);

    if (!flowCache.isOneSwitchFlow(requestedFlow) && pathComputer.isEmpty(path)) {
        throw new MessageException(message.getCorrelationId(), System.currentTimeMillis(),
                ErrorType.CREATION_FAILURE, "Could not restore flow", "Path was not found");
    }

    ImmutablePair<Flow, Flow> flow;
    if (flowCache.cacheContainsFlow(requestedFlow.getLeft().getFlowId())) {
        flow = flowCache.updateFlow(requestedFlow, path);
    } else {
        flow = flowCache.createFlow(requestedFlow, path);
    }
    logger.info("Restored flow: {}", flow);

    Values topology = new Values(Utils.MAPPER.writeValueAsString(
            new FlowInfoData(requestedFlow.getLeft().getFlowId(), flow,
                    FlowOperation.UPDATE, message.getCorrelationId())));
    outputCollector.emit(StreamType.UPDATE.toString(), tuple, topology);
}
Example 14: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple tuple) {
    List<Object> id = tuple.select(_idFields);
    GlobalStreamId streamId = new GlobalStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId());
    if (!_pending.containsKey(id)) {
        _pending.put(id, new HashMap<GlobalStreamId, Tuple>());
    }
    Map<GlobalStreamId, Tuple> parts = _pending.get(id);
    if (parts.containsKey(streamId)) {
        throw new RuntimeException("Received same side of single join twice");
    }
    parts.put(streamId, tuple);
    if (parts.size() == _numSources) {
        _pending.remove(id);
        List<Object> joinResult = new ArrayList<Object>();
        for (String outField : _outFields) {
            GlobalStreamId loc = _fieldLocations.get(outField);
            joinResult.add(parts.get(loc).getValueByField(outField));
        }
        _collector.emit(new ArrayList<Tuple>(parts.values()), joinResult);
        for (Tuple part : parts.values()) {
            _collector.ack(part);
        }
    }
}
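This is the stream-join pattern from storm-starter's SingleJoinBolt: tuples from each source stream are buffered by their id fields until one tuple per stream has arrived, then joined and emitted. A wiring sketch in the spirit of the storm-starter example; genderSpout and ageSpout are hypothetical spouts:

import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("gender", genderSpout); // emits ("id", "gender")
builder.setSpout("age", ageSpout);       // emits ("id", "age")
// join the two streams on "id", producing ("gender", "age")
builder.setBolt("join", new SingleJoinBolt(new Fields("gender", "age")))
       .fieldsGrouping("gender", new Fields("id"))
       .fieldsGrouping("age", new Fields("id"));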
Example 15: execute
import org.apache.storm.tuple.Tuple; // import the required package/class
@Override
public void execute(Tuple tuple) {
    final String data = tuple.getString(0);
    LOGGER.debug("Processing datapoint {}", data);
    try {
        Datapoint datapoint = MAPPER.readValue(data, Datapoint.class);
        if (isUpdateRequired(datapoint)) {
            addDatapoint(datapoint);
            List<Object> stream = Stream.of(datapoint.getMetric(), datapoint.getTime(), datapoint.getValue(),
                    datapoint.getTags())
                    .collect(Collectors.toList());
            LOGGER.debug("emit: " + stream);
            collector.emit(stream);
        }
    } catch (IOException e) {
        LOGGER.error("Failed to read datapoint", e);
    } finally {
        collector.ack(tuple);
    }
}
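MAPPER.readValue(data, Datapoint.class) matches Jackson's ObjectMapper API, so the bolt presumably holds a shared mapper instance. A minimal sketch of the field this code assumes:

import com.fasterxml.jackson.databind.ObjectMapper;

private static final ObjectMapper MAPPER = new ObjectMapper();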