本文整理汇总了Java中cascading.tuple.TupleEntryCollector类的典型用法代码示例。如果您正苦于以下问题:Java TupleEntryCollector类的具体用法?Java TupleEntryCollector怎么用?Java TupleEntryCollector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
TupleEntryCollector类属于cascading.tuple包,在下文中一共展示了TupleEntryCollector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: openTrapForWrite
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Opens a trap tap for writing failed tuples from this (Flink) flow process.
 * Only {@link Hfs} taps are supported; a synthetic Hadoop task-attempt id and
 * a slice-specific part-file name are injected into the job configuration so
 * each slice writes to its own trap file without colliding.
 */
@Override
public TupleEntryCollector openTrapForWrite(Tap trap) throws IOException {
	if (!(trap instanceof Hfs)) {
		throw new UnsupportedOperationException("Only Hfs taps are supported as traps");
	}
	JobConf trapConf = new JobConf(this.getConfigCopy());
	int step = trapConf.getInt("cascading.flow.step.num", 0);
	int node = trapConf.getInt("cascading.flow.node.num", 0);
	// Part-file suffix encodes step, node and slice so concurrent slices never clash.
	String partSuffix = String.format("-%05d-%05d-%05d", step, node, this.getCurrentSliceNum());
	trapConf.set("cascading.tapcollector.partname", "%s%spart" + partSuffix);
	// Fabricate a task attempt id (Hadoop output formats require one outside a real MR job).
	// NOTE(review): the (int) cast truncates the millisecond clock and can yield a negative
	// value in the formatted id — confirm downstream tolerates that.
	String attemptId = String.format("attempt_%012d_0000_m_%06d_0",
			(int) Math.rint(System.currentTimeMillis()), this.getCurrentSliceNum());
	trapConf.set("mapred.task.id", attemptId);
	trapConf.set("mapreduce.task.id", attemptId);
	return trap.openForWrite(new FlinkFlowProcess(trapConf), null);
}
示例2: writeToHadoopPartitionTap
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Writes the fixture data through a Hadoop {@link BasePartitionTap}.
 * <p>
 * The configuration is marked as "in flow" before opening the tap, and the
 * {@code _temporary} working directory is removed afterwards, because a partition
 * tap used outside a real flow would otherwise delete its results on close.
 *
 * @param tap the partition tap to write to (must be a {@code BasePartitionTap<JobConf, ?, ?>})
 * @throws IOException if opening, writing, or cleaning up the tap fails
 */
private void writeToHadoopPartitionTap(Tap<?, ?, ?> tap) throws IOException {
	@SuppressWarnings("unchecked")
	BasePartitionTap<JobConf, ?, ?> hadoopTap = (BasePartitionTap<JobConf, ?, ?>) tap;
	JobConf conf = new JobConf();
	// Avoids deletion of results when using a partition tap (close() will delete the
	// _temporary before the copy has been done if not in a flow)
	HadoopUtil.setIsInflow(conf);
	HadoopFlowProcess flowProcess = new HadoopFlowProcess(conf);
	hadoopTap.sinkConfInit(flowProcess, conf);
	TupleEntryCollector collector = hadoopTap.openForWrite(flowProcess);
	try {
		for (TupleEntry tuple : data.asTupleEntryList()) {
			collector.add(tuple);
		}
	} finally {
		// Close in a finally block so the writer is released even if add() throws.
		collector.close();
	}
	// We need to clean up the '_temporary' folder left behind by the output committer.
	String basePath = hadoopTap.getParent().getFullIdentifier(flowProcess);
	deleteTemporaryPath(new Path(basePath), FileSystem.get(conf));
}
示例3: writeToLocalTap
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Writes the fixture data through a Cascading local-mode tap.
 * <p>
 * A minimal {@link LocalFlowProcess} (with null step stats) is assembled so the tap
 * can be opened outside a real flow; the resource is committed at the end so the
 * output becomes visible.
 *
 * @param tap the local tap to write to (must be a {@code Tap<Properties, ?, ?>})
 * @throws IOException if opening or writing the tap fails
 */
private void writeToLocalTap(Tap<?, ?, ?> tap) throws IOException {
	@SuppressWarnings("unchecked")
	Tap<Properties, ?, ?> localTap = (Tap<Properties, ?, ?>) tap;
	Properties conf = new Properties();
	LocalFlowProcess flowProcess = new LocalFlowProcess(conf);
	flowProcess.setStepStats(new LocalStepStats(new NullFlowStep(), NullClientState.INSTANCE));
	localTap.sinkConfInit(flowProcess, conf);
	TupleEntryCollector collector = localTap.openForWrite(flowProcess);
	try {
		for (TupleEntry tuple : data.asTupleEntryList()) {
			collector.add(tuple);
		}
	} finally {
		// Close in a finally block so the writer is released even if add() throws.
		collector.close();
	}
	localTap.commitResource(conf);
}
示例4: getCollector
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Returns the open collector for the given template path, creating and caching
 * one on demand. The number of simultaneously open collectors is capped by
 * {@code openTapsThreshold}; crossing it triggers a purge before registration.
 *
 * @param path the template path to write to
 * @throws TapException if the path cannot be opened
 */
private TupleEntryCollector getCollector(String path) {
	TupleEntryCollector cached = collectors.get(path);
	if (cached != null) {
		return cached;
	}
	TupleEntryCollector opened;
	try {
		opened = createTupleEntrySchemeCollector(flowProcess, parent, path);
		flowProcess.increment(Counters.Paths_Opened, 1);
	} catch (IOException exception) {
		throw new TapException("unable to open template path: " + path, exception);
	}
	// Evict stale collectors before caching the new one, keeping open taps bounded.
	if (collectors.size() > openTapsThreshold) {
		purgeCollectors();
	}
	collectors.put(path, opened);
	return opened;
}
示例5: exerciseScheme
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/** Round-trips the fixture tuples through the tap to exercise the scheme's sink path. */
@Test
public void exerciseScheme() throws IOException {
	TupleEntryCollector sink = tap.openForWrite(flowProcess);
	for (Tuple t : tuples) {
		sink.add(t);
	}
	sink.close();
}
示例6: openForWrite
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Opens a collector that sinks tuples to Redis using this tap's scheme and
 * connection settings (hostname, port, db).
 */
public TupleEntryCollector openForWrite(FlowProcess<Config> flowProcess, Object output) throws IOException {
	RedisBaseScheme scheme = (RedisBaseScheme) getScheme();
	return new RedisSchemeCollector(flowProcess, scheme, hostname, port, db);
}
示例7: writeToHadoopTap
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Writes the fixture data through a plain Hadoop tap.
 *
 * @param tap the tap to write to (must be a {@code Tap<JobConf, ?, ?>})
 * @throws IOException if opening or writing the tap fails
 */
private void writeToHadoopTap(Tap<?, ?, ?> tap) throws IOException {
	@SuppressWarnings("unchecked")
	Tap<JobConf, ?, ?> hadoopTap = (Tap<JobConf, ?, ?>) tap;
	JobConf conf = new JobConf();
	HadoopFlowProcess flowProcess = new HadoopFlowProcess(conf);
	hadoopTap.sinkConfInit(flowProcess, conf);
	TupleEntryCollector collector = hadoopTap.openForWrite(flowProcess);
	try {
		for (TupleEntry tuple : data.asTupleEntryList()) {
			collector.add(tuple);
		}
	} finally {
		// Close in a finally block so the writer is released even if add() throws.
		collector.close();
	}
}
示例8: asTupleEntryList
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/** Verifies tuples written to a Bucket come back as TupleEntries carrying the sink fields. */
@Test
public void asTupleEntryList() throws IOException {
	Bucket sink = new Bucket(FIELDS, pipe, flow);
	TupleEntryCollector collector = sink.openForWrite(null, null);
	collector.add(TUPLE_1);
	collector.add(TUPLE_2);
	List<TupleEntry> entries = sink.result().asTupleEntryList();
	assertThat(entries.size(), is(2));
	// Each entry should retain both the declared fields and its original tuple.
	assertThat(entries.get(0).getFields(), is(FIELDS));
	assertThat(entries.get(0).getTuple(), is(TUPLE_1));
	assertThat(entries.get(1).getFields(), is(FIELDS));
	assertThat(entries.get(1).getTuple(), is(TUPLE_2));
}
示例9: asTupleList
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/** Verifies tuples written to a Bucket come back, in order, as a plain Tuple list. */
@Test
public void asTupleList() throws IOException {
	Bucket sink = new Bucket(FIELDS, pipe, flow);
	TupleEntryCollector collector = sink.openForWrite(null, null);
	collector.add(TUPLE_1);
	collector.add(TUPLE_2);
	List<Tuple> collected = sink.result().asTupleList();
	assertThat(collected.size(), is(2));
	assertThat(collected.get(0), is(TUPLE_1));
	assertThat(collected.get(1), is(TUPLE_2));
}
示例10: close
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Closes this collector, then closes every cached per-path child collector.
 * The cache is cleared even if closing a child throws.
 */
@Override
public void close() {
	super.close();
	try {
		for (TupleEntryCollector child : collectors.values()) {
			closeCollector(child);
		}
	} finally {
		// Drop references regardless of close outcome so the map is never reused stale.
		collectors.clear();
	}
}
示例11: closeCollector
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Best-effort close of a single child collector. Failures are deliberately
 * swallowed so one bad path does not prevent the remaining collectors from
 * being closed during shutdown.
 */
private void closeCollector(TupleEntryCollector collector) {
	if (collector == null) {
		return;
	}
	try {
		collector.close();
		flowProcess.increment(Counters.Paths_Closed, 1);
	} catch (Exception ignored) {
		// intentionally ignored: close is best-effort during teardown
	}
}
示例12: openForWrite
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Opens a prepared collector that sinks tuples into the backing JDBC table.
 *
 * @throws TapException if this tap was built without a TableDesc and so cannot sink
 */
@Override
public TupleEntryCollector openForWrite( FlowProcess<JobConf> flowProcess, OutputCollector output ) throws IOException {
	if( !isSink() )
		throw new TapException( "this tap may not be used as a sink, no TableDesc defined" );
	LOG.info("Creating JDBCTapCollector output instance");
	JDBCTapCollector collector = new JDBCTapCollector( flowProcess, this );
	// prepare() must run before the collector is handed out so writes can begin immediately.
	collector.prepare();
	return collector;
}
示例13: openForWrite
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/** Opens a prepared collector that sinks tuples into HBase. */
@Override
public TupleEntryCollector openForWrite(FlowProcess<JobConf> jobConfFlowProcess, OutputCollector outputCollector)
		throws IOException {
	HBaseTapCollector collector = new HBaseTapCollector(jobConfFlowProcess, this);
	// Prepare before returning so the collector is immediately ready for add() calls.
	collector.prepare();
	return collector;
}
示例14: openForWrite
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/** Opens a scheme-backed collector wrapping the supplied Hadoop output collector. */
@Override
public TupleEntryCollector openForWrite(FlowProcess<JobConf> flowProcess, OutputCollector output) throws IOException {
	HadoopTupleEntrySchemeCollector collector =
			new HadoopTupleEntrySchemeCollector(flowProcess, this, output);
	return collector;
}
示例15: openForWrite
import cascading.tuple.TupleEntryCollector; //导入依赖的package包/类
/**
 * Lazily resolves the wrapped tap from the flow process, then delegates the
 * write-open to it.
 */
@Override
public TupleEntryCollector openForWrite(FlowProcess<Object> flowProcess, Object output) throws IOException {
	// Resolution happens first so actualTap is guaranteed non-null before delegation.
	initInnerTapIfNotSetFromFlowProcess(flowProcess);
	return actualTap.openForWrite(flowProcess, output);
}