

Java Pipeline.read Method Code Examples

This article collects typical usage examples of the Java method org.apache.crunch.Pipeline.read. If you are asking yourself how Pipeline.read works, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.crunch.Pipeline.


Nine code examples of the Pipeline.read method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
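Before diving into the examples, here is a minimal, self-contained sketch of the basic Pipeline.read call pattern. The class name PipelineReadSketch and the input path are hypothetical, and the sketch assumes plain text input readable via From.textFile:

import org.apache.crunch.PCollection;
import org.apache.crunch.Pipeline;
import org.apache.crunch.impl.mr.MRPipeline;
import org.apache.crunch.io.From;

public class PipelineReadSketch {
  public static void main(String[] args) {
    // Create a MapReduce-backed pipeline; the class argument is used to locate the job jar.
    Pipeline pipeline = new MRPipeline(PipelineReadSketch.class);

    // Pipeline.read takes a Source<T> and returns a lazy PCollection<T>;
    // no data is actually read until the pipeline is executed.
    PCollection<String> lines = pipeline.read(From.textFile("/path/to/input"));

    // ... transformations and pipeline.write(...) calls would go here ...

    // Execute the pipeline and clean up its resources.
    pipeline.done();
  }
}

The examples that follow apply this same pattern with different Source implementations, such as CrunchDatasets.asSource, From.avroFile, and From.formattedFile.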

Example 1: testGeneric

import org.apache.crunch.Pipeline; // import the package/class required by this method
@Test
public void testGeneric() throws IOException {
  Dataset<Record> inputDataset = repo.create("in", new DatasetDescriptor.Builder()
      .schema(USER_SCHEMA).build());
  Dataset<Record> outputDataset = repo.create("out", new DatasetDescriptor.Builder()
      .schema(USER_SCHEMA).build());

  // write two files, each of 5 records
  writeTestUsers(inputDataset, 5, 0);
  writeTestUsers(inputDataset, 5, 5);

  Pipeline pipeline = new MRPipeline(TestCrunchDatasets.class);
  PCollection<GenericData.Record> data = pipeline.read(
      CrunchDatasets.asSource(inputDataset, GenericData.Record.class));
  pipeline.write(data, CrunchDatasets.asTarget(outputDataset), Target.WriteMode.APPEND);
  pipeline.run();

  checkTestUsers(outputDataset, 10);
}
 
Developer: cloudera, Project: cdk, Lines: 20, Source: TestCrunchDatasets.java

Example 2: testGenericParquet

import org.apache.crunch.Pipeline; // import the package/class required by this method
@Test
public void testGenericParquet() throws IOException {
  Dataset<Record> inputDataset = repo.create("in", new DatasetDescriptor.Builder()
      .schema(USER_SCHEMA).format(Formats.PARQUET).build());
  Dataset<Record> outputDataset = repo.create("out", new DatasetDescriptor.Builder()
      .schema(USER_SCHEMA).format(Formats.PARQUET).build());

  // write two files, each of 5 records
  writeTestUsers(inputDataset, 5, 0);
  writeTestUsers(inputDataset, 5, 5);

  Pipeline pipeline = new MRPipeline(TestCrunchDatasets.class);
  PCollection<GenericData.Record> data = pipeline.read(
      CrunchDatasets.asSource(inputDataset, GenericData.Record.class));
  pipeline.write(data, CrunchDatasets.asTarget(outputDataset), Target.WriteMode.APPEND);
  pipeline.run();

  checkTestUsers(outputDataset, 10);
}
 
Developer: cloudera, Project: cdk, Lines: 20, Source: TestCrunchDatasets.java

Example 3: testPartitionedSourceAndTarget

import org.apache.crunch.Pipeline; // import the package/class required by this method
@Test
@SuppressWarnings("deprecation")
public void testPartitionedSourceAndTarget() throws IOException {
  PartitionStrategy partitionStrategy = new PartitionStrategy.Builder().hash(
      "username", 2).build();

  Dataset<Record> inputDataset = repo.create("in", new DatasetDescriptor.Builder()
      .schema(USER_SCHEMA).partitionStrategy(partitionStrategy).build());
  Dataset<Record> outputDataset = repo.create("out", new DatasetDescriptor.Builder()
      .schema(USER_SCHEMA).partitionStrategy(partitionStrategy).build());

  writeTestUsers(inputDataset, 10);

  PartitionKey key = partitionStrategy.partitionKey(0);
  Dataset<Record> inputPart0 = inputDataset.getPartition(key, false);
  Dataset<Record> outputPart0 = outputDataset.getPartition(key, true);

  Pipeline pipeline = new MRPipeline(TestCrunchDatasets.class);
  PCollection<GenericData.Record> data = pipeline.read(
      CrunchDatasets.asSource(inputPart0, GenericData.Record.class));
  pipeline.write(data, CrunchDatasets.asTarget(outputPart0), Target.WriteMode.APPEND);
  pipeline.run();

  Assert.assertEquals(5, datasetSize(outputPart0));
}
 
Developer: cloudera, Project: cdk, Lines: 26, Source: TestCrunchDatasets.java

Example 4: readVariants

import org.apache.crunch.Pipeline; // import the package/class required by this method
private static PCollection<Variant> readVariants(String inputFormat, Path inputPath,
    Configuration conf, Pipeline pipeline, String sampleGroup) throws IOException {
  PCollection<Variant> variants;
  if (inputFormat.equals("VCF")) {
    VCFToGA4GHVariantFn.configureHeaders(
        conf, FileUtils.findVcfs(inputPath, conf), sampleGroup);
    TableSource<LongWritable, VariantContextWritable> vcfSource =
        From.formattedFile(
            inputPath, VCFInputFormat.class, LongWritable.class, VariantContextWritable.class);
    PCollection<VariantContextWritable> vcfRecords = pipeline.read(vcfSource).values();
    variants = vcfRecords.parallelDo(
        "VCF to GA4GH Variant", new VCFToGA4GHVariantFn(), Avros.specifics(Variant.class));
  } else if (inputFormat.equals("AVRO")) {
    variants = pipeline.read(From.avroFile(inputPath, Avros.specifics(Variant.class)));
  } else if (inputFormat.equals("PARQUET")) {
    @SuppressWarnings("unchecked")
    Source<Variant> source =
        new AvroParquetFileSource(inputPath, Avros.specifics(Variant.class));
    variants = pipeline.read(source);
  } else {
    throw new IllegalStateException("Unrecognized input format: " + inputFormat);
  }
  return variants;
}
 
Developer: cloudera, Project: quince, Lines: 25, Source: GA4GHVariantsLoader.java

Example 5: inputPairs

import org.apache.crunch.Pipeline; // import the package/class required by this method
protected final <V extends RealVector> PCollection<Pair<Integer, V>> inputPairs(
    Pipeline p,
    String inputKey,
    PType<V> ptype) {
  PType<Pair<Integer, V>> inputType = Avros.pairs(Avros.ints(), ptype);
  return p.read(avroInput(inputKey, inputType));
}
 
Developer: apsaltis, Project: oryx, Lines: 8, Source: KMeansJobStep.java

Example 6: run

import org.apache.crunch.Pipeline; // import the package/class required by this method
public void run() {

  // TODO: Switch to parameterized views.
  View<ExampleEvent> view = Datasets.load(ScheduledReportApp.EXAMPLE_DS_URI,
      ExampleEvent.class);

  RefinableView<GenericRecord> target = Datasets.load(ScheduledReportApp.REPORT_DS_URI,
      GenericRecord.class);

  // Get the view into which this report will be written.
  DateTime dateTime = getNominalTime().toDateTime(DateTimeZone.UTC);

  View<GenericRecord> output = target
      .with("year", dateTime.getYear())
      .with("month", dateTime.getMonthOfYear())
      .with("day", dateTime.getDayOfMonth())
      .with("hour", dateTime.getHourOfDay())
      .with("minute", dateTime.getMinuteOfHour());

  Pipeline pipeline = getPipeline();

  PCollection<ExampleEvent> events = pipeline.read(CrunchDatasets.asSource(view));

  PTable<Long, ExampleEvent> eventsByUser = events.by(new GetEventId(), Avros.longs());

  // Count of events by user ID.
  PTable<Long, Long> userEventCounts = eventsByUser.keys().count();

  PCollection<GenericData.Record> report = userEventCounts.parallelDo(
      new ToUserReport(),
      Avros.generics(SCHEMA));

  pipeline.write(report, CrunchDatasets.asTarget(output));

  pipeline.run();
}
 
Developer: rbrush, Project: kite-apps, Lines: 37, Source: ScheduledReportJob.java

Example 7: run

import org.apache.crunch.Pipeline; // import the package/class required by this method
@Override
public int run(String[] args) throws Exception {

  new JCommander(this, args);

  URI outputUri = URI.create(output);

  // Our crunch job is a MapReduce job
  Pipeline pipeline = new MRPipeline(LegacyHdfs2Cass.class, getConf());

  // Parse & fetch info about target Cassandra cluster
  CassandraParams params = CassandraParams.parse(outputUri);

  // Read records from Avro files in inputFolder
  PCollection<ByteBuffer> records =
      pipeline.read(From.avroFile(inputList(input), Avros.records(ByteBuffer.class)));

  // Transform the input
  String protocol = outputUri.getScheme();
  if (protocol.equalsIgnoreCase("thrift")) {
    records
        // First convert ByteBuffers to ThriftRecords
        .parallelDo(new LegacyHdfsToThrift(), ThriftRecord.PTYPE)
        // Then group the ThriftRecords in preparation for writing them
        .parallelDo(new ThriftRecord.AsPair(), ThriftRecord.AsPair.PTYPE)
        .groupByKey(params.createGroupingOptions())
        // Finally write the ThriftRecords to Cassandra
        .write(new ThriftTarget(outputUri, params));
  }
  else if (protocol.equalsIgnoreCase("cql")) {
    records
        // In case of CQL, convert ByteBuffers to CQLRecords
        .parallelDo(new LegacyHdfsToCQL(), CQLRecord.PTYPE)
        .by(params.getKeyFn(), Avros.bytes())
        .groupByKey(params.createGroupingOptions())
        .write(new CQLTarget(outputUri, params));
  }

  // Execute the pipeline
  PipelineResult result = pipeline.done();
  return result.succeeded() ? 0 : 1;
}
 
Developer: spotify, Project: hdfs2cass, Lines: 43, Source: LegacyHdfs2Cass.java

Example 8: run

import org.apache.crunch.Pipeline; // import the package/class required by this method
@Override
public int run(String[] args) throws Exception {

  new JCommander(this, args);

  URI outputUri = URI.create(output);

  // Our crunch job is a MapReduce job
  Configuration conf = getConf();
  conf.setBoolean(MRJobConfig.MAP_SPECULATIVE, Boolean.FALSE);
  conf.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, Boolean.FALSE);
  Pipeline pipeline = new MRPipeline(Hdfs2Cass.class, conf);

  // Parse & fetch info about target Cassandra cluster
  CassandraParams params = CassandraParams.parse(outputUri);

  PCollection<GenericRecord> records =
      ((PCollection<GenericRecord>)(PCollection) pipeline.read(From.avroFile(inputList(input))));

  String protocol = outputUri.getScheme();
  if (protocol.equalsIgnoreCase("thrift")) {
    records
        // First convert ByteBuffers to ThriftRecords
        .parallelDo(new AvroToThrift(rowkey, timestamp, ttl, ignore), ThriftRecord.PTYPE)
        // Then group the ThriftRecords in preparation for writing them
        .parallelDo(new ThriftRecord.AsPair(), ThriftRecord.AsPair.PTYPE)
        .groupByKey(params.createGroupingOptions())
        // Finally write the ThriftRecords to Cassandra
        .write(new ThriftTarget(outputUri, params));
  }
  else if (protocol.equalsIgnoreCase("cql")) {
    records
        // In case of CQL, convert ByteBuffers to CQLRecords
        .parallelDo(new AvroToCQL(rowkey, timestamp, ttl, ignore), CQLRecord.PTYPE)
        .by(params.getKeyFn(), Avros.bytes())
        .groupByKey(params.createGroupingOptions())
        .write(new CQLTarget(outputUri, params));
  }

  // Execute the pipeline
  PipelineResult result = pipeline.done();
  return result.succeeded() ? 0 : 1;
}
 
Developer: spotify, Project: hdfs2cass, Lines: 44, Source: Hdfs2Cass.java

Example 9: run

import org.apache.crunch.Pipeline; // import the package/class required by this method
public void run(@DataIn(name="example_events", type=ExampleEvent.class) View<ExampleEvent> input,
                @DataOut(name="odd_users", type=ExampleEvent.class) View<ExampleEvent> output) {

  Pipeline pipeline = getPipeline();

  PCollection<ExampleEvent> events = pipeline.read(CrunchDatasets.asSource(input));

  PCollection<ExampleEvent> oddUsers = events.filter(new KeepOddUsers());

  pipeline.write(oddUsers, CrunchDatasets.asTarget(output));

  pipeline.run();
}
 
Developer: rbrush, Project: kite-apps, Lines: 14, Source: TriggeredJob.java


Note: The org.apache.crunch.Pipeline.read method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult the corresponding project's License before distributing or using the code; do not republish without permission.