当前位置: 首页>>代码示例>>Java>>正文


Java AvroJob.setOutputValueSchema方法代码示例

本文整理汇总了Java中org.apache.avro.mapreduce.AvroJob.setOutputValueSchema方法的典型用法代码示例。如果您正苦于以下问题:Java AvroJob.setOutputValueSchema方法的具体用法?Java AvroJob.setOutputValueSchema怎么用?Java AvroJob.setOutputValueSchema使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.avro.mapreduce.AvroJob的用法示例。


在下文中一共展示了AvroJob.setOutputValueSchema方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: setSchema

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Hacked method: applies the supplied Avro key/value schemas to {@code job}.
 *
 * <p>For a map-only job (zero reducers) the map-output schema is set as well,
 * since the mapper output is then the final output. A {@code null} schema is
 * simply skipped. Note that the final-output schema is set in every case,
 * not only for jobs with a reduce phase.</p>
 */
private void setSchema(Job job, Schema keySchema, Schema valSchema) {

  final boolean mapOnly = job.getNumReduceTasks() == 0;

  if (keySchema != null) {
    if (mapOnly) {
      AvroJob.setMapOutputKeySchema(job, keySchema);
    }
    AvroJob.setOutputKeySchema(job, keySchema);
  }

  if (valSchema != null) {
    if (mapOnly) {
      AvroJob.setMapOutputValueSchema(job, valSchema);
    }
    AvroJob.setOutputValueSchema(job, valSchema);
  }
}
 
开发者ID:openaire,项目名称:iis,代码行数:19,代码来源:AvroMultipleOutputs.java

示例2: run

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Configures and runs the "AVRO Multiple Values" MR job.
 *
 * <p>Fix: use {@link Job#getInstance} instead of the deprecated
 * {@code new Job(Configuration)} constructor (consistent with the other
 * examples in this file); behavior is otherwise unchanged.</p>
 *
 * @param args {@code args[0]} = input path, {@code args[1]} = output path
 * @return 0 on job success, 1 on failure
 * @throws Exception if job setup or execution fails
 */
public int run(String[] args) throws Exception {

	// Job.getInstance(Configuration) is the supported replacement for the
	// deprecated Job(Configuration) constructor.
	Job job = Job.getInstance(getConf());
	job.setJarByClass(AVROMultipleValues.class);
	job.setJobName("AVRO Multiple Values");

	FileInputFormat.setInputPaths(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));

	job.setMapperClass(AVROMultipleValuesMapper.class);
	job.setReducerClass(AVROMultipleValuesReducer.class);

	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(AvroValue.class);

	job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
	AvroJob.setMapOutputValueSchema(job, Multiple.SCHEMA$);
	AvroJob.setOutputValueSchema(job, Multiple.SCHEMA$);

	job.setNumReduceTasks(1);

	return (job.waitForCompletion(true) ? 0 : 1);
}
 
开发者ID:CoE4BD,项目名称:HadoopHowTo,代码行数:24,代码来源:AVROMultipleValues.java

示例3: process

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Transfers every schema declared on an {@code AvroJobInfo} annotation onto
 * the given job, then registers Avro serialization with the job configuration.
 *
 * <p>{@code AvroDefault.class} is the sentinel meaning "not configured", so
 * each schema is applied only when the annotation attribute differs from it.
 * The {@code target} parameter is not used by this handler.</p>
 */
@Override
public void process(Annotation annotation, Job job, Object target)
		throws ToolException {

	final AvroJobInfo info = (AvroJobInfo) annotation;

	// Input key/value schemas.
	if (info.inputKeySchema() != AvroDefault.class) {
		AvroJob.setInputKeySchema(job, getSchema(info.inputKeySchema()));
	}
	if (info.inputValueSchema() != AvroDefault.class) {
		AvroJob.setInputValueSchema(job, getSchema(info.inputValueSchema()));
	}

	// Final output key/value schemas.
	if (info.outputKeySchema() != AvroDefault.class) {
		AvroJob.setOutputKeySchema(job, getSchema(info.outputKeySchema()));
	}
	if (info.outputValueSchema() != AvroDefault.class) {
		AvroJob.setOutputValueSchema(job, getSchema(info.outputValueSchema()));
	}

	// Intermediate (map output) key/value schemas.
	if (info.mapOutputKeySchema() != AvroDefault.class) {
		AvroJob.setMapOutputKeySchema(job, getSchema(info.mapOutputKeySchema()));
	}
	if (info.mapOutputValueSchema() != AvroDefault.class) {
		AvroJob.setMapOutputValueSchema(job, getSchema(info.mapOutputValueSchema()));
	}

	AvroSerialization.addToConfiguration(job.getConfiguration());
}
 
开发者ID:conversant,项目名称:mara,代码行数:29,代码来源:AvroJobInfoAnnotationHandler.java

示例4: run

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Configures and runs the RecordService-backed "Age Count" MR2 job.
 *
 * @param args {@code args[0]} = input table name, {@code args[1]} = output path
 * @return 0 on job success, -1 on bad usage, 1 on job failure
 * @throws Exception if job setup or execution fails
 */
public int run(String[] args) throws Exception {
  org.apache.log4j.BasicConfigurator.configure();

  if (args.length != 2) {
    System.err.println("Usage: MapReduceAgeCount <input path> <output path>");
    return -1;
  }

  final String inputTable = args[0];
  final Path outputPath = new Path(args[1]);

  Job job = Job.getInstance(getConf());
  job.setJarByClass(MapReduceAgeCount.class);
  job.setJobName("Age Count");

  // RECORDSERVICE: read from a table rather than from file paths.
  // (The plain-MR equivalent would be:
  //  FileInputFormat.setInputPaths(job, new Path(args[0]));)
  RecordServiceConfig.setInputTable(job.getConfiguration(), null, inputTable);

  // RECORDSERVICE: the RecordService flavor of AvroKeyValueInputFormat.
  job.setInputFormatClass(
      com.cloudera.recordservice.avro.mapreduce.AvroKeyValueInputFormat.class);
  FileOutputFormat.setOutputPath(job, outputPath);

  // Map side: Avro in, (Text, IntWritable) out.
  job.setMapperClass(AgeCountMapper.class);
  AvroJob.setInputKeySchema(job, UserKey.getClassSchema());
  AvroJob.setInputValueSchema(job, UserValue.getClassSchema());

  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);

  // Reduce side: emit Avro (string, int) pairs.
  job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
  job.setReducerClass(AgeCountReducer.class);
  AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.STRING));
  AvroJob.setOutputValueSchema(job, Schema.create(Schema.Type.INT));

  return job.waitForCompletion(true) ? 0 : 1;
}
 
开发者ID:cloudera,项目名称:RecordServiceClient,代码行数:40,代码来源:MapReduceAgeCount.java

示例5: run

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Configures and runs the RecordService-backed "Color Count" MR2 job against
 * the fixed {@code rs.users} table.
 *
 * @param args {@code args[0]} is unused for input (table is hard-coded),
 *             {@code args[1]} = output path
 * @return 0 on job success, -1 on bad usage, 1 on job failure
 * @throws Exception if job setup or execution fails
 */
@Override
public int run(String[] args) throws Exception {
  org.apache.log4j.BasicConfigurator.configure();

  if (args.length != 2) {
    System.err.println("Usage: MapReduceColorCount <input path> <output path>");
    return -1;
  }

  final Path outputPath = new Path(args[1]);

  Job job = Job.getInstance(getConf());
  job.setJarByClass(MapReduceColorCount.class);
  job.setJobName("Color Count");

  // RECORDSERVICE: read from the "rs"."users" table rather than a path.
  // (Plain-MR equivalent: FileInputFormat.setInputPaths(job, new Path(args[0]));)
  RecordServiceConfig.setInputTable(job.getConfiguration(), "rs", "users");

  // RECORDSERVICE: the RecordService flavor of AvroKeyInputFormat
  // (instead of the stock AvroKeyInputFormat).
  job.setInputFormatClass(
      com.cloudera.recordservice.avro.mapreduce.AvroKeyInputFormat.class);

  FileOutputFormat.setOutputPath(job, outputPath);

  // Map side: Avro User records in, (Text, IntWritable) out.
  job.setMapperClass(ColorCountMapper.class);
  AvroJob.setInputKeySchema(job, User.getClassSchema());
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);

  // Reduce side: emit Avro (string, int) pairs.
  job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
  job.setReducerClass(ColorCountReducer.class);
  AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.STRING));
  AvroJob.setOutputValueSchema(job, Schema.create(Schema.Type.INT));

  return job.waitForCompletion(true) ? 0 : 1;
}
 
开发者ID:cloudera,项目名称:RecordServiceClient,代码行数:40,代码来源:MapReduceColorCount.java

示例6: countColors

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Run the MR2 color count with generic records, and return a map of favorite colors to
 * the number of users.
 *
 * <p>Fix: the Avro {@code FileReader} (and the {@code FsInput} beneath it) was
 * never closed, leaking a file handle; it is now closed in a finally block.</p>
 *
 * @return favorite color -&gt; user count, read back from the single reducer output
 * @throws IOException if the job or result file cannot be read
 */
public static java.util.Map<String, Integer> countColors() throws IOException,
    ClassNotFoundException, InterruptedException {
  String output = TestUtil.getTempDirectory();
  Path outputPath = new Path(output);
  JobConf conf = new JobConf(ColorCount.class);
  conf.setInt("mapreduce.job.reduces", 1);

  Job job = Job.getInstance(conf);
  job.setJarByClass(ColorCount.class);
  job.setJobName("MR2 Color Count With Generic Records");

  RecordServiceConfig.setInputTable(job.getConfiguration(), "rs", "users");
  job.setInputFormatClass(
      com.cloudera.recordservice.avro.mapreduce.AvroKeyInputFormat.class);
  FileOutputFormat.setOutputPath(job, outputPath);

  job.setMapperClass(Map.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(IntWritable.class);

  job.setOutputFormatClass(AvroKeyValueOutputFormat.class);
  job.setReducerClass(Reduce.class);
  AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.STRING));
  AvroJob.setOutputValueSchema(job, Schema.create(Schema.Type.INT));

  // NOTE(review): the job's success flag is deliberately ignored here; a failed
  // job surfaces later as a missing/empty result file. Confirm this is intended.
  job.waitForCompletion(false);

  // Read the result and return it. Since we set the number of reducers to 1,
  // there is always just one file containing the value.
  SeekableInput input = new FsInput(new Path(output + "/part-r-00000.avro"), conf);
  DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
  FileReader<GenericRecord> fileReader = DataFileReader.openReader(input, reader);
  java.util.Map<String, Integer> colorMap = new HashMap<String, Integer>();
  try {
    for (GenericRecord datum: fileReader) {
      colorMap.put(datum.get(0).toString(), Integer.parseInt(datum.get(1).toString()));
    }
  } finally {
    // Closing the FileReader also closes the underlying SeekableInput.
    fileReader.close();
  }
  return colorMap;
}
 
开发者ID:cloudera,项目名称:RecordServiceClient,代码行数:43,代码来源:ColorCount.java

示例7: afterPropertiesSet

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Applies any configured Avro schema class names to the wrapped job after the
 * bean's properties have been set. Each non-null {@code avro*} property is
 * resolved to a record class, instantiated, and its schema registered on the
 * job for the corresponding slot (input / map-output / output, key / value).
 *
 * <p>Fix: replaced the deprecated {@link Class#newInstance()} (which propagates
 * constructor checked exceptions unchecked) with
 * {@code getDeclaredConstructor().newInstance()}; constructor failures now
 * surface as {@code InvocationTargetException}, still covered by
 * {@code throws Exception}. Also collapsed the inconsistently expanded
 * map-output-value branch to match its siblings.</p>
 *
 * @throws Exception if a schema class cannot be resolved or instantiated
 */
@Override
public void afterPropertiesSet() throws Exception {

    if (avroInputKey != null) {
        AvroJob.setInputKeySchema(job, resolveClass(avroInputKey).getDeclaredConstructor().newInstance().getSchema());
    }

    if (avroInputValue != null) {
        AvroJob.setInputValueSchema(job, resolveClass(avroInputValue).getDeclaredConstructor().newInstance().getSchema());
    }

    if (avroMapOutputKey != null) {
        AvroJob.setMapOutputKeySchema(job, resolveClass(avroMapOutputKey).getDeclaredConstructor().newInstance().getSchema());
    }

    if (avroMapOutputValue != null) {
        AvroJob.setMapOutputValueSchema(job, resolveClass(avroMapOutputValue).getDeclaredConstructor().newInstance().getSchema());
    }

    if (avroOutputKey != null) {
        AvroJob.setOutputKeySchema(job, resolveClass(avroOutputKey).getDeclaredConstructor().newInstance().getSchema());
    }

    if (avroOutputValue != null) {
        AvroJob.setOutputValueSchema(job, resolveClass(avroOutputValue).getDeclaredConstructor().newInstance().getSchema());
    }
}
 
开发者ID:ch4mpy,项目名称:hadoop2,代码行数:30,代码来源:AvroJobInitializingBean.java

示例8: setOutput

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Configures the job's output: resolves the output directory from the JSON
 * config, points the job at a "tmp" subdirectory (cleared first), and sets
 * the Avro output schemas (dictionary key schema, NULL value schema).
 *
 * @throws IOException if the existing temp output cannot be deleted
 */
@Override
protected void setOutput() throws JsonGenerationException,
        JsonMappingException,
        IOException
{
    JsonNode output = get(root, "output");

    // Write into <path>/tmp; wipe any leftovers from a previous run first.
    outputDir = new Path(getText(output, "path"));
    Path outputPath = new Path(outputDir, "tmp");

    fs.delete(outputPath, true);
    FileOutputFormat.setOutputPath(job, outputPath);

    // Build a ColumnType ("int") entry per configured output column.
    // NOTE(review): columnTypes is populated but never read in this method —
    // either dead code or meant to be handed off somewhere; confirm before
    // removing.
    List<ColumnType> columnTypes = new ArrayList<ColumnType>();

    for (JsonNode column : asArray(output, "columns"))
    {
        ColumnType type = new ColumnType();
        type.setName(column.getTextValue());
        type.setType("int");
        columnTypes.add(type);
    }

    // Avro output: dictionary schema as key, no value payload (NULL schema).
    AvroJob.setOutputKeySchema(job, GenerateDictionary.getSchema());
    AvroJob.setOutputValueSchema(job, Schema.create(Type.NULL));
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
}
 
开发者ID:svemuri,项目名称:CalcEngine,代码行数:31,代码来源:DictionaryExecutor.java

示例9: submitJob

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Builds the usage-per-hour staged job for one cluster/day and hands it to the
 * executor. Output is staged under /tmp before being promoted to {@code output}.
 *
 * @param executor    executor that runs the staged job
 * @param inputPattern glob/pattern of input files to process
 * @param output      final output location
 * @param clusterName cluster the stats belong to (also stored in the job conf)
 * @param year        year component of the job name
 * @param day         day component of the job name
 * @param numReducers number of reduce tasks to run
 */
private void submitJob(StagedOutputJobExecutor executor, String inputPattern, String output, String clusterName, String year, String day, int numReducers)
{
  final List<String> inputs = new ArrayList<String>();
  inputs.add(inputPattern);

  final StagedOutputJob job = StagedOutputJob.createStagedJob(
    _props,
    _name + "-" + "usage-per-hour-" + clusterName + "-" + year + "-" + day,
    inputs,
    "/tmp" + output,
    output,
    _log);

  // Record which cluster this job is processing.
  final Configuration configuration = job.getConfiguration();
  configuration.set("cluster.name", clusterName);

  // Job output is raw bytes; the Avro schemas below describe the payload.
  job.setOutputKeyClass(BytesWritable.class);
  job.setOutputValueClass(BytesWritable.class);

  job.setInputFormatClass(AvroKeyValueInputFormat.class);
  job.setOutputFormatClass(AvroKeyValueOutputFormat.class);

  // Input: (string, LogData); intermediate and final: (AttemptStatsKey, AttemptStatsValue).
  AvroJob.setInputKeySchema(job, Schema.create(Type.STRING));
  AvroJob.setInputValueSchema(job, LogData.SCHEMA$);

  AvroJob.setMapOutputKeySchema(job, AttemptStatsKey.SCHEMA$);
  AvroJob.setMapOutputValueSchema(job, AttemptStatsValue.SCHEMA$);

  AvroJob.setOutputKeySchema(job, AttemptStatsKey.SCHEMA$);
  AvroJob.setOutputValueSchema(job, AttemptStatsValue.SCHEMA$);

  job.setNumReduceTasks(numReducers);

  job.setMapperClass(ComputeUsagePerHour.TheMapper.class);
  job.setReducerClass(ComputeUsagePerHour.TheReducer.class);

  executor.submit(job);
}
 
开发者ID:nkrishnaveni,项目名称:polar-bear,代码行数:41,代码来源:ComputeUsagePerHour.java

示例10: getContext

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Returns (and caches) a TaskAttemptContext specialized for the given named
 * output, so that a record writer with the correct output format and Avro
 * schemas can be instantiated for it.
 *
 * @param nameOutput the named output to build a context for
 * @return a cached or freshly built TaskAttemptContext for {@code nameOutput}
 * @throws IOException if the named output format cannot be resolved
 */
private TaskAttemptContext getContext(String nameOutput) throws IOException {

    // Fast path: reuse a previously built context for this named output.
    TaskAttemptContext taskContext = taskContexts.get(nameOutput);

    if (taskContext != null) {
      return taskContext;
    }

    // The following trick leverages the instantiation of a record writer via
    // the job thus supporting arbitrary output formats.
    context.getConfiguration().set("avro.mo.config.namedOutput",nameOutput);
    Job job = new Job(context.getConfiguration());
    job.setOutputFormatClass(getNamedOutputFormatClass(context, nameOutput));
    // Schemas were registered per named output under these suffixed keys.
    Schema keySchema = keySchemas.get(nameOutput+"_KEYSCHEMA");
    Schema valSchema = valSchemas.get(nameOutput+"_VALSCHEMA");

    boolean isMaponly=job.getNumReduceTasks() == 0;

    // Map-only jobs configure the *map* output schema; jobs with reducers
    // configure the *final* output schema. A null schema is skipped entirely.
    if(keySchema!=null)
    {
      if(isMaponly)
        AvroJob.setMapOutputKeySchema(job,keySchema);
      else
        AvroJob.setOutputKeySchema(job,keySchema);
    }
    if(valSchema!=null)
    {
      if(isMaponly)
        AvroJob.setMapOutputValueSchema(job,valSchema);
      else
        AvroJob.setOutputValueSchema(job,valSchema);
    }
    // Wrap the specialized configuration in a context bearing the original
    // task attempt id, and cache it for subsequent calls.
    taskContext = new TaskAttemptContext(
      job.getConfiguration(), context.getTaskAttemptID());
    
    taskContexts.put(nameOutput, taskContext);
    
    return taskContext;
  }
 
开发者ID:nkrishnaveni,项目名称:polar-bear,代码行数:40,代码来源:MyAvroMultipleOutputs.java

示例11: execute

import org.apache.avro.mapreduce.AvroJob; //导入方法依赖的package包/类
/**
 * Builds and submits one parse-jobs staged job per processing task for every
 * configured cluster, waiting for each cluster's batch to finish before moving
 * on to the next cluster.
 *
 * @param executor executor used to submit and await the staged jobs
 * @throws IOException          if task discovery or job setup fails
 * @throws InterruptedException if waiting for completion is interrupted
 * @throws ExecutionException   if a submitted job fails
 */
public void execute(StagedOutputJobExecutor executor) throws IOException, InterruptedException, ExecutionException
{
  for (String clusterName : _clusterNames.split(","))
  {
    System.out.println("Processing cluster " + clusterName);

    final List<JobStatsProcessing.ProcessingTask> tasks =
        JobStatsProcessing.getTasks(_fs, _logsRoot, clusterName, _jobsOutputPathRoot, _incremental, _numDays, _numDaysForced);

    for (JobStatsProcessing.ProcessingTask task : tasks)
    {
      final List<String> inputs = new ArrayList<String>();
      inputs.add(task.inputPathFormat);

      final String output = task.outputPath;

      // Stage under /tmp<output>, promote to <output> on success.
      final StagedOutputJob job = StagedOutputJob.createStagedJob(
         _props,
         _name + "-parse-jobs-" + task.id,
         inputs,
         "/tmp" + output,
         output,
         _log);

      job.getConfiguration().set("jobs.output.path", _jobsOutputPathRoot);
      job.getConfiguration().set("logs.cluster.name", clusterName);

      // 1 reducer per 12 GB of input data
      final long reducers = (int)Math.ceil(((double)task.totalLength) / 1024 / 1024 / 1024 / 12);

      job.setOutputKeyClass(BytesWritable.class);
      job.setOutputValueClass(BytesWritable.class);

      job.setInputFormatClass(CombinedTextInputFormat.class);
      job.setOutputFormatClass(AvroKeyValueOutputFormat.class);

      // Output and map-output records are (string, LogData) pairs.
      AvroJob.setOutputKeySchema(job, Schema.create(Type.STRING));
      AvroJob.setOutputValueSchema(job, LogData.SCHEMA$);

      job.setNumReduceTasks((int)reducers);

      job.setMapperClass(ParseJobsFromLogs.TheMapper.class);
      job.setReducerClass(ParseJobsFromLogs.TheReducer.class);

      AvroJob.setMapOutputKeySchema(job, Schema.create(Type.STRING));
      AvroJob.setMapOutputValueSchema(job, LogData.SCHEMA$);

      MyAvroMultipleOutputs.addNamedOutput(job, "logs", AvroKeyValueOutputFormat.class, Schema.create(Type.STRING), LogData.SCHEMA$);

      executor.submit(job);
    }

    executor.waitForCompletion();
  }
}
 
开发者ID:nkrishnaveni,项目名称:polar-bear,代码行数:55,代码来源:ParseJobsFromLogs.java


注:本文中的org.apache.avro.mapreduce.AvroJob.setOutputValueSchema方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。