Java NullWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.NullWritable. If you are wondering what NullWritable is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The NullWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Java code examples.
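Before the examples, a minimal sketch of the basic API: NullWritable is a singleton obtained from NullWritable.get(); it serializes to zero bytes and is typically used as a placeholder key or value when only one side of a key/value pair carries data. The mapper class name below (LineOnlyMapper) is illustrative, not part of Hadoop.

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Minimal sketch: a map-only stage that keeps only the value side of each record.
public class LineOnlyMapper extends Mapper<Object, Text, NullWritable, Text> {
  @Override
  protected void map(Object key, Text value, Context context)
      throws java.io.IOException, InterruptedException {
    // NullWritable.get() returns the shared singleton; it writes nothing on
    // serialization, so the output effectively contains only the Text values.
    context.write(NullWritable.get(), value);
  }
}

In the driver, the matching declarations are job.setOutputKeyClass(NullWritable.class) and job.setOutputValueClass(Text.class), as several of the examples below show.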

Example 1: testEmptyJoin

import org.apache.hadoop.io.NullWritable; // import the required package/class
public void testEmptyJoin() throws Exception {
  Configuration conf = new Configuration();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose("outer",
      MapReduceTestUtil.Fake_IF.class, src));
  MapReduceTestUtil.Fake_IF.setKeyClass(conf, 
    MapReduceTestUtil.IncomparableKey.class);
  Job job = Job.getInstance(conf);
  job.setInputFormatClass(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  job.setOutputKeyClass(MapReduceTestUtil.IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  base.getFileSystem(conf).delete(base, true);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestJoinDatamerge.java

Example 2: next

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public boolean next( final NullWritable key, final ColumnAndIndex value ) throws IOException {
  if( currentSpread == null || currentIndex == currentIndexList.size() ){
    if( ! nextReader() ){
      updateCounter( reader.getReadStats() );
      isEnd = true;
      return false;
    }
  }

  spreadColumn.setSpread( currentSpread );
  value.column = spreadColumn;
  value.index =  currentIndexList.get( currentIndex );
  value.columnIndex = spreadCounter.get();
  currentIndex++;
  return true;
}
 
Developer: yahoojapan, Project: multiple-dimension-spread, Lines: 18, Source: MDSHiveLineReader.java

Example 3: init

import org.apache.hadoop.io.NullWritable; // import the required package/class
/** {@inheritDoc} */
@Override
public void init(Job job) {
  // setup mapper
  job.setMapperClass(PartitionMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(SummationWritable.class);

  // setup partitioner
  job.setPartitionerClass(IndexPartitioner.class);

  // setup reducer
  job.setReducerClass(SummingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(TaskResult.class);
  final Configuration conf = job.getConfiguration();
  final int nParts = conf.getInt(N_PARTS, 1);
  job.setNumReduceTasks(nParts);

  // setup input
  job.setInputFormatClass(SummationInputFormat.class);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DistSum.java

Example 4: map

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
protected void map(LongWritable key, SqoopRecord val, Context context)
    throws IOException, InterruptedException {

  try {
    // Loading of LOBs was delayed until we have a Context.
    val.loadLargeObjects(lobLoader);
  } catch (SQLException sqlE) {
    throw new IOException(sqlE);
  }

  GenericRecord outKey = AvroUtil.toGenericRecord(val.getFieldMap(),
      schema, bigDecimalFormatString);
  wrapper.datum(outKey);
  context.write(wrapper, NullWritable.get());
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 17, Source: AvroImportMapper.java

Example 5: configureMapper

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
protected void configureMapper(Job job, String tableName,
    String tableClassName) throws ClassNotFoundException, IOException {
  if (isHCatJob) {
    throw new IOException("Sqoop-HCatalog Integration is not supported.");
  }
  switch (getInputFileType()) {
    case AVRO_DATA_FILE:
      throw new IOException("Avro data file is not supported.");
    case SEQUENCE_FILE:
    case UNKNOWN:
    default:
      job.setMapperClass(getMapperClass());
  }

  // Concurrent writes of the same records would be problematic.
  ConfigurationHelper.setJobMapSpeculativeExecution(job, false);
  job.setMapOutputKeyClass(NullWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 21, Source: PostgreSQLCopyExportJob.java

Example 6: readPartitions

import org.apache.hadoop.io.NullWritable; // import the required package/class
/**
 * Read the cut points from the given IFile.
 * @param fs The file system
 * @param p The path to read
 * @param keyClass The map output key class
 * @param conf The job configuration
 * @throws IOException
 */
                               // matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
    Configuration conf) throws IOException {
  SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
  ArrayList<K> parts = new ArrayList<K>();
  K key = ReflectionUtils.newInstance(keyClass, conf);
  NullWritable value = NullWritable.get();
  try {
    while (reader.next(key, value)) {
      parts.add(key);
      key = ReflectionUtils.newInstance(keyClass, conf);
    }
    reader.close();
    reader = null;
  } finally {
    IOUtils.cleanup(LOG, reader);
  }
  return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TotalOrderPartitioner.java
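Example 6 reads a split-point file whose records are (key, NullWritable) pairs. For context, here is a hedged sketch of how such a file could be produced with the SequenceFile writer API; the path and the Text keys are illustrative only, and in a real total-order sort this file is normally written by InputSampler.writePartitionFile.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class WritePartitionFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path partitionFile = new Path("_partition.lst"); // illustrative path

    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(partitionFile),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(NullWritable.class));
    try {
      // Each record carries one cut point in the key; the value is the empty singleton.
      writer.append(new Text("m"), NullWritable.get());
      writer.append(new Text("t"), NullWritable.get());
    } finally {
      IOUtils.closeStream(writer);
    }
  }
}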

Example 7: map

import org.apache.hadoop.io.NullWritable; // import the required package/class
public void map(LongWritable key, Record val, Context context) 
    throws IOException, InterruptedException{
  try {
    odpsImpl.parse(val);
    context.write(odpsImpl, NullWritable.get());
  } catch (Exception e) {
    LOG.error("Exception raised during data export");
    LOG.error("Exception: ", e);

    LOG.error("On input: " + val);
    LOG.error("At position " + key);
    InputSplit is = context.getInputSplit();
    LOG.error("");
    LOG.error("Currently processing split:");
    LOG.error(is);
    LOG.error("");
    LOG.error("This issue might not necessarily be caused by current input");
    LOG.error("due to the batching nature of export.");
    LOG.error("");
    throw new IOException("Can't export data, please check failed map task logs", e);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 23, Source: OdpsExportMapper.java

Example 8: writeOutput

import org.apache.hadoop.io.NullWritable; // import the required package/class
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestFileOutputCommitter.java

Example 9: nextKeyValue

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  if (!reader.hasNext() || reader.pastSync(end)) {
    key = null;
    value = null;
    return false;
  }
  if (key == null) {
    key = new AvroWrapper<T>();
  }
  if (value == null) {
    value = NullWritable.get();
  }
  key.datum(reader.next(key.datum()));
  return true;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 17, Source: AvroRecordReader.java

Example 10: reduce

import org.apache.hadoop.io.NullWritable; // import the required package/class
/**
 * {@inheritDoc}
 */
protected void reduce(final Text key, final Iterable<OrcValue> values, final Context context) throws IOException, InterruptedException {
    final Configuration configuration = context.getConfiguration();
    final String sourcePath = configuration.get("compactionSourcePath");
    final String targetPath = configuration.get("compactionTargetPath");

    // Reducer stores data at the target directory retaining the directory structure of files
    String filePath = key.toString().replace(sourcePath, targetPath);
    if (key.toString().endsWith("/")) {
        filePath = filePath.concat("file");
    }

    log.info("Compaction output path {}", filePath);
    final URI uri = URI.create(filePath);
    final MultipleOutputs multipleOutputs = new MultipleOutputs<NullWritable, OrcValue>(context);
    try {
        for (final OrcValue text : values) {
            multipleOutputs.write(NullWritable.get(), text, uri.toString());
        }
    } finally {
        multipleOutputs.close();
    }
}
 
Developer: ExpediaInceCommercePlatform, Project: dataSqueeze, Lines: 26, Source: OrcCompactionReducer.java

Example 11: call

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public Job call() throws IOException, InterruptedException,
                         ClassNotFoundException {
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  ugi.doAs( new PrivilegedExceptionAction <Job>() {
     public Job run() throws IOException, ClassNotFoundException,
                             InterruptedException {
      job.setMapperClass(GenDCDataMapper.class);
      job.setNumReduceTasks(0);
      job.setMapOutputKeyClass(NullWritable.class);
      job.setMapOutputValueClass(BytesWritable.class);
      job.setInputFormatClass(GenDCDataFormat.class);
      job.setOutputFormatClass(NullOutputFormat.class);
      job.setJarByClass(GenerateDistCacheData.class);
      try {
        FileInputFormat.addInputPath(job, new Path("ignored"));
      } catch (IOException e) {
        LOG.error("Error while adding input path ", e);
      }
      job.submit();
      return job;
    }
  });
  return job;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: GenerateDistCacheData.java

Example 12: writeOutput

import org.apache.hadoop.io.NullWritable; // import the required package/class
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(null);
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestFileOutputCommitter.java

Example 13: map

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String[] parsedTriple = rdfParser.parseTriple(value.toString());
	if (parsedTriple != null) {
		// Convert literals to Pig types, if possible
		parsedTriple[2] = Util.toPigTypes(parsedTriple[2]);
		// Use Predicate for Vertical Partitioning
		multipleOutputs.write(NullWritable.get(), new Text(parsedTriple[0] + "\t" + parsedTriple[2]),
				Util.generateFileName(parsedTriple[1]));
		// Write all parsed triples also to "inputData" for queries where Predicate is not known
		multipleOutputs.write(NullWritable.get(), new Text(parsedTriple[0] + "\t" + parsedTriple[1] + "\t" + parsedTriple[2]),
				Util.generateFileName("inputData"));
		context.getCounter("RDF Dataset Properties", VALID_TRIPLES).increment(1);
	} else {
		if (value.getLength() == 0 || value.toString().startsWith("@")) {
			System.out.println("IGNORING: " + value);
			context.getCounter("RDF Dataset Properties", IGNORED_LINES).increment(1);
		} else {
			System.out.println("DISCARDED: " + value);
			context.getCounter("RDF Dataset Properties", INVALID_TRIPLES).increment(1);
		}
	}
}
 
Developer: aschaetzle, Project: PigSPARQL, Lines: 24, Source: VPMapOnlyMapper.java
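Examples 10 and 13 both emit through MultipleOutputs with NullWritable keys. The sketch below shows the per-task wiring such a mapper typically assumes; the class name, the input types, and the baseOutputPath string are assumptions of this sketch, not taken from the PigSPARQL source.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;

public class MultipleOutputsMapperSketch
    extends Mapper<LongWritable, Text, NullWritable, Text> {

  private MultipleOutputs<NullWritable, Text> multipleOutputs;

  @Override
  protected void setup(Context context) {
    // One MultipleOutputs instance per task, created once in setup().
    multipleOutputs = new MultipleOutputs<>(context);
  }

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // write(key, value, baseOutputPath): the third argument selects the output file,
    // which is how example 13 partitions triples by predicate.
    multipleOutputs.write(NullWritable.get(), value, "inputData/part");
  }

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    // Must be closed, or the named outputs are never flushed.
    multipleOutputs.close();
  }
}

On the driver side, LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class) is a common companion, so the default output does not leave empty part files when everything is written through MultipleOutputs.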

Example 14: testEmptyJoin

import org.apache.hadoop.io.NullWritable; // import the required package/class
public void testEmptyJoin() throws Exception {
  JobConf job = new JobConf();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
      Fake_IF.class, src));
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(IdentityMapper.class);
  job.setReducerClass(IdentityReducer.class);
  job.setOutputKeyClass(IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDatamerge.java

Example 15: getRecordReader

import org.apache.hadoop.io.NullWritable; // import the required package/class
public RecordReader<NullWritable,NullWritable> getRecordReader(
    InputSplit ignored, JobConf conf, Reporter reporter) {
  return new RecordReader<NullWritable,NullWritable>() {
    private boolean done = false;
    public boolean next(NullWritable key, NullWritable value)
        throws IOException {
      if (done)
        return false;
      done = true;
      return true;
    }
    public NullWritable createKey() { return NullWritable.get(); }
    public NullWritable createValue() { return NullWritable.get(); }
    public long getPos() throws IOException { return 0L; }
    public void close() throws IOException { }
    public float getProgress() throws IOException { return 0.0f; }
  };
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestReduceFetchFromPartialMem.java


Note: the org.apache.hadoop.io.NullWritable examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use must follow the license of the corresponding project. Do not republish without permission.