

Java NullWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.NullWritable. If you are wondering how the NullWritable class is used in practice, or are looking for concrete NullWritable examples, the selected code samples below should help.


The NullWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples of the NullWritable class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help recommend better Java code examples.
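
Before diving into the examples, here is a minimal, hedged sketch (written for this article, assuming Hadoop 2.x APIs; the output path and class name are hypothetical) of what NullWritable is and how it is typically used: NullWritable is a zero-length Writable singleton, obtained via NullWritable.get(), that serves as a placeholder whenever only one half of a key/value pair carries data.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class NullWritableDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical output path, used only for this sketch.
    Path path = new Path("/tmp/nullwritable-demo.seq");

    // NullWritable is a singleton: get() always returns the same zero-byte instance.
    NullWritable nothing = NullWritable.get();

    // Typical use: a "key-only" SequenceFile with NullWritable as the value type.
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(path),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(NullWritable.class));
    try {
      writer.append(new Text("key-only-record"), nothing);
    } finally {
      IOUtils.closeStream(writer);
    }
  }
}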

Example 1: testEmptyJoin

import org.apache.hadoop.io.NullWritable; // import the required package/class
public void testEmptyJoin() throws Exception {
  Configuration conf = new Configuration();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose("outer",
      MapReduceTestUtil.Fake_IF.class, src));
  MapReduceTestUtil.Fake_IF.setKeyClass(conf, 
    MapReduceTestUtil.IncomparableKey.class);
  Job job = Job.getInstance(conf);
  job.setInputFormatClass(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(Mapper.class);
  job.setReducerClass(Reducer.class);
  job.setOutputKeyClass(MapReduceTestUtil.IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  base.getFileSystem(conf).delete(base, true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source: TestJoinDatamerge.java

Example 2: next

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public boolean next( final NullWritable key, final ColumnAndIndex value ) throws IOException {
  if( currentSpread == null || currentIndex == currentIndexList.size() ){
    if( ! nextReader() ){
      updateCounter( reader.getReadStats() );
      isEnd = true;
      return false;
    }
  }

  spreadColumn.setSpread( currentSpread );
  value.column = spreadColumn;
  value.index =  currentIndexList.get( currentIndex );
  value.columnIndex = spreadCounter.get();
  currentIndex++;
  return true;
}
 
Developer ID: yahoojapan, Project: multiple-dimension-spread, Lines of code: 18, Source: MDSHiveLineReader.java

Example 3: init

import org.apache.hadoop.io.NullWritable; // import the required package/class
/** {@inheritDoc} */
@Override
public void init(Job job) {
  // setup mapper
  job.setMapperClass(PartitionMapper.class);
  job.setMapOutputKeyClass(IntWritable.class);
  job.setMapOutputValueClass(SummationWritable.class);

  // setup partitioner
  job.setPartitionerClass(IndexPartitioner.class);

  // setup reducer
  job.setReducerClass(SummingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(TaskResult.class);
  final Configuration conf = job.getConfiguration();
  final int nParts = conf.getInt(N_PARTS, 1);
  job.setNumReduceTasks(nParts);

  // setup input
  job.setInputFormatClass(SummationInputFormat.class);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: DistSum.java

Example 4: map

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
protected void map(LongWritable key, SqoopRecord val, Context context)
    throws IOException, InterruptedException {

  try {
    // Loading of LOBs was delayed until we have a Context.
    val.loadLargeObjects(lobLoader);
  } catch (SQLException sqlE) {
    throw new IOException(sqlE);
  }

  GenericRecord outKey = AvroUtil.toGenericRecord(val.getFieldMap(),
      schema, bigDecimalFormatString);
  wrapper.datum(outKey);
  context.write(wrapper, NullWritable.get());
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 17, Source: AvroImportMapper.java

Example 5: configureMapper

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
protected void configureMapper(Job job, String tableName,
    String tableClassName) throws ClassNotFoundException, IOException {
  if (isHCatJob) {
    throw new IOException("Sqoop-HCatalog Integration is not supported.");
  }
  switch (getInputFileType()) {
    case AVRO_DATA_FILE:
      throw new IOException("Avro data file is not supported.");
    case SEQUENCE_FILE:
    case UNKNOWN:
    default:
      job.setMapperClass(getMapperClass());
  }

  // Concurrent writes of the same records would be problematic.
  ConfigurationHelper.setJobMapSpeculativeExecution(job, false);
  job.setMapOutputKeyClass(NullWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 21, Source: PostgreSQLCopyExportJob.java

Example 6: readPartitions

import org.apache.hadoop.io.NullWritable; // import the required package/class
/**
 * Read the cut points from the given IFile.
 * @param fs The file system
 * @param p The path to read
 * @param keyClass The map output key class
 * @param job The job config
 * @throws IOException
 */
                               // matching key types enforced by passing in
@SuppressWarnings("unchecked") // map output key class
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass,
    Configuration conf) throws IOException {
  SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
  ArrayList<K> parts = new ArrayList<K>();
  K key = ReflectionUtils.newInstance(keyClass, conf);
  NullWritable value = NullWritable.get();
  try {
    while (reader.next(key, value)) {
      parts.add(key);
      key = ReflectionUtils.newInstance(keyClass, conf);
    }
    reader.close();
    reader = null;
  } finally {
    IOUtils.cleanup(LOG, reader);
  }
  return parts.toArray((K[])Array.newInstance(keyClass, parts.size()));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source: TotalOrderPartitioner.java
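
Example 6 reads its cut points from a SequenceFile whose values are NullWritable, so only the keys matter. As a hedged companion sketch (not part of the original TotalOrderPartitioner code; the file name and cut-point key are hypothetical), such a partition file could be written as follows. In real jobs this file is usually produced by InputSampler.writePartitionFile and registered via TotalOrderPartitioner.setPartitionFile.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

// Hedged sketch: write a partition file of the shape readPartitions() expects.
// Keys hold the cut points; the NullWritable values are ignored on read.
Configuration conf = new Configuration();
Path partitionFile = new Path("_partition.lst"); // hypothetical file name
SequenceFile.Writer writer = SequenceFile.createWriter(conf,
    SequenceFile.Writer.file(partitionFile),
    SequenceFile.Writer.keyClass(Text.class),
    SequenceFile.Writer.valueClass(NullWritable.class));
try {
  writer.append(new Text("m"), NullWritable.get()); // one cut point per reducer boundary
} finally {
  writer.close();
}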

Example 7: map

import org.apache.hadoop.io.NullWritable; // import the required package/class
public void map(LongWritable key, Record val, Context context) 
    throws IOException, InterruptedException{
  try {
    odpsImpl.parse(val);
    context.write(odpsImpl, NullWritable.get());
  } catch (Exception e) {
    LOG.error("Exception raised during data export");
    LOG.error("Exception: ", e);

    LOG.error("On input: " + val);
    LOG.error("At position " + key);
    InputSplit is = context.getInputSplit();
    LOG.error("");
    LOG.error("Currently processing split:");
    LOG.error(is);
    LOG.error("");
    LOG.error("This issue might not necessarily be caused by current input");
    LOG.error("due to the batching nature of export.");
    LOG.error("");
    throw new IOException("Can't export data, please check failed map task logs", e);
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 23, Source: OdpsExportMapper.java

Example 8: writeOutput

import org.apache.hadoop.io.NullWritable; // import the required package/class
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(context);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: TestFileOutputCommitter.java

Example 9: nextKeyValue

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  if (!reader.hasNext() || reader.pastSync(end)) {
    key = null;
    value = null;
    return false;
  }
  if (key == null) {
    key = new AvroWrapper<T>();
  }
  if (value == null) {
    value = NullWritable.get();
  }
  key.datum(reader.next(key.datum()));
  return true;
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines of code: 17, Source: AvroRecordReader.java

Example 10: reduce

import org.apache.hadoop.io.NullWritable; // import the required package/class
/**
 * {@inheritDoc}
 */
protected void reduce(final Text key, final Iterable<OrcValue> values, final Context context) throws IOException, InterruptedException {
    final Configuration configuration = context.getConfiguration();
    final String sourcePath = configuration.get("compactionSourcePath");
    final String targetPath = configuration.get("compactionTargetPath");

    // Reducer stores data at the target directory retaining the directory structure of files
    String filePath = key.toString().replace(sourcePath, targetPath);
    if (key.toString().endsWith("/")) {
        filePath = filePath.concat("file");
    }

    log.info("Compaction output path {}", filePath);
    final URI uri = URI.create(filePath);
    final MultipleOutputs multipleOutputs = new MultipleOutputs<NullWritable, OrcValue>(context);
    try {
        for (final OrcValue text : values) {
            multipleOutputs.write(NullWritable.get(), text, uri.toString());
        }
    } finally {
        multipleOutputs.close();
    }
}
 
Developer ID: ExpediaInceCommercePlatform, Project: dataSqueeze, Lines of code: 26, Source: OrcCompactionReducer.java

Example 11: call

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public Job call() throws IOException, InterruptedException,
                         ClassNotFoundException {
  UserGroupInformation ugi = UserGroupInformation.getLoginUser();
  ugi.doAs( new PrivilegedExceptionAction <Job>() {
     public Job run() throws IOException, ClassNotFoundException,
                             InterruptedException {
      job.setMapperClass(GenDCDataMapper.class);
      job.setNumReduceTasks(0);
      job.setMapOutputKeyClass(NullWritable.class);
      job.setMapOutputValueClass(BytesWritable.class);
      job.setInputFormatClass(GenDCDataFormat.class);
      job.setOutputFormatClass(NullOutputFormat.class);
      job.setJarByClass(GenerateDistCacheData.class);
      try {
        FileInputFormat.addInputPath(job, new Path("ignored"));
      } catch (IOException e) {
        LOG.error("Error while adding input path ", e);
      }
      job.submit();
      return job;
    }
  });
  return job;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: GenerateDistCacheData.java

Example 12: writeOutput

import org.apache.hadoop.io.NullWritable; // import the required package/class
private void writeOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(null);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: TestFileOutputCommitter.java

Example 13: map

import org.apache.hadoop.io.NullWritable; // import the required package/class
@Override
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
	String[] parsedTriple = rdfParser.parseTriple(value.toString());
	if (parsedTriple != null) {
		// Convert literals to Pig types, if possible
		parsedTriple[2] = Util.toPigTypes(parsedTriple[2]);
		// Use Predicate for Vertical Partitioning
		multipleOutputs.write(NullWritable.get(), new Text(parsedTriple[0] + "\t" + parsedTriple[2]),
				Util.generateFileName(parsedTriple[1]));
		// Write all parsed triples also to "inputData" for queries where Predicate is not known
		multipleOutputs.write(NullWritable.get(), new Text(parsedTriple[0] + "\t" + parsedTriple[1] + "\t" + parsedTriple[2]),
				Util.generateFileName("inputData"));
		context.getCounter("RDF Dataset Properties", VALID_TRIPLES).increment(1);
	} else {
		if (value.getLength() == 0 || value.toString().startsWith("@")) {
			System.out.println("IGNORING: " + value);
			context.getCounter("RDF Dataset Properties", IGNORED_LINES).increment(1);
		} else {
			System.out.println("DISCARDED: " + value);
			context.getCounter("RDF Dataset Properties", INVALID_TRIPLES).increment(1);
		}
	}
}
 
Developer ID: aschaetzle, Project: PigSPARQL, Lines of code: 24, Source: VPMapOnlyMapper.java
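
Example 13 emits NullWritable keys through MultipleOutputs from a map-only job. The following is a hedged driver sketch (not taken from the PigSPARQL project; the job name and value class are assumptions) of how the output side of such a mapper might be wired up; LazyOutputFormat keeps the default output path from accumulating empty part-* files when everything is written through MultipleOutputs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

// Hedged sketch of a map-only driver whose mapper writes NullWritable keys
// via MultipleOutputs, as in Example 13. Names are illustrative.
Job job = Job.getInstance(new Configuration(), "vertical-partitioning");
job.setNumReduceTasks(0);                  // map-only job
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(Text.class);
// Avoid empty part-* files; the real output goes through MultipleOutputs in the mapper.
LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);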

Example 14: testEmptyJoin

import org.apache.hadoop.io.NullWritable; // import the required package/class
public void testEmptyJoin() throws Exception {
  JobConf job = new JobConf();
  Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
  Path[] src = { new Path(base,"i0"), new Path("i1"), new Path("i2") };
  job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer",
      Fake_IF.class, src));
  job.setInputFormat(CompositeInputFormat.class);
  FileOutputFormat.setOutputPath(job, new Path(base, "out"));

  job.setMapperClass(IdentityMapper.class);
  job.setReducerClass(IdentityReducer.class);
  job.setOutputKeyClass(IncomparableKey.class);
  job.setOutputValueClass(NullWritable.class);

  JobClient.runJob(job);
  base.getFileSystem(job).delete(base, true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: TestDatamerge.java

Example 15: getRecordReader

import org.apache.hadoop.io.NullWritable; // import the required package/class
public RecordReader<NullWritable,NullWritable> getRecordReader(
    InputSplit ignored, JobConf conf, Reporter reporter) {
  return new RecordReader<NullWritable,NullWritable>() {
    private boolean done = false;
    public boolean next(NullWritable key, NullWritable value)
        throws IOException {
      if (done)
        return false;
      done = true;
      return true;
    }
    public NullWritable createKey() { return NullWritable.get(); }
    public NullWritable createValue() { return NullWritable.get(); }
    public long getPos() throws IOException { return 0L; }
    public void close() throws IOException { }
    public float getProgress() throws IOException { return 0.0f; }
  };
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: TestReduceFetchFromPartialMem.java


Note: The org.apache.hadoop.io.NullWritable class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors; please refer to the license of the corresponding project before distributing or using the code. Do not reproduce this article without permission.