

Java ByteWritable Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.ByteWritable. If you are wondering what ByteWritable is for, how to use it, or where to find working examples, the curated snippets below should help.


The ByteWritable class belongs to the org.apache.hadoop.io package. The sections below present 15 code examples of the class, sorted by popularity by default.
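
Before turning to the collected examples, here is a minimal, self-contained sketch of the ByteWritable API itself: construction, get()/set(), and the write()/readFields() round trip that every Hadoop Writable supports. The class and method names are the real Hadoop ones; the demo values and the class name ByteWritableDemo are made up for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.ByteWritable;

public class ByteWritableDemo {
  public static void main(String[] args) throws IOException {
    // Wrap a primitive byte in its Writable box type.
    ByteWritable original = new ByteWritable((byte) 42);

    // Serialize it the way Hadoop would, via DataOutput.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bos));

    // Deserialize into a fresh instance and verify the round trip.
    ByteWritable copy = new ByteWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

    System.out.println(copy.get());               // 42
    System.out.println(original.compareTo(copy)); // 0 -- ByteWritable is a WritableComparable
  }
}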

Example 1: init

import org.apache.hadoop.io.ByteWritable; // import the required package/class
@Override
public void init() throws IOException {
  registerKey(NullWritable.class.getName(), NullWritableSerializer.class);
  registerKey(Text.class.getName(), TextSerializer.class);
  registerKey(LongWritable.class.getName(), LongWritableSerializer.class);
  registerKey(IntWritable.class.getName(), IntWritableSerializer.class);
  registerKey(Writable.class.getName(), DefaultSerializer.class);
  registerKey(BytesWritable.class.getName(), BytesWritableSerializer.class);
  registerKey(BooleanWritable.class.getName(), BoolWritableSerializer.class);
  registerKey(ByteWritable.class.getName(), ByteWritableSerializer.class);
  registerKey(FloatWritable.class.getName(), FloatWritableSerializer.class);
  registerKey(DoubleWritable.class.getName(), DoubleWritableSerializer.class);
  registerKey(VIntWritable.class.getName(), VIntWritableSerializer.class);
  registerKey(VLongWritable.class.getName(), VLongWritableSerializer.class);

  LOG.info("Hadoop platform inited");
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 18, Source: HadoopPlatform.java
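
This init method comes from Hadoop's native-task platform layer: it registers a dedicated serializer for each built-in Writable key type, ByteWritable included, presumably so the native collector can serialize and compare keys without going through generic Java reflection.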

Example 2: reduce

import org.apache.hadoop.io.ByteWritable; // import the required package/class
@Override
public void reduce(ByteWritable key, Iterator<Text> values,
    OutputCollector<Text, ByteWritable> output, Reporter reporter)
    throws IOException {
  while (values.hasNext()) {
    Text document = values.next();
    writers.delete(document.toString());
    totalDeleted++;
    reporter.incrCounter("CleaningJobStatus", "Deleted documents", 1);
    // if (numDeletes >= NUM_MAX_DELETE_REQUEST) {
    // LOG.info("CleaningJob: deleting " + numDeletes
    // + " documents");
    // // TODO updateRequest.process(solr);
    // // TODO updateRequest = new UpdateRequest();
    // writers.delete(key.toString());
    // totalDeleted += numDeletes;
    // numDeletes = 0;
    // }
  }
}
 
Developer ID: jorcox, Project: GeoCrawler, Lines: 21, Source: CleaningJob.java
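
The commented-out block preserves an earlier batched-deletion scheme from the upstream project (accumulate numDeletes documents, then flush); the active code instead deletes and counts one document per value.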

Example 3: testWriteByte

import org.apache.hadoop.io.ByteWritable; // import the required package/class
@Test
public void testWriteByte() throws Exception {
    if (!canTest()) {
        return;
    }
    byte aByte = 8;
    template.sendBody("direct:write_byte", aByte);

    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-byte");
    SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    byte rByte = ((ByteWritable) value).get();
    assertEquals(rByte, aByte);

    IOHelper.close(reader);
}
 
Developer ID: HydAu, Project: Camel, Lines: 20, Source: HdfsProducerTest.java

Example 4: testWriteByte

import org.apache.hadoop.io.ByteWritable; // import the required package/class
@Test
public void testWriteByte() throws Exception {
    if (!canTest()) {
        return;
    }
    byte aByte = 8;
    template.sendBody("direct:write_byte", aByte);

    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-byte");
    FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(fs1, file1, conf);
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    byte rByte = ((ByteWritable) value).get();
    assertEquals(rByte, aByte);

    IOHelper.close(reader);
}
 
Developer ID: HydAu, Project: Camel, Lines: 21, Source: HdfsProducerTest.java
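
Examples 3 and 4 are near-identical tests; the only difference is how the SequenceFile.Reader is built. Example 3 uses the newer option-based constructor (SequenceFile.Reader.file(path)), while Example 4 uses the older constructor taking an explicit FileSystem, which has been deprecated since Hadoop 2 in favor of the option-based form.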

Example 5: reduce

import org.apache.hadoop.io.ByteWritable; // import the required package/class
@Override
@SuppressWarnings("unchecked")
public void reduce(ByteWritable key, Iterator<OffsetCount> values,
    OutputCollector<ByteWritable, OffsetCount> out, Reporter report)
    throws IOException {
  // need to sort the values by filename and fileoffset
  while (values.hasNext())
    list.add(new OffsetCount(values.next()));
  Collections.sort(list);

  long lineOffset = 0;
  for (OffsetCount oc : list) {
    long count = oc.count;
    oc.count = lineOffset;
    out.collect(key, oc);
    lineOffset += count;
  }
  report.incrCounter(CSVReblockMR.NUM_ROWS_IN_MATRIX, key.toString(), lineOffset);
  list.clear();
}
 
Developer ID: apache, Project: systemml, Lines: 23, Source: CSVAssignRowIDReducer.java
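
This reducer is a prefix-sum pass: after sorting the OffsetCount records by file name and offset, each record's count is replaced by the running total so far, turning per-split line counts into the global row offset at which each split begins. list is evidently a reusable member field of the reducer class.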

Example 6: delete

import org.apache.hadoop.io.ByteWritable; // import the required package/class
public void delete(String crawldb, String solrUrl, boolean noCommit) throws IOException {
  SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
  long start = System.currentTimeMillis();
  LOG.info("SolrClean: starting at " + sdf.format(start));

  JobConf job = new NutchJob(getConf());

  FileInputFormat.addInputPath(job, new Path(crawldb, CrawlDb.CURRENT_NAME));
  job.setBoolean("noCommit", noCommit);
  job.set(SolrConstants.SERVER_URL, solrUrl);
  job.setInputFormat(SequenceFileInputFormat.class);
  job.setOutputFormat(NullOutputFormat.class);
  job.setMapOutputKeyClass(ByteWritable.class);
  job.setMapOutputValueClass(Text.class);
  job.setMapperClass(DBFilter.class);
  job.setReducerClass(SolrDeleter.class);

  JobClient.runJob(job);

  long end = System.currentTimeMillis();
  LOG.info("SolrClean: finished at " + sdf.format(end) + ", elapsed: " + TimingUtil.elapsedTime(start, end));
}
 
Developer ID: yahoo, Project: anthelion, Lines: 23, Source: SolrClean.java
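
Note the symmetry with Example 2: this driver declares ByteWritable as the map-output key class and Text as the value class, which is exactly the (ByteWritable key, Iterator<Text> values) shape that the deletion reducer consumes.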

Example 7: getWritableInstance

import org.apache.hadoop.io.ByteWritable; // import the required package/class
/**
 * Returns the corresponding Writable object for this column type.
 */
public Writable getWritableInstance(com.cloudera.recordservice.core.Schema.Type type) {
  switch (type) {
    case BOOLEAN: return new BooleanWritable();
    case TINYINT: return new ByteWritable();
    case SMALLINT: return new ShortWritable();
    case INT: return new IntWritable();
    case BIGINT: return new LongWritable();
    case FLOAT: return new FloatWritable();
    case DOUBLE: return new DoubleWritable();
    case VARCHAR:
    case CHAR:
    case STRING: return new Text();
    case TIMESTAMP_NANOS: return new TimestampNanosWritable();
    case DECIMAL: return new DecimalWritable();
    default: throw new UnsupportedOperationException(
        "Unexpected type: " + type);
  }
}
 
Developer ID: cloudera, Project: RecordServiceClient, Lines: 22, Source: RecordServiceRecord.java

Example 8: createWritable

import org.apache.hadoop.io.ByteWritable; // import the required package/class
private static final WritableComparable<?> createWritable(DataType type)
{
    switch (type)
    {
    case BOOLEAN:
        return new BooleanWritable();
    case BYTE:
        return new ByteWritable();
    case INT:
        return new IntWritable();
    case LONG:
        return new LongWritable();
    case FLOAT:
        return new FloatWritable();
    case DOUBLE:
        return new DoubleWritable();
    case STRING:
        return new Text();
    default:
        return null;
    }
}
 
Developer ID: linkedin, Project: Cubert, Lines: 23, Source: CompactWritablesDeserializer.java
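
Unlike Example 7, this factory returns null for an unsupported DataType instead of throwing, so callers must be prepared to handle a null Writable.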

Example 9: reduce

import org.apache.hadoop.io.ByteWritable; // import the required package/class
protected void reduce(ByteWritable key, Iterable<RowNumberWritable> values, Context context) throws IOException, InterruptedException {
    Iterator<RowNumberWritable> itr = values.iterator();
    if (!itr.hasNext()) {
        return;
    }

    long offset = 0;
    RowNumberWritable value = itr.next();
    while (itr.hasNext() && value.getCount() > 0) {
        offset += value.getCount();
        value = itr.next();
    }
    outputKey.set(Long.toString(offset++));
    context.write(outputKey, value.getValue());

    while(itr.hasNext()) {
        value = itr.next();
        outputKey.set(Long.toString(offset++));
        context.write(outputKey, value.getValue());
    }
}
 
Developer ID: magsol, Project: Hadoop-Affinity, Lines: 22, Source: RowNumberJob.java
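
The leading while loop skips the count-only records (those with getCount() > 0), summing them into offset; only then does the reducer start emitting the real payload values, numbering each with the running offset. outputKey is evidently a reusable member field of the reducer class.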

Example 10: translateWritableToPigDataType

import org.apache.hadoop.io.ByteWritable; // import the required package/class
protected Object translateWritableToPigDataType(Writable w, byte dataType) {
  switch(dataType) {
    case DataType.CHARARRAY: return ((Text) w).toString();
    case DataType.BYTEARRAY:
          BytesWritable bw = (BytesWritable) w;
          // Make a copy
          return new DataByteArray(bw.getBytes(), 0, bw.getLength());
    case DataType.BOOLEAN: return ((BooleanWritable) w).get();
    case DataType.INTEGER: return ((IntWritable) w).get();
    case DataType.LONG: return ((LongWritable) w).get();
    case DataType.FLOAT: return ((FloatWritable) w).get();
    case DataType.DOUBLE: return ((DoubleWritable) w).get();
    case DataType.BYTE: return ((ByteWritable) w).get();
    case DataType.DATETIME: return ((DateTimeWritable) w).get();
  }

  return null;
}
 
Developer ID: sigmoidanalytics, Project: spork, Lines: 19, Source: SequenceFileLoader.java

Example 11: convert

import org.apache.hadoop.io.ByteWritable; // import the required package/class
@SuppressWarnings("unchecked")
private<T> T convert(Record flinkType, int pos, Class<T> hadoopType) {
	if(hadoopType == LongWritable.class ) {
		return (T) new LongWritable((flinkType.getField(pos, LongValue.class)).getValue());
	}
	if(hadoopType == org.apache.hadoop.io.Text.class) {
		return (T) new Text((flinkType.getField(pos, StringValue.class)).getValue());
	}
	if(hadoopType == org.apache.hadoop.io.IntWritable.class) {
		return (T) new IntWritable((flinkType.getField(pos, IntValue.class)).getValue());
	}
	if(hadoopType == org.apache.hadoop.io.FloatWritable.class) {
		return (T) new FloatWritable((flinkType.getField(pos, FloatValue.class)).getValue());
	}
	if(hadoopType == org.apache.hadoop.io.DoubleWritable.class) {
		return (T) new DoubleWritable((flinkType.getField(pos, DoubleValue.class)).getValue());
	}
	if(hadoopType == org.apache.hadoop.io.BooleanWritable.class) {
		return (T) new BooleanWritable((flinkType.getField(pos, BooleanValue.class)).getValue());
	}
	if(hadoopType == org.apache.hadoop.io.ByteWritable.class) {
		return (T) new ByteWritable((flinkType.getField(pos, ByteValue.class)).getValue());
	}

	throw new RuntimeException("Unable to convert Flink type ("+flinkType.getClass().getCanonicalName()+") to Hadoop.");
}
 
Developer ID: citlab, Project: vs.msc.ws14, Lines: 27, Source: DefaultFlinkTypeConverter.java
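
The chain covers seven Flink value types (LongValue, StringValue, IntValue, FloatValue, DoubleValue, BooleanValue, ByteValue); anything else falls through to the RuntimeException at the end, so record schemas must be restricted to these wrappers when using this converter.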

Example 12: write

import org.apache.hadoop.io.ByteWritable; // import the required package/class
public void write(Writable w) throws IOException {
  if (w instanceof TypedBytesWritable) {
    writeTypedBytes((TypedBytesWritable) w);
  } else if (w instanceof BytesWritable) {
    writeBytes((BytesWritable) w);
  } else if (w instanceof ByteWritable) {
    writeByte((ByteWritable) w);
  } else if (w instanceof BooleanWritable) {
    writeBoolean((BooleanWritable) w);
  } else if (w instanceof IntWritable) {
    writeInt((IntWritable) w);
  } else if (w instanceof VIntWritable) {
    writeVInt((VIntWritable) w);
  } else if (w instanceof LongWritable) {
    writeLong((LongWritable) w);
  } else if (w instanceof VLongWritable) {
    writeVLong((VLongWritable) w);
  } else if (w instanceof FloatWritable) {
    writeFloat((FloatWritable) w);
  } else if (w instanceof DoubleWritable) {
    writeDouble((DoubleWritable) w);
  } else if (w instanceof Text) {
    writeText((Text) w);
  } else if (w instanceof ArrayWritable) {
    writeArray((ArrayWritable) w);
  } else if (w instanceof MapWritable) {
    writeMap((MapWritable) w);
  } else if (w instanceof SortedMapWritable) {
    writeSortedMap((SortedMapWritable) w);
  } else if (w instanceof Record) {
    writeRecord((Record) w);
  } else {
    writeWritable(w); // last resort
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 36, Source: TypedBytesWritableOutput.java
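
Order matters in this instanceof chain: TypedBytesWritable extends BytesWritable, so it must be tested before its parent type, and the fully generic writeWritable(w) is kept as the last resort.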

Example 13: readType

import org.apache.hadoop.io.ByteWritable; // import the required package/class
public Class<? extends Writable> readType() throws IOException {
  Type type = in.readType();
  if (type == null) {
    return null;
  }
  switch (type) {
  case BYTES:
    return BytesWritable.class;
  case BYTE:
    return ByteWritable.class;
  case BOOL:
    return BooleanWritable.class;
  case INT:
    return VIntWritable.class;
  case LONG:
    return VLongWritable.class;
  case FLOAT:
    return FloatWritable.class;
  case DOUBLE:
    return DoubleWritable.class;
  case STRING:
    return Text.class;
  case VECTOR:
    return ArrayWritable.class;
  case MAP:
    return MapWritable.class;
  case WRITABLE:
    return Writable.class;
  default:
    throw new RuntimeException("unknown type");
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 33, Source: TypedBytesWritableInput.java

Example 14: updateObject

import org.apache.hadoop.io.ByteWritable; // import the required package/class
public static void updateObject(Writable obj, byte[] seed) {
  if (obj instanceof IntWritable) {
    ((IntWritable)obj).set(Ints.fromByteArray(seed));
  } else if (obj instanceof FloatWritable) {
    ((FloatWritable)obj).set(r.nextFloat());
  } else if (obj instanceof DoubleWritable) {
    ((DoubleWritable)obj).set(r.nextDouble());
  } else if (obj instanceof LongWritable) {
    ((LongWritable)obj).set(Longs.fromByteArray(seed));
  } else if (obj instanceof VIntWritable) {
    ((VIntWritable)obj).set(Ints.fromByteArray(seed));
  } else if (obj instanceof VLongWritable) {
    ((VLongWritable)obj).set(Longs.fromByteArray(seed));
  } else if (obj instanceof BooleanWritable) {
    ((BooleanWritable)obj).set(seed[0] % 2 == 1);
  } else if (obj instanceof Text) {
    ((Text)obj).set(BytesUtil.toStringBinary(seed));
  } else if (obj instanceof ByteWritable) {
    ((ByteWritable)obj).set(seed.length > 0 ? seed[0] : 0);
  } else if (obj instanceof BytesWritable) {
    ((BytesWritable)obj).set(seed, 0, seed.length);
  } else if (obj instanceof UTF8) {
    ((UTF8)obj).set(BytesUtil.toStringBinary(seed));
  } else if (obj instanceof MockValueClass) {
    ((MockValueClass)obj).set(seed);
  } else {
    throw new IllegalArgumentException("unknown writable: " +
                                       obj.getClass().getName());
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 31, Source: BytesFactory.java

Example 15: toBytes

import org.apache.hadoop.io.ByteWritable; // import the required package/class
public static <VTYPE> byte[] toBytes(VTYPE obj) {
  final String className = obj.getClass().getName();
  if (className.equals(IntWritable.class.getName())) {
    return Ints.toByteArray(((IntWritable) obj).get());
  } else if (className.equals(FloatWritable.class.getName())) {
    return BytesUtil.toBytes(((FloatWritable) obj).get());
  } else if (className.equals(DoubleWritable.class.getName())) {
    return BytesUtil.toBytes(((DoubleWritable) obj).get());
  } else if (className.equals(LongWritable.class.getName())) {
    return Longs.toByteArray(((LongWritable) obj).get());
  } else if (className.equals(VIntWritable.class.getName())) {
    return Ints.toByteArray(((VIntWritable) obj).get());
  } else if (className.equals(VLongWritable.class.getName())) {
    return Longs.toByteArray(((VLongWritable) obj).get());
  } else if (className.equals(BooleanWritable.class.getName())) {
    return BytesUtil.toBytes(((BooleanWritable) obj).get());
  } else if (className.equals(Text.class.getName())) {
    return ((Text)obj).copyBytes();
  } else if (className.equals(ByteWritable.class.getName())) {
    return Ints.toByteArray((int) ((ByteWritable) obj).get());
  } else if (className.equals(BytesWritable.class.getName())) {
    // TODO: copyBytes instead?
    return ((BytesWritable) obj).getBytes();
  } else {
    return new byte[0];
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 28, Source: BytesFactory.java
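
Note that the ByteWritable branch widens the single byte to an int and serializes it through Ints.toByteArray, producing a four-byte array; a round trip therefore has to narrow the value back to a byte, as the updateObject method in Example 14 does with seed[0].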


Note: The org.apache.hadoop.io.ByteWritable class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.