This article collects typical usage examples of the Java class org.apache.hadoop.io.BytesWritable. If you are wondering what BytesWritable is for, or how to use it in practice, the curated examples below should help.
The BytesWritable class belongs to the org.apache.hadoop.io package. Fifteen code examples are shown below, ordered by popularity by default.
Example 1: doVerify
import org.apache.hadoop.io.BytesWritable; // import the required package/class
protected void doVerify(Configuration conf, HTableDescriptor htd) throws Exception {
  Path outputDir = getTestDir(TEST_NAME, "verify-output");
  LOG.info("Verify output dir: " + outputDir);
  Job job = Job.getInstance(conf);
  job.setJarByClass(this.getClass());
  job.setJobName(TEST_NAME + " Verification for " + htd.getTableName());
  setJobScannerConf(job);
  Scan scan = new Scan();
  TableMapReduceUtil.initTableMapperJob(
      htd.getTableName().getNameAsString(), scan, VerifyMapper.class,
      BytesWritable.class, BytesWritable.class, job);
  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class);
  int scannerCaching = conf.getInt("verify.scannercaching", SCANNER_CACHING);
  TableMapReduceUtil.setScannerCaching(job, scannerCaching);
  job.setReducerClass(VerifyReducer.class);
  job.setNumReduceTasks(conf.getInt(NUM_REDUCE_TASKS_KEY, NUM_REDUCE_TASKS_DEFAULT));
  FileOutputFormat.setOutputPath(job, outputDir);
  assertTrue(job.waitForCompletion(true));
  long numOutputRecords = job.getCounters().findCounter(Counters.ROWS_WRITTEN).getValue();
  assertEquals(0, numOutputRecords);
}
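The job above registers BytesWritable as both the map output key and value class. That works because BytesWritable is a WritableComparable, so the framework can serialize, sort, and group it during the shuffle. The following minimal sketch (a standalone demo, not part of the test; the class name is made up) round-trips a BytesWritable through its Writable serialization and compares two instances:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.BytesWritable;

// Hypothetical standalone demo: round-trips a BytesWritable through its Writable
// serialization and compares two instances, which is what the shuffle relies on
// when BytesWritable is the map output key.
public class BytesWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    BytesWritable original = new BytesWritable("row-0001".getBytes(StandardCharsets.UTF_8));

    // Serialize the way the MapReduce framework would.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bos));

    // Deserialize into a fresh instance.
    BytesWritable copy = new BytesWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

    // WritableComparable: equal contents compare as 0, so sorting/grouping works.
    System.out.println("compareTo = " + original.compareTo(copy));   // 0
    System.out.println("length    = " + copy.getLength());           // 8
  }
}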
Example 2: readObject
import org.apache.hadoop.io.BytesWritable; // import the required package/class
private void readObject(Writable obj) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the specified length to set the length
  // this causes the "obvious" translations to work. So that if you emit
  // a string "abc" from C++, it shows up as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
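readObject expects a vint length followed by exactly that many raw bytes for BytesWritable and Text. For orientation, here is a hedged sketch of the matching write side; the ObjectWriter class and its outStream field are hypothetical stand-ins, not the actual protocol class:

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

// Hypothetical counterpart to readObject(): writes the vint length followed by
// the raw payload for BytesWritable/Text, and falls back to Writable.write()
// for everything else.
public class ObjectWriter {
  private final DataOutputStream outStream;

  public ObjectWriter(DataOutputStream outStream) {
    this.outStream = outStream;
  }

  public void writeObject(Writable obj) throws IOException {
    if (obj instanceof BytesWritable) {
      BytesWritable bw = (BytesWritable) obj;
      WritableUtils.writeVInt(outStream, bw.getLength());
      outStream.write(bw.getBytes(), 0, bw.getLength());   // only the valid prefix
    } else if (obj instanceof Text) {
      Text t = (Text) obj;
      WritableUtils.writeVInt(outStream, t.getLength());
      outStream.write(t.getBytes(), 0, t.getLength());
    } else {
      obj.write(outStream);
    }
  }
}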
Example 3: getValue
import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Copy the value into BytesWritable. The input BytesWritable will be
 * automatically resized to the actual value size. The implementation
 * directly uses the buffer inside BytesWritable for storing the value.
 * The call does not require the value length to be known.
 *
 * @param value
 * @throws IOException
 */
public long getValue(BytesWritable value) throws IOException {
  DataInputStream dis = getValueStream();
  int size = 0;
  try {
    int remain;
    while ((remain = valueBufferInputStream.getRemain()) > 0) {
      value.setSize(size + remain);
      dis.readFully(value.getBytes(), size, remain);
      size += remain;
    }
    return value.getLength();
  } finally {
    dis.close();
  }
}
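getValue leans on the fact that setSize() grows the backing buffer as needed while preserving bytes already copied in, so the value can be assembled chunk by chunk directly into value.getBytes(). A small standalone illustration of that resize behavior (demo class only):

import org.apache.hadoop.io.BytesWritable;

// Standalone illustration of the resize behaviour getValue() depends on:
// setSize() grows the backing array as needed and preserves existing content,
// so data can be appended directly into value.getBytes().
public class SetSizeDemo {
  public static void main(String[] args) {
    BytesWritable value = new BytesWritable();

    byte[] chunk1 = {1, 2, 3};
    value.setSize(chunk1.length);
    System.arraycopy(chunk1, 0, value.getBytes(), 0, chunk1.length);

    byte[] chunk2 = {4, 5};
    int offset = value.getLength();
    value.setSize(offset + chunk2.length);            // grows, keeps bytes 1..3
    System.arraycopy(chunk2, 0, value.getBytes(), offset, chunk2.length);

    System.out.println(value.getLength());            // 5
    System.out.println(value.getCapacity() >= 5);     // true; capacity may exceed length
  }
}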
Example 4: SeqFileAppendable
import org.apache.hadoop.io.BytesWritable; // import the required package/class
public SeqFileAppendable(FileSystem fs, Path path, int osBufferSize,
    String compress, int minBlkSize) throws IOException {
  Configuration conf = new Configuration();
  CompressionCodec codec = null;
  if ("lzo".equals(compress)) {
    codec = Compression.Algorithm.LZO.getCodec();
  } else if ("gz".equals(compress)) {
    codec = Compression.Algorithm.GZ.getCodec();
  } else if (!"none".equals(compress)) {
    throw new IOException("Codec not supported.");
  }
  this.fsdos = fs.create(path, true, osBufferSize);
  if (!"none".equals(compress)) {
    writer = SequenceFile.createWriter(conf, fsdos, BytesWritable.class,
        BytesWritable.class, SequenceFile.CompressionType.BLOCK, codec);
  } else {
    writer = SequenceFile.createWriter(conf, fsdos, BytesWritable.class,
        BytesWritable.class, SequenceFile.CompressionType.NONE, null);
  }
}
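Once constructed, a writer like this is used by appending BytesWritable key/value pairs and then closing it. The sketch below shows the same idea directly against SequenceFile.Writer; the path and record contents are illustrative only:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;

// Hypothetical usage sketch (path and record contents are made up): append a few
// BytesWritable key/value pairs to a SequenceFile and close the writer.
public class AppendBytesWritable {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/byteswritable-demo.seq");   // assumed scratch location

    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path,
        BytesWritable.class, BytesWritable.class);
    try {
      for (int i = 0; i < 3; i++) {
        BytesWritable key = new BytesWritable(("key-" + i).getBytes(StandardCharsets.UTF_8));
        BytesWritable value = new BytesWritable(("value-" + i).getBytes(StandardCharsets.UTF_8));
        writer.append(key, value);
      }
    } finally {
      writer.close();
    }
  }
}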
Example 5: testNestedIterable
import org.apache.hadoop.io.BytesWritable; // import the required package/class
public void testNestedIterable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  assertTrue("Bad count", writs.length == verifIter(writs, sTuple, 0));
}
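A small aside on the two BytesWritable entries above: String.getBytes() with no argument uses the JVM default charset, so an explicit charset gives reproducible bytes across platforms. A minimal variant (demo class only):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.BytesWritable;

// Minor variation on the test above: passing an explicit charset to getBytes()
// makes the wrapped bytes reproducible regardless of the platform default.
public class CharsetSafeBytesWritable {
  public static void main(String[] args) {
    BytesWritable dingo = new BytesWritable("dingo".getBytes(StandardCharsets.UTF_8));
    BytesWritable yak = new BytesWritable("yak".getBytes(StandardCharsets.UTF_8));
    System.out.println(dingo.getLength() + " " + yak.getLength());   // 5 3
  }
}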
Example 6: evaluate
import org.apache.hadoop.io.BytesWritable; // import the required package/class
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  if (digest == null) {
    return null;
  }
  digest.reset();
  if (isStr) {
    Text n = getTextValue(arguments, 0, converters);
    if (n == null) {
      return null;
    }
    digest.update(n.getBytes(), 0, n.getLength());
  } else {
    BytesWritable bWr = getBinaryValue(arguments, 0, converters);
    if (bWr == null) {
      return null;
    }
    digest.update(bWr.getBytes(), 0, bWr.getLength());
  }
  byte[] resBin = digest.digest();
  String resStr = Hex.encodeHexString(resBin);
  output.set(resStr);
  return output;
}
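Note that the UDF passes getLength() rather than getBytes().length: the array returned by getBytes() may be larger than the valid data, so using its raw length could hash stale padding. A standalone demo of that trap (demo class only):

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.BytesWritable;

// Standalone illustration of why the UDF pairs getBytes() with getLength():
// the backing array returned by getBytes() may be larger than the valid data,
// so processing getBytes().length bytes could silently include stale padding.
public class PaddingTrapDemo {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable("0123456789".getBytes(StandardCharsets.UTF_8));
    bw.setSize(4);                                   // logically truncate to "0123"

    System.out.println(bw.getLength());              // 4
    System.out.println(bw.getBytes().length >= 10);  // true: capacity is unchanged
    // Correct: new String(bw.getBytes(), 0, bw.getLength(), StandardCharsets.UTF_8)
    // Wrong:   new String(bw.getBytes(), StandardCharsets.UTF_8)  -> trailing garbage
  }
}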
Example 7: next
import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Read raw bytes from a SequenceFile.
 */
public synchronized boolean next(BytesWritable key, BytesWritable val)
    throws IOException {
  if (done) return false;
  long pos = in.getPosition();
  boolean eof = -1 == in.nextRawKey(buffer);
  if (!eof) {
    key.set(buffer.getData(), 0, buffer.getLength());
    buffer.reset();
    in.nextRawValue(vbytes);
    vbytes.writeUncompressedBytes(buffer);
    val.set(buffer.getData(), 0, buffer.getLength());
    buffer.reset();
  }
  return !(done = (eof || (pos >= end && in.syncSeen())));
}
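The method above copies raw, possibly compressed record bytes into the output BytesWritable instances. For contrast, this is roughly what the ordinary typed read loop looks like when the file's keys and values are BytesWritable; the file path is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;

// For contrast with the raw-bytes reader above: a plain typed read loop over a
// SequenceFile whose keys and values are BytesWritable.
public class TypedSeqFileRead {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/byteswritable-demo.seq");   // assumed input file

    SequenceFile.Reader reader =
        new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable value = new BytesWritable();
      while (reader.next(key, value)) {                    // objects are reused
        System.out.println(key.getLength() + " -> " + value.getLength());
      }
    } finally {
      reader.close();
    }
  }
}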
Example 8: resolve
import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Resolves a given identifier. This method has to be called before calling
 * any of the getters.
 */
public void resolve(String identifier) {
  if (identifier.equalsIgnoreCase(RAW_BYTES_ID)) {
    setInputWriterClass(RawBytesInputWriter.class);
    setOutputReaderClass(RawBytesOutputReader.class);
    setOutputKeyClass(BytesWritable.class);
    setOutputValueClass(BytesWritable.class);
  } else if (identifier.equalsIgnoreCase(TYPED_BYTES_ID)) {
    setInputWriterClass(TypedBytesInputWriter.class);
    setOutputReaderClass(TypedBytesOutputReader.class);
    setOutputKeyClass(TypedBytesWritable.class);
    setOutputValueClass(TypedBytesWritable.class);
  } else if (identifier.equalsIgnoreCase(KEY_ONLY_TEXT_ID)) {
    setInputWriterClass(KeyOnlyTextInputWriter.class);
    setOutputReaderClass(KeyOnlyTextOutputReader.class);
    setOutputKeyClass(Text.class);
    setOutputValueClass(NullWritable.class);
  } else { // assume TEXT_ID
    setInputWriterClass(TextInputWriter.class);
    setOutputReaderClass(TextOutputReader.class);
    setOutputKeyClass(Text.class);
    setOutputValueClass(Text.class);
  }
}
Example 9: run
import org.apache.hadoop.io.BytesWritable; // import the required package/class
@Override
public void run() {
  for (int i = 0; i < count; i++) {
    try {
      int byteSize = RANDOM.nextInt(BYTE_COUNT);
      byte[] bytes = new byte[byteSize];
      System.arraycopy(BYTES, 0, bytes, 0, byteSize);
      Writable param = new BytesWritable(bytes);
      client.call(param, address);
      Thread.sleep(RANDOM.nextInt(20));
    } catch (Exception e) {
      LOG.fatal("Caught Exception", e);
      failed = true;
    }
  }
}
Example 10: readFields
import org.apache.hadoop.io.BytesWritable; // import the required package/class
public void readFields(DataInput in) throws IOException {
  // After the RecordStartMark, we expect to get a SEGMENT_HEADER_ID (-1).
  long segmentId = WritableUtils.readVLong(in);
  if (SEGMENT_HEADER_ID != segmentId) {
    throw new IOException("Expected segment header id " + SEGMENT_HEADER_ID
        + "; got " + segmentId);
  }
  // Get the length of the rest of the segment, in bytes.
  long length = WritableUtils.readVLong(in);
  // Now read the actual main byte array.
  if (length > Integer.MAX_VALUE) {
    throw new IOException("Unexpected oversize data array length: "
        + length);
  } else if (length < 0) {
    throw new IOException("Unexpected undersize data array length: "
        + length);
  }
  byte[] segmentData = new byte[(int) length];
  in.readFully(segmentData);
  recordLenBytes = new BytesWritable(segmentData);
  reset(); // Reset the iterator allowing the user to yield offset/lengths.
}
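From the fields readFields consumes, the on-disk layout is a vlong header id, a vlong byte count, and then the raw segment bytes. The sketch below is a hypothetical inverse write path inferred from that layout; it is not the real companion class, and the names merely mirror the snippet:

import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableUtils;

// Hypothetical inverse of readFields() above, inferred from what it reads:
// a vlong header id, a vlong byte count, then the raw segment bytes.
// The constant and field names are assumptions, not the real class.
public class LengthSegmentWriter {
  private static final long SEGMENT_HEADER_ID = -1;

  private final BytesWritable recordLenBytes;

  public LengthSegmentWriter(BytesWritable recordLenBytes) {
    this.recordLenBytes = recordLenBytes;
  }

  public void write(DataOutput out) throws IOException {
    WritableUtils.writeVLong(out, SEGMENT_HEADER_ID);
    WritableUtils.writeVLong(out, recordLenBytes.getLength());
    out.write(recordLenBytes.getBytes(), 0, recordLenBytes.getLength());
  }
}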
Example 11: accept
import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Filtering method
 * If MD5(key) % frequency==0, return true; otherwise return false
 * @see Filter#accept(Object)
 */
public boolean accept(Object key) {
  try {
    long hashcode;
    if (key instanceof Text) {
      hashcode = MD5Hashcode((Text) key);
    } else if (key instanceof BytesWritable) {
      hashcode = MD5Hashcode((BytesWritable) key);
    } else {
      ByteBuffer bb;
      bb = Text.encode(key.toString());
      hashcode = MD5Hashcode(bb.array(), 0, bb.limit());
    }
    if (hashcode / frequency * frequency == hashcode)
      return true;
  } catch (Exception e) {
    LOG.warn(e);
    throw new RuntimeException(e);
  }
  return false;
}
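The check hashcode / frequency * frequency == hashcode is integer-division shorthand for hashcode % frequency == 0 when the hash code is non-negative, so roughly one key in frequency is accepted. A standalone sketch of the same MD5-based sampling idea using only JDK classes (class name and digest-folding scheme are illustrative):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

// Standalone sketch of the sampling idea behind accept(): hash the key with MD5,
// fold the digest into a non-negative long, and keep roughly 1 out of `frequency`
// keys. (h / f * f == h is equivalent to h % f == 0 for non-negative h.)
public class Md5Sampler {
  private final int frequency;

  public Md5Sampler(int frequency) {
    this.frequency = frequency;
  }

  public boolean accept(String key) throws Exception {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    byte[] digest = md5.digest(key.getBytes(StandardCharsets.UTF_8));
    long hashcode = 0;
    for (int i = 0; i < 8; i++) {                   // fold the first 8 digest bytes
      hashcode = (hashcode << 8) | (digest[i] & 0xff);
    }
    hashcode &= Long.MAX_VALUE;                     // keep it non-negative
    return hashcode % frequency == 0;
  }

  public static void main(String[] args) throws Exception {
    Md5Sampler sampler = new Md5Sampler(10);        // keep roughly 10% of keys
    System.out.println(sampler.accept("row-42"));
  }
}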
Example 12: reduce
import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * {@inheritDoc}
 */
protected void reduce(final Text key, final Iterable<BytesWritable> values, final Context context)
    throws IOException, InterruptedException {
  final Configuration configuration = context.getConfiguration();
  final String sourcePath = configuration.get("compactionSourcePath");
  final String targetPath = configuration.get("compactionTargetPath");
  // Reducer stores data at the target directory retaining the directory structure of files
  String filePath = key.toString().replace(sourcePath, targetPath);
  if (key.toString().endsWith("/")) {
    filePath = filePath.concat("file");
  }
  log.info("Compaction output path {}", filePath);
  final URI uri = URI.create(filePath);
  final MultipleOutputs multipleOutputs = new MultipleOutputs<NullWritable, BytesWritable>(context);
  try {
    for (final BytesWritable text : values) {
      multipleOutputs.write(NullWritable.get(), text, uri.toString());
    }
  } finally {
    multipleOutputs.close();
  }
}
Example 13: readObject
import org.apache.hadoop.io.BytesWritable; // import the required package/class
protected void readObject(Writable obj, DataInputStream inStream) throws IOException {
  int numBytes = WritableUtils.readVInt(inStream);
  byte[] buffer;
  // For BytesWritable and Text, use the specified length to set the length
  // this causes the "obvious" translations to work. So that if you emit
  // a string "abc" from C++, it shows up as "abc".
  if (obj instanceof BytesWritable) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((BytesWritable) obj).set(buffer, 0, numBytes);
  } else if (obj instanceof Text) {
    buffer = new byte[numBytes];
    inStream.readFully(buffer);
    ((Text) obj).set(buffer);
  } else {
    obj.readFields(inStream);
  }
}
Example 14: configureWaitingJobConf
import org.apache.hadoop.io.BytesWritable; // import the required package/class
/**
 * Configure a waiting job
 */
static void configureWaitingJobConf(JobConf jobConf, Path inDir,
    Path outputPath, int numMaps, int numRed,
    String jobName, String mapSignalFilename,
    String redSignalFilename)
    throws IOException {
  jobConf.setJobName(jobName);
  jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outputPath);
  jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class);
  jobConf.setReducerClass(IdentityReducer.class);
  jobConf.setOutputKeyClass(BytesWritable.class);
  jobConf.setOutputValueClass(BytesWritable.class);
  jobConf.setInputFormat(RandomInputFormat.class);
  jobConf.setNumMapTasks(numMaps);
  jobConf.setNumReduceTasks(numRed);
  jobConf.setJar("build/test/mapred/testjar/testjob.jar");
  jobConf.set(getTaskSignalParameter(true), mapSignalFilename);
  jobConf.set(getTaskSignalParameter(false), redSignalFilename);
}
Example 15: createFiles
import org.apache.hadoop.io.BytesWritable; // import the required package/class
private static void createFiles(int length, int numFiles, Random random,
    Job job) throws IOException {
  Range[] ranges = createRanges(length, numFiles, random);
  for (int i = 0; i < numFiles; i++) {
    Path file = new Path(workDir, "test_" + i + ".seq");
    // create a file with length entries
    @SuppressWarnings("deprecation")
    SequenceFile.Writer writer =
        SequenceFile.createWriter(localFs, job.getConfiguration(), file,
            IntWritable.class, BytesWritable.class);
    Range range = ranges[i];
    try {
      for (int j = range.start; j < range.end; j++) {
        IntWritable key = new IntWritable(j);
        byte[] data = new byte[random.nextInt(10)];
        random.nextBytes(data);
        BytesWritable value = new BytesWritable(data);
        writer.append(key, value);
      }
    } finally {
      writer.close();
    }
  }
}
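The @SuppressWarnings("deprecation") above targets the older SequenceFile.createWriter overload. For reference, here is a sketch of the equivalent setup with the Writer.Option-based overload available in Hadoop 2 and later; the output path is illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;

// Sketch of creating the same kind of IntWritable/BytesWritable SequenceFile
// with the newer Writer.Option overload instead of the deprecated one.
public class OptionBasedWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/test_0.seq");           // assumed output location

    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(file),
        SequenceFile.Writer.keyClass(IntWritable.class),
        SequenceFile.Writer.valueClass(BytesWritable.class));
    try {
      writer.append(new IntWritable(0), new BytesWritable(new byte[] {1, 2, 3}));
    } finally {
      writer.close();
    }
  }
}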