This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.Writables.copyWritable. If you have been wondering what exactly Writables.copyWritable does and how to use it, the curated method examples here may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.Writables.
Five code examples of Writables.copyWritable are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
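Before the examples, a quick orientation: Writables.copyWritable(byte[] bytes, Writable tgt) deserializes a byte array into an existing Writable by feeding the bytes to the target's readFields(); a sibling overload, copyWritable(Writable src, Writable tgt), copies one Writable into another the same way. The sketch below round-trips a TimeRangeTracker through Writables.getBytes and copyWritable. It is a minimal illustration, not taken from the examples that follow, and it assumes an older HBase release in which TimeRangeTracker still implements Writable and exposes a public includeTimestamp(long) method (consistent with the test code shown below).

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.util.Writables;

public class CopyWritableSketch {
  public static void main(String[] args) throws IOException {
    // Populate a tracker so that it spans [1000, 2000].
    TimeRangeTracker source = new TimeRangeTracker();
    source.includeTimestamp(1000); // assumed public API on older TimeRangeTracker
    source.includeTimestamp(2000);
    // Writables.getBytes marshals the Writable's write() output into a byte[],
    // the same shape of payload the tests read back from the TIMERANGE file-info key.
    byte[] serialized = Writables.getBytes(source);
    // copyWritable hands those bytes to the target's readFields().
    TimeRangeTracker copy = new TimeRangeTracker();
    Writables.copyWritable(serialized, copy);
    System.out.println(copy.getMinimumTimestamp() + "...." + copy.getMaximumTimestamp());
    // prints: 1000....2000
  }
}

The five examples below all follow this pattern: they obtain the serialized bytes (from an HFile's file-info map) and rehydrate them into a fresh TimeRangeTracker.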
Example 1: test_TIMERANGE
import org.apache.hadoop.hbase.util.Writables; // import for the package/class the method depends on
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: " + dir);
  try {
    // build a record writer using HFileOutputFormat
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = getTestTaskAttemptContext(job);
    HFileOutputFormat hof = new HFileOutputFormat();
    writer = hof.getRecordWriter(context);
    // Pass two key values with explicit timestamps
    final byte[] b = Bytes.toBytes("b");
    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // verify that the file has the proper FileInfo.
    writer.close(context);
    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());
    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf));
    Map<byte[], byte[]> finfo = rd.loadFileInfo();
    byte[] range = finfo.get("TIMERANGE".getBytes());
    assertNotNull(range);
    // unmarshall and check values.
    TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
    Writables.copyWritable(range, timeRangeTracker);
    LOG.info(timeRangeTracker.getMinimumTimestamp() + "...."
        + timeRangeTracker.getMaximumTimestamp());
    assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
    assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    rd.close();
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
Example 2: test_TIMERANGE
import org.apache.hadoop.hbase.util.Writables; // import for the package/class the method depends on
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, Cell> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: " + dir);
  try {
    // build a record writer using HFileOutputFormat2
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat2 hof = new HFileOutputFormat2();
    writer = hof.getRecordWriter(context);
    // Pass two key values with explicit timestamps
    final byte[] b = Bytes.toBytes("b");
    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // verify that the file has the proper FileInfo.
    writer.close(context);
    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());
    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), conf);
    Map<byte[], byte[]> finfo = rd.loadFileInfo();
    byte[] range = finfo.get("TIMERANGE".getBytes());
    assertNotNull(range);
    // unmarshall and check values.
    TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
    Writables.copyWritable(range, timeRangeTracker);
    LOG.info(timeRangeTracker.getMinimumTimestamp() + "...."
        + timeRangeTracker.getMaximumTimestamp());
    assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
    assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    rd.close();
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
Example 3: test_TIMERANGE
import org.apache.hadoop.hbase.util.Writables; // import for the package/class the method depends on
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: " + dir);
  try {
    // build a record writer using HFileOutputFormat
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat hof = new HFileOutputFormat();
    writer = hof.getRecordWriter(context);
    // Pass two key values with explicit timestamps
    final byte[] b = Bytes.toBytes("b");
    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // verify that the file has the proper FileInfo.
    writer.close(context);
    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());
    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), conf);
    Map<byte[], byte[]> finfo = rd.loadFileInfo();
    byte[] range = finfo.get("TIMERANGE".getBytes());
    assertNotNull(range);
    // unmarshall and check values.
    TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
    Writables.copyWritable(range, timeRangeTracker);
    LOG.info(timeRangeTracker.getMinimumTimestamp() + "...."
        + timeRangeTracker.getMaximumTimestamp());
    assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
    assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    rd.close();
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}
Example 4: printMeta
import org.apache.hadoop.hbase.util.Writables; // import for the package/class the method depends on
private void printMeta(HFile.Reader reader, Map<byte[], byte[]> fileInfo)
    throws IOException {
  System.out.println("Block index size as per heapsize: " + reader.indexSize());
  System.out.println(asSeparateLines(reader.toString()));
  System.out.println("Trailer:\n " + asSeparateLines(reader.getTrailer().toString()));
  System.out.println("Fileinfo:");
  for (Map.Entry<byte[], byte[]> e : fileInfo.entrySet()) {
    System.out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = ");
    if (Bytes.compareTo(e.getKey(), Bytes.toBytes("MAX_SEQ_ID_KEY")) == 0) {
      long seqid = Bytes.toLong(e.getValue());
      System.out.println(seqid);
    } else if (Bytes.compareTo(e.getKey(), Bytes.toBytes("TIMERANGE")) == 0) {
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(e.getValue(), timeRangeTracker);
      System.out.println(timeRangeTracker.getMinimumTimestamp() + "...."
          + timeRangeTracker.getMaximumTimestamp());
    } else if (Bytes.compareTo(e.getKey(), FileInfo.AVG_KEY_LEN) == 0
        || Bytes.compareTo(e.getKey(), FileInfo.AVG_VALUE_LEN) == 0) {
      System.out.println(Bytes.toInt(e.getValue()));
    } else {
      System.out.println(Bytes.toStringBinary(e.getValue()));
    }
  }
  System.out.println("Mid-key: " + Bytes.toStringBinary(reader.midkey()));
  // Printing bloom information
  DataInput bloomMeta = reader.getBloomFilterMetadata();
  BloomFilter bloomFilter = null;
  if (bloomMeta != null)
    bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
  System.out.println("Bloom filter:");
  if (bloomFilter != null) {
    System.out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(
        ByteBloomFilter.STATS_RECORD_SEP, "\n" + FOUR_SPACES));
  } else {
    System.out.println(FOUR_SPACES + "Not present");
  }
}
Example 5: test_TIMERANGE
import org.apache.hadoop.hbase.util.Writables; // import for the package/class the method depends on
@Test
public void test_TIMERANGE() throws Exception {
  Configuration conf = new Configuration(this.util.getConfiguration());
  RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
  TaskAttemptContext context = null;
  Path dir = util.getDataTestDir("test_TIMERANGE_present");
  LOG.info("Timerange dir writing to dir: " + dir);
  try {
    // build a record writer using HFileOutputFormat
    Job job = new Job(conf);
    FileOutputFormat.setOutputPath(job, dir);
    context = createTestTaskAttemptContext(job);
    HFileOutputFormat hof = new HFileOutputFormat();
    writer = hof.getRecordWriter(context);
    // Pass two key values with explicit timestamps
    final byte[] b = Bytes.toBytes("b");
    // value 1 with timestamp 2000
    KeyValue kv = new KeyValue(b, b, b, 2000, b);
    KeyValue original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // value 2 with timestamp 1000
    kv = new KeyValue(b, b, b, 1000, b);
    original = kv.clone();
    writer.write(new ImmutableBytesWritable(), kv);
    assertEquals(original, kv);
    // verify that the file has the proper FileInfo.
    writer.close(context);
    // the generated file lives 1 directory down from the attempt directory
    // and is the only file, e.g.
    // _attempt__0000_r_000000_0/b/1979617994050536795
    FileSystem fs = FileSystem.get(conf);
    Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
    FileStatus[] sub1 = fs.listStatus(attemptDirectory);
    FileStatus[] file = fs.listStatus(sub1[0].getPath());
    // open as HFile Reader and pull out TIMERANGE FileInfo.
    HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf));
    Map<byte[], byte[]> finfo = rd.loadFileInfo();
    byte[] range = finfo.get("TIMERANGE".getBytes());
    assertNotNull(range);
    // unmarshall and check values.
    TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
    Writables.copyWritable(range, timeRangeTracker);
    LOG.info(timeRangeTracker.getMinimumTimestamp() + "...."
        + timeRangeTracker.getMaximumTimestamp());
    assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
    assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
    rd.close();
  } finally {
    if (writer != null && context != null) writer.close(context);
    dir.getFileSystem(conf).delete(dir, true);
  }
}