This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.HFile.getWriterFactoryNoCache. If you have been wondering what HFile.getWriterFactoryNoCache does, how to call it, or what real uses of it look like, the curated examples here may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.io.hfile.HFile.
Three code examples of the HFile.getWriterFactoryNoCache method are shown below, sorted by popularity by default.
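Before the full examples, here is a minimal, self-contained sketch of the pattern they all follow: obtain an HFile.WriterFactory via getWriterFactoryNoCache (the returned writers bypass the block cache), configure it with a destination and an HFileContext, create the writer, append cells in key order, and close it. This is a sketch against the HBase 1.x-era API used by the examples below; the class name HFileWriteSketch and the output path are illustrative, not from the original examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileWriteSketch { // hypothetical class name
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // illustrative output path

    // Factory whose writers do not go through the block cache.
    HFile.WriterFactory factory = HFile.getWriterFactoryNoCache(conf);
    HFile.Writer writer = factory
        .withPath(fs, path)
        .withFileContext(new HFileContext())
        .create();
    try {
      // Cells must be appended in sorted key order.
      writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), 0L, Bytes.toBytes("value")));
    } finally {
      writer.close();
    }
  }
}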
Example 1: createHFileForFamilies
import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private String createHFileForFamilies(byte[] family) throws IOException {
  // Obtain a writer factory whose writers bypass the block cache.
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf);
  // TODO We need a way to do this without creating files
  File hFileLocation = testFolder.newFile();
  FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(randomBytes,
          family,
          randomBytes,
          0L,
          KeyValue.Type.Put.getCode(),
          randomBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return hFileLocation.getAbsoluteFile().getAbsolutePath();
}
Example 2: createHFileForFamilies
import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private String createHFileForFamilies(Path testPath, byte[] family,
    byte[] valueBytes) throws IOException {
  HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  // TODO We need a way to do this without creating files
  Path testFile = new Path(testPath, UUID.randomUUID().toString());
  FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile);
  try {
    hFileFactory.withOutputStream(out);
    hFileFactory.withFileContext(new HFileContext());
    HFile.Writer writer = hFileFactory.create();
    try {
      writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0L,
          KeyValue.Type.Put.getCode(), valueBytes)));
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }
  return testFile.toString();
}
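Note the design difference between the examples: Examples 1 and 2 hand the factory an already-opened FSDataOutputStream via withOutputStream, so the caller remains responsible for closing that stream, while Example 3 below uses withPath, letting the writer open and manage the underlying stream itself. In all three cases an HFileContext is supplied before create() is called.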
Example 3: writeToHFile
import org.apache.hadoop.hbase.io.hfile.HFile; // import the package/class the method depends on
private Path writeToHFile(long l, String hFilePath, String pathStr, boolean nativeHFile)
    throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  final Path hfilePath = new Path(hFilePath);
  fs.mkdirs(hfilePath);
  Path path = new Path(pathStr);
  HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
  Assert.assertNotNull(wf);
  HFileContext context = new HFileContext();
  HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
  KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
      Bytes.toBytes("version2"));

  // Set cell seq id to test bulk load native hfiles.
  if (nativeHFile) {
    // Set a big seq id. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip
    // this kv.
    kv.setSequenceId(9999999);
  }
  writer.append(kv);

  if (nativeHFile) {
    // Set a big MAX_SEQ_ID_KEY. Scan should not look at this seq id in a bulk loaded file.
    // Scan should only look at the seq id appended at the bulk load time, and not skip its
    // kv.
    writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(9999999L));
  } else {
    writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
  }
  writer.close();
  return hfilePath;
}
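To observe the effect of the file-info entries Example 3 writes (MAX_SEQ_ID_KEY or BULKLOAD_TIME_KEY), one can open the file and read the file-info map back. The following is a hedged sketch assuming the same HBase 1.x-era API as the examples above; the class name HFileInfoSketch and the path are illustrative.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileInfoSketch { // hypothetical class name
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // illustrative: a file written as above

    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      // Entries written via Writer.appendFileInfo are stored in the HFile's file-info block.
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
      byte[] maxSeqId = fileInfo.get(StoreFile.MAX_SEQ_ID_KEY);
      if (maxSeqId != null) {
        System.out.println("MAX_SEQ_ID_KEY = " + Bytes.toLong(maxSeqId));
      }
    } finally {
      reader.close();
    }
  }
}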