This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.EnvironmentEdgeManager.getDelegate. If you are wondering what EnvironmentEdgeManager.getDelegate does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of its containing class, org.apache.hadoop.hbase.util.EnvironmentEdgeManager.
Three code examples of the EnvironmentEdgeManager.getDelegate method are shown below, drawn from open-source HBase test code.
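A quick orientation before the examples: EnvironmentEdgeManager is HBase's pluggable clock. getDelegate() returns the EnvironmentEdge instance that the manager currently forwards time queries to, and injectEdge() swaps in a replacement, which is how tests take control of time. The following is a minimal sketch of the save/inject/restore pattern all three examples build on; it assumes an HBase 1.x classpath, where EnvironmentEdge exposes a single currentTime() method, and the class name FrozenClockSketch is purely illustrative.

import org.apache.hadoop.hbase.util.EnvironmentEdge;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class FrozenClockSketch {
  public static void main(String[] args) {
    // Save the current delegate so the real clock can be restored afterwards.
    EnvironmentEdge original = EnvironmentEdgeManager.getDelegate();
    try {
      final long frozen = 1000000L;
      // Inject a fixed clock; every caller of EnvironmentEdgeManager.currentTime()
      // now observes the frozen timestamp.
      EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
        @Override
        public long currentTime() {
          return frozen;
        }
      });
      System.out.println(EnvironmentEdgeManager.currentTime()); // prints 1000000
    } finally {
      // Restore whatever getDelegate() handed back at the start.
      EnvironmentEdgeManager.injectEdge(original);
    }
  }
}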
Example 1: prepareData
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
    FIFOCompactionPolicy.class.getName());
  desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
    DisabledRegionSplitPolicy.class.getName());
  HColumnDescriptor colDesc = new HColumnDescriptor(family);
  colDesc.setTimeToLive(1); // 1 sec
  desc.addFamily(colDesc);
  admin.createTable(desc);
  Table table = TEST_UTIL.getConnection().getTable(tableName);
  Random rand = new Random();
  TimeOffsetEnvironmentEdge edge =
      (TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
    edge.increment(1001);
  }
  return getStoreWithName(tableName);
}
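The unchecked cast to TimeOffsetEnvironmentEdge above only succeeds because the test injected such an edge before prepareData() ran; getDelegate() simply returns whatever edge was last injected. Below is a sketch of the setup this relies on. TimeOffsetEnvironmentEdge is an HBase test-scope helper whose increment(long) shifts every later currentTime() reading forward; placing the injection in a @BeforeClass method is an assumption about the surrounding test class, not code shown in this article.

@BeforeClass
public static void setUpClock() throws Exception {
  // Inject the offset clock before any table I/O so the cast in prepareData()
  // ((TimeOffsetEnvironmentEdge) EnvironmentEdgeManager.getDelegate()) is safe.
  EnvironmentEdgeManager.injectEdge(new TimeOffsetEnvironmentEdge());
}

With that in place, each edge.increment(1001) call in the loop pushes the clock just past the column family's 1-second TTL, so every flushed HFile is already expired from the FIFO compaction policy's point of view.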
Example 2: testHFileCleaning
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test(timeout = 60 * 1000)
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
  // Create 1 invalid file, 31 expired files and 1 "recent" file (33 total)
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // Case 1: 1 invalid file, which should be deleted directly
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  LOG.debug("Now is: " + createTime);
  for (int i = 1; i < 32; i++) {
    // Case 2: old files which would be deletable for the first cleaner in the chain
    // (TimeToLiveHFileCleaner)
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    // set the creation time past ttl to ensure that it gets removed
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }
  // Case 3: 1 newer file, not even deletable for the first cleaner
  // (TimeToLiveHFileCleaner), so we are not going down the chain
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  // set creation time within the ttl
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }
  assertEquals(33, fs.listStatus(archivedHfileDir).length);
  // set a custom edge manager to handle time checking
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override
    public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);
  // run the chore
  cleaner.chore();
  // ensure we only end up with the saved file
  assertEquals(1, fs.listStatus(archivedHfileDir).length);
  for (FileStatus file : fs.listStatus(archivedHfileDir)) {
    LOG.debug("Kept hfiles: " + file.getPath().getName());
  }
  // reset the edge back to the original edge
  EnvironmentEdgeManager.injectEdge(originalEdge);
}
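One design note on the restore step: because injectEdge(originalEdge) sits outside any finally block, a failing assertion would leak the frozen clock into later tests in the same JVM. A slightly more defensive arrangement of the same calls might look like this (a sketch, not the test's actual code):

EnvironmentEdgeManager.injectEdge(setTime);
try {
  // run the chore under the fake clock
  cleaner.chore();
  assertEquals(1, fs.listStatus(archivedHfileDir).length);
} finally {
  // Runs even if an assertion throws, so no other test sees the fake clock.
  EnvironmentEdgeManager.injectEdge(originalEdge);
}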
Example 3: testFlushSequenceIdIsGreaterThanAllEditsInHFile
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Test that a flush is guaranteed to get a sequence id beyond the last edit appended. We do
 * this by slowing appends in the background ring buffer thread while in the foreground we
 * call flush. The addition of the sync over HRegion in flush should fix an issue where flush
 * was returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p : puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
        System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}
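In this example getDelegate() is not used for casting or restoring; the current edge is simply passed to TestWALReplay.addRegionEdits(...) as the timestamp source for the generated edits. That helper's body is not shown in this article, so the sketch below is hypothetical: the name addEdits and its signature are illustrative, not the real TestWALReplay API. It shows the general idea of stamping each Put from the supplied edge instead of System.currentTimeMillis(), which keeps the generated timestamps under the test's control.

static List<Put> addEdits(byte[] row, byte[] family, int count,
    EnvironmentEdge ee, HRegion region, String qualifierPrefix) throws IOException {
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i < count; i++) {
    Put p = new Put(row);
    // Timestamp comes from the edge, so an injected clock controls it.
    p.addColumn(family, Bytes.toBytes(qualifierPrefix + i), ee.currentTime(),
        Bytes.toBytes(i));
    region.put(p);
    puts.add(p);
  }
  return puts;
}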