This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.EnvironmentEdgeManager.injectEdge. If you have been wondering what EnvironmentEdgeManager.injectEdge does, or how to use it in practice, the curated examples below should help. You can also explore further usages of its enclosing class, org.apache.hadoop.hbase.util.EnvironmentEdgeManager.
The following presents 15 code examples of EnvironmentEdgeManager.injectEdge, sorted by popularity by default.
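As a quick orientation before the examples: injectEdge replaces the clock that EnvironmentEdgeManager hands out process-wide, so tests can control time deterministically. Below is a minimal sketch of the inject/advance/reset pattern the examples share, using the ManualEnvironmentEdge and reset calls that appear throughout them; the test body is a placeholder.

import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

@Test
public void sketchOfClockInjection() {
  ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(clock); // currentTime() now reads this edge
  try {
    clock.setValue(1000); // pin "now" to a known millisecond value
    clock.incValue(500);  // advance the injected clock deterministically
    // ... exercise time-dependent code here ...
  } finally {
    EnvironmentEdgeManager.reset(); // restore the default wall-clock edge
  }
}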
Example 1: testSortExtract
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test
public void testSortExtract() {
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(mee);
  mee.setValue(1);
  DeadServer d = new DeadServer();
  d.add(hostname123);
  mee.incValue(1);
  d.add(hostname1234);
  mee.incValue(1);
  d.add(hostname12345);
  List<Pair<ServerName, Long>> copy = d.copyDeadServersSince(2L);
  Assert.assertEquals(2, copy.size());
  Assert.assertEquals(hostname1234, copy.get(0).getFirst());
  Assert.assertEquals(new Long(2L), copy.get(0).getSecond());
  Assert.assertEquals(hostname12345, copy.get(1).getFirst());
  Assert.assertEquals(new Long(3L), copy.get(1).getSecond());
  EnvironmentEdgeManager.reset();
}
Example 2: checkShouldFlush
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
private void checkShouldFlush(Configuration conf, boolean expected) throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
    HRegion region = hbaseUtility.createTestRegion("foobar", new HColumnDescriptor("foo"));
    List<Store> stores = region.getStores();
    assertTrue(stores.size() == 1);
    Store s = stores.iterator().next();
    edge.setCurrentTimeMillis(1234);
    s.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    edge.setCurrentTimeMillis(1234 + 100);
    StringBuffer sb = new StringBuffer();
    assertTrue(region.shouldFlush(sb) == false);
    edge.setCurrentTimeMillis(1234 + 10000);
    assertTrue(region.shouldFlush(sb) == expected);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 3: testUpdateToTimeOfOldestEdit
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Tests that the timeOfOldestEdit is updated correctly for the
 * various edit operations in memstore.
 * @throws Exception
 */
public void testUpdateToTimeOfOldestEdit() throws Exception {
  try {
    EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
    EnvironmentEdgeManager.injectEdge(edge);
    DefaultMemStore memstore = new DefaultMemStore();
    long t = memstore.timeOfOldestEdit();
    assertEquals(t, Long.MAX_VALUE);
    // test the case that the timeOfOldestEdit is updated after a KV add
    memstore.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    // snapshot() will reset timeOfOldestEdit. The method will also assert the
    // value is reset to Long.MAX_VALUE
    t = runSnapshot(memstore);
    // test the case that the timeOfOldestEdit is updated after a KV delete
    memstore.delete(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
    t = runSnapshot(memstore);
    // test the case that the timeOfOldestEdit is updated after a KV upsert
    List<Cell> l = new ArrayList<Cell>();
    KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    kv1.setSequenceId(100);
    l.add(kv1);
    memstore.upsert(l, 1000);
    t = memstore.timeOfOldestEdit();
    assertTrue(t == 1234);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 4: testShouldFlushMeta
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
public void testShouldFlushMeta() throws Exception {
  // write an edit in the META and ensure the shouldFlush (that the periodic memstore
  // flusher invokes) returns true after SYSTEM_CACHE_FLUSH_INTERVAL (even though
  // the MEMSTORE_PERIODIC_FLUSH_INTERVAL is set to a higher value)
  Configuration conf = new Configuration();
  conf.setInt(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, HRegion.SYSTEM_CACHE_FLUSH_INTERVAL * 10);
  HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
  Path testDir = hbaseUtility.getDataTestDir();
  EnvironmentEdgeForMemstoreTest edge = new EnvironmentEdgeForMemstoreTest();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setCurrentTimeMillis(1234);
  WALFactory wFactory = new WALFactory(conf, null, "1234");
  HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
      conf, HTableDescriptor.metaTableDescriptor(conf),
      wFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()));
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("testShouldFlushMeta"),
      Bytes.toBytes("row_0200"), Bytes.toBytes("row_0300"));
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testShouldFlushMeta"));
  desc.addFamily(new HColumnDescriptor("foo".getBytes()));
  HRegion r = HRegion.createHRegion(hri, testDir, conf, desc,
      wFactory.getWAL(hri.getEncodedNameAsBytes()));
  HRegion.addRegionToMETA(meta, r);
  edge.setCurrentTimeMillis(1234 + 100);
  StringBuffer sb = new StringBuffer();
  assertTrue(meta.shouldFlush(sb) == false);
  edge.setCurrentTimeMillis(edge.currentTime() + HRegion.SYSTEM_CACHE_FLUSH_INTERVAL + 1);
  assertTrue(meta.shouldFlush(sb) == true);
}
Example 5: testAppendTimestampsAreMonotonic
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test
public void testAppendTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setValue(10);
  Append a = new Append(row);
  a.setDurability(Durability.SKIP_WAL);
  a.add(fam1, qual1, qual1);
  region.append(a);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(c.getTimestamp(), 10L);
  edge.setValue(1); // clock goes back
  region.append(a);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(c.getTimestamp(), 10L);
  byte[] expected = new byte[qual1.length * 2];
  System.arraycopy(qual1, 0, expected, 0, qual1.length);
  System.arraycopy(qual1, 0, expected, qual1.length, qual1.length);
  assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(),
      expected, 0, expected.length));
}
Example 6: testMergeExpiredFiles
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Test
public void testMergeExpiredFiles() throws Exception {
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  long now = defaultTtl + 2;
  edge.setValue(now);
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    StoreFile expiredFile = createFile(), notExpiredFile = createFile();
    when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
    when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
    List<StoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
    List<StoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
    List<StoreFile> mixed = Lists.newArrayList(expiredFile, notExpiredFile);
    StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create(),
        defaultSplitSize, defaultSplitCount, defaultInitialCount, true);
    // Merge expired if there are eligible stripes.
    StripeCompactionPolicy.StripeInformationProvider si =
        createStripesWithFiles(expired, expired, expired);
    verifyWholeStripesCompaction(policy, si, 0, 2, null, 1, Long.MAX_VALUE, false);
    // Don't merge if nothing expired.
    si = createStripesWithFiles(notExpired, notExpired, notExpired);
    assertNull(policy.selectCompaction(si, al(), false));
    // Merge one expired stripe with next.
    si = createStripesWithFiles(notExpired, expired, notExpired);
    verifyWholeStripesCompaction(policy, si, 1, 2, null, 1, Long.MAX_VALUE, false);
    // Merge the biggest run out of multiple options.
    si = createStripesWithFiles(notExpired, expired, notExpired, expired, expired, notExpired);
    verifyWholeStripesCompaction(policy, si, 3, 4, null, 1, Long.MAX_VALUE, false);
    // Stripe with a subset of expired files is not merged.
    si = createStripesWithFiles(expired, expired, notExpired, expired, mixed);
    verifyWholeStripesCompaction(policy, si, 0, 1, null, 1, Long.MAX_VALUE, false);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 7: testMergeExpiredStripes
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Test
public void testMergeExpiredStripes() throws Exception {
  // HBASE-11397
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  long now = defaultTtl + 2;
  edge.setValue(now);
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    StoreFile expiredFile = createFile(), notExpiredFile = createFile();
    when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
    when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
    List<StoreFile> expired = Lists.newArrayList(expiredFile, expiredFile);
    List<StoreFile> notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
    StripeCompactionPolicy policy =
        createPolicy(HBaseConfiguration.create(), defaultSplitSize, defaultSplitCount,
            defaultInitialCount, true);
    // Merge all three expired stripes into one.
    StripeCompactionPolicy.StripeInformationProvider si =
        createStripesWithFiles(expired, expired, expired);
    verifyMergeCompatcion(policy, si, 0, 2);
    // Merge two adjacent expired stripes into one.
    si = createStripesWithFiles(notExpired, expired, notExpired, expired, expired, notExpired);
    verifyMergeCompatcion(policy, si, 3, 4);
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 8: testCleanup
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test
public void testCleanup() throws Exception {
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    ServerNonceManager nm = createManager(6);
    ScheduledChore cleanup = nm.createCleanupScheduledChore(Mockito.mock(Stoppable.class));
    edge.setValue(1);
    assertTrue(nm.startOperation(NO_NONCE, 1, createStoppable()));
    assertTrue(nm.startOperation(NO_NONCE, 2, createStoppable()));
    assertTrue(nm.startOperation(NO_NONCE, 3, createStoppable()));
    edge.setValue(2);
    nm.endOperation(NO_NONCE, 1, true);
    edge.setValue(4);
    nm.endOperation(NO_NONCE, 2, true);
    edge.setValue(9);
    cleanup.choreForTesting();
    // Nonce 1 has been cleaned up.
    assertTrue(nm.startOperation(NO_NONCE, 1, createStoppable()));
    // Nonce 2 has not been cleaned up.
    assertFalse(nm.startOperation(NO_NONCE, 2, createStoppable()));
    // Nonce 3 was active and active ops should never be cleaned up; try to end and start.
    nm.endOperation(NO_NONCE, 3, false);
    assertTrue(nm.startOperation(NO_NONCE, 3, createStoppable()));
    edge.setValue(11);
    cleanup.choreForTesting();
    // Now, nonce 2 has been cleaned up.
    assertTrue(nm.startOperation(NO_NONCE, 2, createStoppable()));
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 9: testWalNonces
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test
public void testWalNonces() throws Exception {
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  try {
    ServerNonceManager nm = createManager(6);
    ScheduledChore cleanup = nm.createCleanupScheduledChore(Mockito.mock(Stoppable.class));
    // Add nonces from WAL, including dups.
    edge.setValue(12);
    nm.reportOperationFromWal(NO_NONCE, 1, 8);
    nm.reportOperationFromWal(NO_NONCE, 2, 2);
    nm.reportOperationFromWal(NO_NONCE, 3, 5);
    nm.reportOperationFromWal(NO_NONCE, 3, 6);
    // WAL nonces should prevent cross-server conflicts.
    assertFalse(nm.startOperation(NO_NONCE, 1, createStoppable()));
    // Make sure we ignore very old nonces, but not borderline old nonces.
    assertTrue(nm.startOperation(NO_NONCE, 2, createStoppable()));
    assertFalse(nm.startOperation(NO_NONCE, 3, createStoppable()));
    // Make sure grace period is counted from recovery time.
    edge.setValue(17);
    cleanup.choreForTesting();
    assertFalse(nm.startOperation(NO_NONCE, 1, createStoppable()));
    assertFalse(nm.startOperation(NO_NONCE, 3, createStoppable()));
    edge.setValue(19);
    cleanup.choreForTesting();
    assertTrue(nm.startOperation(NO_NONCE, 1, createStoppable()));
    assertTrue(nm.startOperation(NO_NONCE, 3, createStoppable()));
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 10: testConnectionIdle
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
/**
 * Test that connection can become idle without breaking everything.
 */
@Test
public void testConnectionIdle() throws Exception {
  TableName tableName = TableName.valueOf("HCM-testConnectionIdle");
  TEST_UTIL.createTable(tableName, FAM_NAM).close();
  int idleTime = 20000;
  boolean previousBalance = TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, true);
  Configuration c2 = new Configuration(TEST_UTIL.getConfiguration());
  // We want to work on a separate connection.
  c2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
  c2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); // Don't retry: retry = test failed
  c2.setInt(RpcClient.IDLE_TIME, idleTime);
  final Table table = new HTable(c2, tableName);
  Put put = new Put(ROW);
  put.add(FAM_NAM, ROW, ROW);
  table.put(put);
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  mee.setValue(System.currentTimeMillis());
  EnvironmentEdgeManager.injectEdge(mee);
  LOG.info("first get");
  table.get(new Get(ROW));
  LOG.info("first get - changing the time & sleeping");
  mee.incValue(idleTime + 1000);
  Thread.sleep(1500); // we need to wait a little for the connection to be seen as idle.
                      // 1500 = sleep time in RpcClient#waitForWork + a margin
  LOG.info("second get - connection has been marked idle in the middle");
  // To check that the connection actually became idle would need to read some private
  // fields of RpcClient.
  table.get(new Get(ROW));
  mee.incValue(idleTime + 1000);
  LOG.info("third get - connection is idle, but the reader doesn't know yet");
  // We're testing here a special case:
  // time limit reached BUT connection not yet reclaimed AND a new call.
  // In this situation, we don't close the connection, instead we use it immediately.
  // If we're very unlucky we can have a race condition in the test: the connection is already
  // under closing when we do the get, so we have an exception, and we don't retry as the
  // retry number is 1. The probability is very very low, and seems acceptable for now. It's
  // a test issue only.
  table.get(new Get(ROW));
  LOG.info("we're done - time will change back");
  table.close();
  EnvironmentEdgeManager.reset();
  TEST_UTIL.getHBaseAdmin().setBalancerRunning(previousBalance, true);
}
Example 11: testErrorBackoffTimeCalculation
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Ignore("Test presumes RETRY_BACKOFF will never change; it has")
@Test
public void testErrorBackoffTimeCalculation() throws Exception {
  // TODO: This test would seem to presume hardcoded RETRY_BACKOFF which it should not.
  final long ANY_PAUSE = 100;
  ServerName location = ServerName.valueOf("127.0.0.1", 1, 0);
  ServerName diffLocation = ServerName.valueOf("127.0.0.1", 2, 0);
  ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(timeMachine);
  try {
    long timeBase = timeMachine.currentTime();
    long largeAmountOfTime = ANY_PAUSE * 1000;
    ConnectionManager.ServerErrorTracker tracker =
        new ConnectionManager.ServerErrorTracker(largeAmountOfTime, 100);
    // The default backoff is 0.
    assertEquals(0, tracker.calculateBackoffTime(location, ANY_PAUSE));
    // Check some backoff values from HConstants sequence.
    tracker.reportServerError(location);
    assertEqualsWithJitter(ANY_PAUSE, tracker.calculateBackoffTime(location, ANY_PAUSE));
    tracker.reportServerError(location);
    tracker.reportServerError(location);
    tracker.reportServerError(location);
    assertEqualsWithJitter(ANY_PAUSE * 5, tracker.calculateBackoffTime(location, ANY_PAUSE));
    // All of this shouldn't affect backoff for different location.
    assertEquals(0, tracker.calculateBackoffTime(diffLocation, ANY_PAUSE));
    tracker.reportServerError(diffLocation);
    assertEqualsWithJitter(ANY_PAUSE, tracker.calculateBackoffTime(diffLocation, ANY_PAUSE));
    // Check with different base.
    assertEqualsWithJitter(ANY_PAUSE * 10,
        tracker.calculateBackoffTime(location, ANY_PAUSE * 2));
    // See that time from last error is taken into account. Time shift is applied after jitter,
    // so pass the original expected backoff as the base for jitter.
    long timeShift = (long) (ANY_PAUSE * 0.5);
    timeMachine.setValue(timeBase + timeShift);
    assertEqualsWithJitter((ANY_PAUSE * 5) - timeShift,
        tracker.calculateBackoffTime(location, ANY_PAUSE), ANY_PAUSE * 2);
    // However we should not go into negative.
    timeMachine.setValue(timeBase + ANY_PAUSE * 100);
    assertEquals(0, tracker.calculateBackoffTime(location, ANY_PAUSE));
    // We also should not go over the boundary; last retry would be on it.
    long timeLeft = (long) (ANY_PAUSE * 0.5);
    timeMachine.setValue(timeBase + largeAmountOfTime - timeLeft);
    assertTrue(tracker.canRetryMore(1));
    tracker.reportServerError(location);
    assertEquals(timeLeft, tracker.calculateBackoffTime(location, ANY_PAUSE));
    timeMachine.setValue(timeBase + largeAmountOfTime);
    assertFalse(tracker.canRetryMore(1));
  } finally {
    EnvironmentEdgeManager.reset();
  }
}
Example 12: before
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Before
public void before() {
  mee.setValue(0);
  EnvironmentEdgeManager.injectEdge(mee);
}
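Example 12 injects the edge in a @Before hook but does not reset it there; a matching @After hook, sketched below (not part of the original example), keeps the manual clock from leaking into later tests:

@After
public void after() {
  EnvironmentEdgeManager.reset(); // undo the @Before injection so later tests see real time
}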
Example 13: testHFileCleaning
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test(timeout = 60 * 1000)
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  String prefix = "someHFileThatWouldBeAUUID";
  Configuration conf = UTIL.getConfiguration();
  // set TTL
  long ttl = 2000;
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  FileSystem fs = FileSystem.get(conf);
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
  final long createTime = System.currentTimeMillis();
  fs.delete(archivedHfileDir, true);
  fs.mkdirs(archivedHfileDir);
  // Case 1: 1 invalid file, which should be deleted directly
  fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  LOG.debug("Now is: " + createTime);
  for (int i = 1; i < 32; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveHFileCleaner)
    Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
    fs.createNewFile(fileName);
    // set the creation time past ttl to ensure that it gets removed
    fs.setTimes(fileName, createTime - ttl - 1, -1);
    LOG.debug("Creating " + getFileStats(fileName, fs));
  }
  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain
  Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
  fs.createNewFile(saved);
  // set creation time within the ttl
  fs.setTimes(saved, createTime - ttl / 2, -1);
  LOG.debug("Creating " + getFileStats(saved, fs));
  for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
    LOG.debug(stat.getPath().toString());
  }
  assertEquals(33, fs.listStatus(archivedHfileDir).length);
  // set a custom edge manager to handle time checking
  EnvironmentEdge setTime = new EnvironmentEdge() {
    @Override
    public long currentTime() {
      return createTime;
    }
  };
  EnvironmentEdgeManager.injectEdge(setTime);
  // run the chore
  cleaner.chore();
  // ensure we only end up with the saved file
  assertEquals(1, fs.listStatus(archivedHfileDir).length);
  for (FileStatus file : fs.listStatus(archivedHfileDir)) {
    LOG.debug("Kept hfiles: " + file.getPath().getName());
  }
  // reset the edge back to the original edge
  EnvironmentEdgeManager.injectEdge(originalEdge);
}
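Example 13 saves the original delegate with getDelegate() and re-injects it at the end; a try/finally variant of that same save/restore pattern, sketched here under the same API, guarantees the restore even when the test throws (testEdge stands in for whatever custom edge the test needs):

final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
EnvironmentEdgeManager.injectEdge(testEdge); // testEdge is a placeholder custom edge
try {
  // ... test body ...
} finally {
  EnvironmentEdgeManager.injectEdge(originalEdge); // always restore the saved edge
}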
Example 14: testIncrementTimestampsAreMonotonic
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@Test
public void testIncrementTimestampsAreMonotonic() throws IOException {
  HRegion region = initHRegion(tableName, name.getMethodName(), CONF, fam1);
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(edge);
  edge.setValue(10);
  Increment inc = new Increment(row);
  inc.setDurability(Durability.SKIP_WAL);
  inc.addColumn(fam1, qual1, 1L);
  region.increment(inc);
  Result result = region.get(new Get(row));
  Cell c = result.getColumnLatestCell(fam1, qual1);
  assertNotNull(c);
  assertEquals(c.getTimestamp(), 10L);
  edge.setValue(1); // clock goes back
  region.increment(inc);
  result = region.get(new Get(row));
  c = result.getColumnLatestCell(fam1, qual1);
  assertEquals(c.getTimestamp(), 10L);
  assertEquals(Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength()), 2L);
}
Example 15: setEnvironmentEdge
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; // import the package/class this method depends on
@BeforeClass
public static void setEnvironmentEdge() {
  EnvironmentEdge ee = new TimeOffsetEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(ee);
}
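Example 15 injects a TimeOffsetEnvironmentEdge; a custom edge only needs to supply currentTime(), as the anonymous class in Example 13 shows. The sketch below is an illustrative stand-in, not the actual TimeOffsetEnvironmentEdge source:

// Hypothetical fixed-offset edge; the real TimeOffsetEnvironmentEdge may differ.
public class FixedOffsetEnvironmentEdge implements EnvironmentEdge {
  private final long offsetMillis;

  public FixedOffsetEnvironmentEdge(long offsetMillis) {
    this.offsetMillis = offsetMillis;
  }

  @Override
  public long currentTime() {
    return System.currentTimeMillis() + offsetMillis; // shift the real clock by a fixed amount
  }
}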