本文整理汇总了Java中org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper.injectEdge方法的典型用法代码示例。如果您正苦于以下问题:Java EnvironmentEdgeManagerTestHelper.injectEdge方法的具体用法?Java EnvironmentEdgeManagerTestHelper.injectEdge怎么用?Java EnvironmentEdgeManagerTestHelper.injectEdge使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper的用法示例。
在下文中一共展示了EnvironmentEdgeManagerTestHelper.injectEdge方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testIncrementColumnValue_heapSize
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/**
 * Repeatedly increments a single column and checks that the region's reported
 * memstore size never goes negative (i.e. ICV heap-size accounting is sane).
 */
public void testIncrementColumnValue_heapSize() throws IOException {
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    final long delta = 1L;
    for (int iteration = 0; iteration < 1000; iteration++) {
      region.incrementColumnValue(row, fam1, qual1, delta, true);
      long memSize = region.memstoreSize.get();
      assertTrue("memstore size: " + memSize, memSize >= 0);
    }
  } finally {
    // Always release the region, even if an assertion fired mid-loop.
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
示例2: setUpBeforeClass
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Enable the quota subsystem and shorten its cache refresh interval so the
// tests observe quota changes quickly.
TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME);
// Mini-cluster tuning: heartbeat, client pause and retry counts are reduced
// to keep the test run short.
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
TEST_UTIL.startMiniCluster(1);
// Quota state lives in a system table; wait for it before creating test tables.
TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
QuotaCache.TEST_FORCE_REFRESH = true;
tables = new Table[TABLE_NAMES.length];
for (int i = 0; i < TABLE_NAMES.length; ++i) {
tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
}
// Install a manually controlled clock, seeded with the real current time, so
// individual tests can advance time deterministically (e.g. past quota windows).
envEdge = new ManualEnvironmentEdge();
envEdge.setValue(EnvironmentEdgeManager.currentTime());
EnvironmentEdgeManagerTestHelper.injectEdge(envEdge);
}
示例3: setUpBeforeClass
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// One-time setup: start the mini cluster with relaxed table checks and a
// strictly increasing clock.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Disable table sanity checks so tests may create otherwise-rejected layouts.
TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
TEST_UTIL.startMiniCluster();
//ensure that server time increments every time we do an operation, otherwise
//successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
示例4: setUpBeforeClass
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// One-time setup: run the Thrift server over HTTP and start the mini cluster
// with a strictly increasing clock.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.thrift.http", true);
// Disable table sanity checks so tests may create otherwise-rejected layouts.
TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
TEST_UTIL.startMiniCluster();
//ensure that server time increments every time we do an operation, otherwise
//successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
示例5: setUp
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// Per-test setup: install a strictly increasing clock (see HBASE-6832 note below).
@Before
public void setUp() throws Exception {
/* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
 * implicit RS timing.
 * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
 * compact timestamps are tracked. Otherwise, forced major compaction will not purge
 * Delete's having the same timestamp. see ScanQueryMatcher.match():
 * if (retainDeletesInOutput
 * || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
 * <= timeToPurgeDeletes) ... )
 *
 */
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
示例6: testIncrementColumnValue_BumpSnapshot
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/**
 * Verifies that an ICV performed while the original cell sits in the memstore
 * snapshot produces a second version with a distinct (bumped) timestamp.
 */
public void testIncrementColumnValue_BumpSnapshot() throws IOException {
  ManualEnvironmentEdge manualClock = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(manualClock);
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    final long initial = 42L;
    final long delta = 44L;
    // Seed the kvset with one value, then push that cell into the snapshot.
    Put seed = new Put(row);
    seed.add(fam1, qual1, Bytes.toBytes(initial));
    region.put(seed);
    Store fam1Store = region.getStore(fam1);
    fam1Store.snapshot(); //bam
    // Increment while the original value lives only in the snapshot.
    long incremented = region.incrementColumnValue(row, fam1, qual1, delta, false);
    assertEquals(initial + delta, incremented);
    // Read back every version; the ICV must not have clobbered the timestamp.
    Get get = new Get(row);
    get.setMaxVersions();
    get.addColumn(fam1, qual1);
    Result result = region.get(get, null);
    assertEquals(2, result.size());
    KeyValue newer = result.raw()[0];
    KeyValue older = result.raw()[1];
    assertTrue("ICV failed to upgrade timestamp",
        newer.getTimestamp() != older.getTimestamp());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
示例7: setUpBeforeClass
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// One-time setup: start the mini cluster with a strictly increasing clock.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
//ensure that server time increments every time we do an operation, otherwise
//successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
示例8: setUp
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// Per-test setup: install a strictly increasing clock (see HBASE-6832 note below).
@Before
public void setUp() throws Exception {
/* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
 * implicit RS timing.
 * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete
 * compact timestamps are tracked. Otherwise, forced major compaction will not purge
 * Delete's having the same timestamp. see ScanQueryMatcher.match():
 * if (retainDeletesInOutput
 * || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
 * <= timeToPurgeDeletes) ... )
 *
 */
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
示例9: waitMinuteQuota
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/** Jumps the injected clock 70 seconds ahead, past the one-minute quota window. */
private void waitMinuteQuota() {
  long future = EnvironmentEdgeManager.currentTime() + 70000;
  EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge(future));
}
示例10: testDeleteMarkerLongevity
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/**
 * Verifies which cells a COMPACT_DROP_DELETES scan keeps under the configured
 * TTL (500) and timeToPurgeDeletes (200): only the cells annotated "live" in
 * the fixture below must survive, in order.
 * @throws Exception on scanner failure
 */
public void testDeleteMarkerLongevity() throws Exception {
  try {
    // Pin the clock at 'now' so every TTL / time-to-purge comparison below is
    // deterministic relative to the fixture timestamps.
    final long now = System.currentTimeMillis();
    EnvironmentEdgeManagerTestHelper.injectEdge(new EnvironmentEdge() {
      @Override
      public long currentTime() {
        return now;
      }
    });
    KeyValue[] kvs = new KeyValue[] {
      /*0*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
          now - 100, KeyValue.Type.DeleteFamily), // live
      /*1*/ new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null,
          now - 1000, KeyValue.Type.DeleteFamily), // expired
      /*2*/ KeyValueTestUtil.create("R1", "cf", "a", now - 50,
          KeyValue.Type.Put, "v3"), // live
      /*3*/ KeyValueTestUtil.create("R1", "cf", "a", now - 55,
          KeyValue.Type.Delete, "dontcare"), // live
      /*4*/ KeyValueTestUtil.create("R1", "cf", "a", now - 55,
          KeyValue.Type.Put, "deleted-version v2"), // deleted
      /*5*/ KeyValueTestUtil.create("R1", "cf", "a", now - 60,
          KeyValue.Type.Put, "v1"), // live
      /*6*/ KeyValueTestUtil.create("R1", "cf", "a", now - 65,
          KeyValue.Type.Put, "v0"), // max-version reached
      /*7*/ KeyValueTestUtil.create("R1", "cf", "a",
          now - 100, KeyValue.Type.DeleteColumn, "dont-care"), // max-version
      /*8*/ KeyValueTestUtil.create("R1", "cf", "b", now - 600,
          KeyValue.Type.DeleteColumn, "dont-care"), //expired
      /*9*/ KeyValueTestUtil.create("R1", "cf", "b", now - 70,
          KeyValue.Type.Put, "v2"), //live
      /*10*/ KeyValueTestUtil.create("R1", "cf", "b", now - 750,
          KeyValue.Type.Put, "v1"), //expired
      /*11*/ KeyValueTestUtil.create("R1", "cf", "c", now - 500,
          KeyValue.Type.Delete, "dontcare"), //expired
      /*12*/ KeyValueTestUtil.create("R1", "cf", "c", now - 600,
          KeyValue.Type.Put, "v1"), //expired
      /*13*/ KeyValueTestUtil.create("R1", "cf", "c", now - 1000,
          KeyValue.Type.Delete, "dontcare"), //expired
      /*14*/ KeyValueTestUtil.create("R1", "cf", "d", now - 60,
          KeyValue.Type.Put, "expired put"), //live
      /*15*/ KeyValueTestUtil.create("R1", "cf", "d", now - 100,
          KeyValue.Type.Delete, "not-expired delete"), //live
    };
    List<KeyValueScanner> scanners = scanFixture(kvs);
    Scan scan = new Scan();
    scan.setMaxVersions(2);
    ScanInfo scanInfo = new ScanInfo(CONF, Bytes.toBytes("cf"),
        0 /* minVersions */,
        2 /* maxVersions */, 500 /* ttl */,
        KeepDeletedCells.FALSE /* keepDeletedCells */,
        200, /* timeToPurgeDeletes */
        KeyValue.COMPARATOR);
    StoreScanner scanner =
        new StoreScanner(scan, scanInfo,
            ScanType.COMPACT_DROP_DELETES, null, scanners,
            HConstants.OLDEST_TIMESTAMP);
    // Fix: the original allocated this list and then immediately discarded it by
    // assigning a second identical allocation; one allocation suffices.
    List<Cell> results = new ArrayList<Cell>();
    assertEquals(true, scanner.next(results));
    // Exactly the "live" cells above survive the compaction scan, in order.
    assertEquals(kvs[0], results.get(0));
    assertEquals(kvs[2], results.get(1));
    assertEquals(kvs[3], results.get(2));
    assertEquals(kvs[5], results.get(3));
    assertEquals(kvs[9], results.get(4));
    assertEquals(kvs[14], results.get(5));
    assertEquals(kvs[15], results.get(6));
    assertEquals(7, results.size());
    scanner.close();
  } finally {
    // Restore the real clock so later tests are unaffected.
    EnvironmentEdgeManagerTestHelper.reset();
  }
}
示例11: testICV_negMemstoreSize
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// Regression test: repeated updateColumnValue calls must never report a
// negative heap-size delta, and the sum of the deltas must equal the heap
// size recomputed directly from the memstore's cell set.
@Test
public void testICV_negMemstoreSize() throws IOException {
init(this.name.getMethodName());
// Drive the clock manually so the updateColumnValue timestamps are controlled
// by the loop below (advanced every 1000 iterations).
long time = 100;
ManualEnvironmentEdge ee = new ManualEnvironmentEdge();
ee.setValue(time);
EnvironmentEdgeManagerTestHelper.injectEdge(ee);
long newValue = 3L;
long size = 0;
// Seed the memstore with several unrelated rows, accumulating the reported
// heap-size deltas.
// NOTE(review): these seeds use System.currentTimeMillis() rather than the
// injected edge -- presumably intentional, but worth confirming.
size += this.store.add(new KeyValue(Bytes.toBytes("200909091000"), family, qf1,
System.currentTimeMillis(), Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091200"), family, qf1,
System.currentTimeMillis(), Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091300"), family, qf1,
System.currentTimeMillis(), Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091400"), family, qf1,
System.currentTimeMillis(), Bytes.toBytes(newValue)));
size += this.store.add(new KeyValue(Bytes.toBytes("200909091500"), family, qf1,
System.currentTimeMillis(), Bytes.toBytes(newValue)));
for ( int i = 0 ; i < 10000 ; ++i) {
newValue++;
long ret = this.store.updateColumnValue(row, family, qf1, newValue);
long ret2 = this.store.updateColumnValue(row2, family, qf1, newValue);
if (ret != 0) System.out.println("ret: " + ret);
if (ret2 != 0) System.out.println("ret2: " + ret2);
// A negative delta means the memstore size accounting went backwards.
Assert.assertTrue("ret: " + ret, ret >= 0);
size += ret;
Assert.assertTrue("ret2: " + ret2, ret2 >= 0);
size += ret2;
if (i % 1000 == 0)
ee.setValue(++time);
}
// Recompute the expected heap size straight from the memstore contents and
// reconcile it with the accumulated deltas.
long computedSize=0;
for (Cell cell : ((DefaultMemStore)this.store.memstore).cellSet) {
long kvsize = DefaultMemStore.heapSizeChange(cell, true);
//System.out.println(kv + " size= " + kvsize + " kvsize= " + kv.heapSize());
computedSize += kvsize;
}
Assert.assertEquals(computedSize, size);
}
示例12: testIncrementColumnValue_SnapshotFlushCombo
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/**
 * Verifies ICV timestamp behavior across a snapshot + flush: an update arriving
 * with the same manual-clock timestamp as the snapshotted Put must still land
 * as a newer version, both before and after the clock advances.
 * @throws Exception on store/flush failure
 */
@Test
public void testIncrementColumnValue_SnapshotFlushCombo() throws Exception {
  // Manual clock: time stands still unless the test advances it explicitly.
  ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(mee);
  init(this.name.getMethodName());
  long oldValue = 1L;
  long newValue = 3L;
  this.store.add(new KeyValue(row, family, qf1,
      EnvironmentEdgeManager.currentTime(),
      Bytes.toBytes(oldValue)));
  // snapshot the store.
  this.store.snapshot();
  // update during the snapshot, the exact same TS as the Put (lololol)
  long ret = this.store.updateColumnValue(row, family, qf1, newValue);
  // memstore should have grown by some amount.
  Assert.assertTrue(ret > 0);
  // then flush.
  flushStore(store, id++);
  Assert.assertEquals(1, this.store.getStorefiles().size());
  Assert.assertEquals(1, ((DefaultMemStore)this.store.memstore).cellSet.size());
  // now increment again:
  newValue += 1;
  this.store.updateColumnValue(row, family, qf1, newValue);
  // at this point we have a TS=1 in snapshot, and a TS=2 in kvset, so increment again:
  newValue += 1;
  this.store.updateColumnValue(row, family, qf1, newValue);
  // the second TS should be TS=2 or higher., even though 'time=1' right now.
  // how many key/values for this row are there?
  Get get = new Get(row);
  get.addColumn(family, qf1);
  get.setMaxVersions(); // all versions.
  // Fix: the original allocated a throwaway ArrayList that was immediately
  // overwritten by the store-file read; assign the result directly instead.
  List<Cell> results = HBaseTestingUtility.getFromStoreFile(store, get);
  Assert.assertEquals(2, results.size());
  long ts1 = results.get(0).getTimestamp();
  long ts2 = results.get(1).getTimestamp();
  Assert.assertTrue(ts1 > ts2);
  Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
  Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));
  mee.setValue(2); // time goes up slightly
  newValue += 1;
  this.store.updateColumnValue(row, family, qf1, newValue);
  results = HBaseTestingUtility.getFromStoreFile(store, get);
  Assert.assertEquals(2, results.size());
  ts1 = results.get(0).getTimestamp();
  ts2 = results.get(1).getTimestamp();
  Assert.assertTrue(ts1 > ts2);
  Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
  Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));
}
示例13: testDelete_mixed
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
// Exercises a mix of column deletes, row deletes and re-puts against one row,
// checking visibility via Get after each step.
public void testDelete_mixed() throws IOException, InterruptedException {
byte [] tableName = Bytes.toBytes("testtable");
byte [] fam = Bytes.toBytes("info");
byte [][] families = {fam};
String method = this.getName();
this.region = initHRegion(tableName, method, conf, families);
try {
// Increasing clock so successive puts/deletes get distinct timestamps.
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
byte [] row = Bytes.toBytes("table_name");
// column names
byte [] serverinfo = Bytes.toBytes("serverinfo");
byte [] splitA = Bytes.toBytes("splitA");
byte [] splitB = Bytes.toBytes("splitB");
// add some data:
Put put = new Put(row);
put.add(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
put = new Put(row);
put.add(fam, splitB, Bytes.toBytes("reference_B"));
region.put(put);
put = new Put(row);
put.add(fam, serverinfo, Bytes.toBytes("ip_address"));
region.put(put);
// ok now delete a split:
Delete delete = new Delete(row);
delete.deleteColumns(fam, splitA);
region.delete(delete, null, true);
// assert some things: splitA gone, the other two columns untouched.
Get get = new Get(row).addColumn(fam, serverinfo);
Result result = region.get(get, null);
assertEquals(1, result.size());
get = new Get(row).addColumn(fam, splitA);
result = region.get(get, null);
assertEquals(0, result.size());
get = new Get(row).addColumn(fam, splitB);
result = region.get(get, null);
assertEquals(1, result.size());
// Assert that after a delete, I can put.
put = new Put(row);
put.add(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
get = new Get(row);
result = region.get(get, null);
assertEquals(3, result.size());
// Now delete all... then test I can add stuff back
delete = new Delete(row);
region.delete(delete, null, false);
assertEquals(0, region.get(get, null).size());
region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A")));
result = region.get(get, null);
assertEquals(1, result.size());
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
示例14: doTestDelete_AndPostInsert
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/**
 * Shared driver: applies the caller-supplied {@code delete} to a freshly
 * written value, re-puts a second value, and verifies only the new value is
 * visible via both Get and Scan.
 */
public void doTestDelete_AndPostInsert(Delete delete)
    throws IOException, InterruptedException {
  this.region = initHRegion(tableName, getName(), conf, fam1);
  try {
    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
    // Write an initial value.
    Put firstPut = new Put(row);
    firstPut.add(fam1, qual1, value1);
    region.put(firstPut);
    // Delete it with whatever Delete the caller supplied.
    region.delete(delete, null, true);
    // A later put must be visible despite the earlier delete.
    Put secondPut = new Put(row);
    secondPut.add(fam1, qual1, value2);
    region.put(secondPut);
    // Check via Get: exactly one cell, carrying the second value.
    Get get = new Get(row);
    get.addColumn(fam1, qual1);
    Result getResult = region.get(get, null);
    assertEquals(1, getResult.size());
    assertByteEquals(value2, getResult.getValue(fam1, qual1));
    // Check via Scan as well.
    Scan scan = new Scan(row);
    scan.addColumn(fam1, qual1);
    InternalScanner scanner = region.getScanner(scan);
    List<KeyValue> scanned = new ArrayList<KeyValue>();
    assertEquals(false, scanner.next(scanned));
    assertEquals(1, scanned.size());
    KeyValue found = scanned.get(0);
    assertByteEquals(value2, found.getValue());
    assertByteEquals(fam1, found.getFamily());
    assertByteEquals(qual1, found.getQualifier());
    assertByteEquals(row, found.getRow());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
示例15: testIncrementColumnValue_UpdatingInPlace_TimestampClobber
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; //导入方法依赖的package包/类
/**
 * Added for HBASE-3235.
 *
 * When the initial put and an ICV update were arriving with the same timestamp,
 * the initial Put KV was being skipped during the matching-KV iteration in
 * {@link MemStore#upsert(KeyValue)}, causing the update-in-place to not
 * happen and the ICV put to effectively disappear.
 * @throws IOException
 */
public void testIncrementColumnValue_UpdatingInPlace_TimestampClobber() throws IOException {
this.region = initHRegion(tableName, getName(), conf, fam1);
try {
long value = 1L;
long amount = 3L;
// Freeze the clock at 'now' so the Put and the ICV share one timestamp.
long now = EnvironmentEdgeManager.currentTimeMillis();
ManualEnvironmentEdge mock = new ManualEnvironmentEdge();
mock.setValue(now);
EnvironmentEdgeManagerTestHelper.injectEdge(mock);
// verify we catch an ICV on a put with the same timestamp
Put put = new Put(row);
put.add(fam1, qual1, now, Bytes.toBytes(value));
region.put(put);
long result = region.incrementColumnValue(row, fam1, qual1, amount, true);
assertEquals(value+amount, result);
Store store = region.getStore(fam1);
// ICV should update the existing Put with the same timestamp:
// one KV in the kvset means upsert replaced in place instead of adding.
assertEquals(1, store.memstore.kvset.size());
assertTrue(store.memstore.snapshot.isEmpty());
assertICV(row, fam1, qual1, value+amount);
// verify we catch an ICV even when the put ts > now
put = new Put(row);
put.add(fam1, qual2, now+1, Bytes.toBytes(value));
region.put(put);
result = region.incrementColumnValue(row, fam1, qual2, amount, true);
assertEquals(value+amount, result);
store = region.getStore(fam1);
// ICV should update the existing Put with the same timestamp
assertEquals(2, store.memstore.kvset.size());
assertTrue(store.memstore.snapshot.isEmpty());
assertICV(row, fam1, qual2, value+amount);
// Restore the real clock before the region is torn down.
EnvironmentEdgeManagerTestHelper.reset();
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}