This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge. If you are wondering what IncrementingEnvironmentEdge is for, or how and where to use it, the curated examples below should help.
IncrementingEnvironmentEdge belongs to the org.apache.hadoop.hbase.util package. The 15 code examples that follow are ordered by popularity by default.
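Before the examples, here is a minimal sketch of the pattern they all share: inject an IncrementingEnvironmentEdge so that every clock read returns a strictly increasing value, then restore the real clock afterwards. This sketch is illustrative rather than taken from the examples below, and assumes a recent HBase API; older versions expose currentTimeMillis() instead of currentTime().
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

public class IncrementingEdgeSketch {
  public static void main(String[] args) {
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    try {
      // The injected edge ticks once per read, so timestamps can never collide.
      long t1 = EnvironmentEdgeManager.currentTime();
      long t2 = EnvironmentEdgeManager.currentTime();
      assert t2 == t1 + 1;
      edge.incrementTime(70000); // jump the clock forward 70 seconds
    } finally {
      EnvironmentEdgeManager.reset(); // restore the default system clock
    }
  }
}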
Example 1: testIncrementColumnValue_heapSize
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
public void testIncrementColumnValue_heapSize() throws IOException {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
this.region = initHRegion(tableName, getName(), conf, fam1);
try {
long byAmount = 1L;
long size;
for (int i = 0; i < 1000; i++) {
region.incrementColumnValue(row, fam1, qual1, byAmount, true);
size = region.memstoreSize.get();
assertTrue("memstore size: " + size, size >= 0);
}
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 2: setUpBeforeClass
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
TEST_UTIL.startMiniCluster();
// ensure that server time increments every time we do an operation, otherwise
// successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Example 3: setUpBeforeClass
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.thrift.http", true);
TEST_UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
TEST_UTIL.startMiniCluster();
// ensure that server time increments every time we do an operation, otherwise
// successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
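Neither setup above is shown with its teardown on this page. A hedged sketch of the usual counterpart, assuming the same TEST_UTIL fixture, so the injected edge does not leak into later test classes:
@AfterClass
public static void tearDownAfterClass() throws Exception {
  TEST_UTIL.shutdownMiniCluster();
  // Undo the edge injected in setUpBeforeClass.
  EnvironmentEdgeManagerTestHelper.reset();
}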
Example 4: setUp
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@Before
public void setUp() throws Exception {
/* HBASE-6832: [WINDOWS] Tests should use explicit timestamps for Puts, and not rely on
 * implicit RS timing.
 * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete and
 * compact timestamps are distinct. Otherwise, a forced major compaction will not purge
 * Deletes having the same timestamp. See ScanQueryMatcher.match():
 * if (retainDeletesInOutput
 * || (!isUserScan && (EnvironmentEdgeManager.currentTime() - timestamp)
 * <= timeToPurgeDeletes) ... )
 */
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
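The timeToPurgeDeletes in the check quoted above is configurable. A hedged one-liner, assuming the test's Configuration object is in scope as conf, that pins it to zero so a forced major compaction may purge delete markers immediately:
// 0 means a major compaction can purge eligible delete markers right away.
conf.setLong("hbase.hstore.time.to.purge.deletes", 0L);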
Example 5: setUpBeforeClass
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
// ensure that server time increments every time we do an operation, otherwise
// successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Example 6: setUp
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@Before
public void setUp() throws Exception {
/* HBASE-6832: [WINDOWS] Tests should use explicit timestamps for Puts, and not rely on
 * implicit RS timing.
 * Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete and
 * compact timestamps are distinct. Otherwise, a forced major compaction will not purge
 * Deletes having the same timestamp. See ScanQueryMatcher.match():
 * if (retainDeletesInOutput
 * || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
 * <= timeToPurgeDeletes) ... )
 */
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
Example 7: testBypassAlsoCompletes
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
/**
 * Test that when bypass is called, we skip calling any of the other coprocessors' stacked-up
 * methods; in this case, a prePut.
 * If the column family is 'test', then bypass is invoked.
 */
@Test
public void testBypassAlsoCompletes() throws IOException {
// ensure that server time increments every time we do an operation, otherwise
// previous deletes will eclipse successive puts having the same timestamp
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
Table t = util.getConnection().getTable(tableName);
List<Put> puts = new ArrayList<>();
Put p = new Put(row1);
p.addColumn(dummy, dummy, dummy);
puts.add(p);
p = new Put(row2);
p.addColumn(test, dummy, dummy);
puts.add(p);
p = new Put(row3);
p.addColumn(test, dummy, dummy);
puts.add(p);
t.put(puts);
// Ensure expected result.
checkRowAndDelete(t, row1, 1);
checkRowAndDelete(t, row2, 0);
checkRowAndDelete(t, row3, 0);
// We have three Coprocessors stacked up on the prePut. See the beforeClass setup. We did three
// puts above, two of which bypassed. A bypass means do not call the other coprocessors in the
// stack, so for the two 'test' puts above, we should not have called through to all
// three coprocessors in the chain. So we should have:
// 3 invocations for the first put + 1 invocation + 1 bypass for the second put + 1 invocation +
// 1 bypass for the last put. Assert.
assertEquals("Total CP invocation count", 5, TestCoprocessor.PREPUT_INVOCATIONS.get());
assertEquals("Total CP bypasses", 2, TestCoprocessor.PREPUT_BYPASSES.get());
}
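The page does not show TestCoprocessor itself. Below is a hedged reconstruction of what a bypassing prePut observer could look like against the HBase 2.x coprocessor API; the counter names and the 'test'-family check are inferred from the assertions above, and the class would normally be a static nested class inside the test.
import java.io.IOException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WALEdit;

public class TestCoprocessor implements RegionCoprocessor, RegionObserver {
  static final AtomicInteger PREPUT_INVOCATIONS = new AtomicInteger();
  static final AtomicInteger PREPUT_BYPASSES = new AtomicInteger();
  private static final byte[] TEST = Bytes.toBytes("test");

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit, Durability durability) throws IOException {
    PREPUT_INVOCATIONS.incrementAndGet();
    if (put.getFamilyCellMap().containsKey(TEST)) {
      PREPUT_BYPASSES.incrementAndGet();
      c.bypass(); // skip the remaining coprocessors stacked on prePut
    }
  }
}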
Example 8: waitMinuteQuota
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
private void waitMinuteQuota() {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge(
EnvironmentEdgeManager.currentTime() + 70000));
}
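For context, a hedged sketch of how such a helper tends to be used in a throttle test; the quota setup and the doPuts helper are illustrative assumptions, not shown on this page:
@Test
public void testUserThrottleRefills() throws Exception {
  // Assumed fixture: admin, table, userName, and doPuts(n, table), which attempts
  // n puts and returns how many succeeded before being throttled.
  admin.setQuota(QuotaSettingsFactory.throttleUser(userName,
      ThrottleType.REQUEST_NUMBER, 6, TimeUnit.MINUTES));
  assertEquals(6, doPuts(100, table));  // the 7th put in the window is throttled
  waitMinuteQuota();                    // jump the clock 70 seconds ahead
  assertEquals(6, doPuts(100, table));  // a fresh minute window is available
}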
Example 9: testDelete_mixed
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
public void testDelete_mixed() throws IOException, InterruptedException {
byte [] tableName = Bytes.toBytes("testtable");
byte [] fam = Bytes.toBytes("info");
byte [][] families = {fam};
String method = this.getName();
this.region = initHRegion(tableName, method, conf, families);
try {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
byte [] row = Bytes.toBytes("table_name");
// column names
byte [] serverinfo = Bytes.toBytes("serverinfo");
byte [] splitA = Bytes.toBytes("splitA");
byte [] splitB = Bytes.toBytes("splitB");
// add some data:
Put put = new Put(row);
put.add(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
put = new Put(row);
put.add(fam, splitB, Bytes.toBytes("reference_B"));
region.put(put);
put = new Put(row);
put.add(fam, serverinfo, Bytes.toBytes("ip_address"));
region.put(put);
// ok now delete a split:
Delete delete = new Delete(row);
delete.deleteColumns(fam, splitA);
region.delete(delete, null, true);
// assert some things:
Get get = new Get(row).addColumn(fam, serverinfo);
Result result = region.get(get, null);
assertEquals(1, result.size());
get = new Get(row).addColumn(fam, splitA);
result = region.get(get, null);
assertEquals(0, result.size());
get = new Get(row).addColumn(fam, splitB);
result = region.get(get, null);
assertEquals(1, result.size());
// Assert that after a delete, I can put.
put = new Put(row);
put.add(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
get = new Get(row);
result = region.get(get, null);
assertEquals(3, result.size());
// Now delete all... then test I can add stuff back
delete = new Delete(row);
region.delete(delete, null, false);
assertEquals(0, region.get(get, null).size());
region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A")));
result = region.get(get, null);
assertEquals(1, result.size());
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 10: doTestDelete_AndPostInsert
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
public void doTestDelete_AndPostInsert(Delete delete)
throws IOException, InterruptedException {
this.region = initHRegion(tableName, getName(), conf, fam1);
try {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
Put put = new Put(row);
put.add(fam1, qual1, value1);
region.put(put);
// now delete the value:
region.delete(delete, null, true);
// ok put data:
put = new Put(row);
put.add(fam1, qual1, value2);
region.put(put);
// ok get:
Get get = new Get(row);
get.addColumn(fam1, qual1);
Result r = region.get(get, null);
assertEquals(1, r.size());
assertByteEquals(value2, r.getValue(fam1, qual1));
// next:
Scan scan = new Scan(row);
scan.addColumn(fam1, qual1);
InternalScanner s = region.getScanner(scan);
List<KeyValue> results = new ArrayList<KeyValue>();
assertEquals(false, s.next(results));
assertEquals(1, results.size());
KeyValue kv = results.get(0);
assertByteEquals(value2, kv.getValue());
assertByteEquals(fam1, kv.getFamily());
assertByteEquals(qual1, kv.getQualifier());
assertByteEquals(row, kv.getRow());
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 11: testDelete_mixed
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@Test
public void testDelete_mixed() throws IOException, InterruptedException {
byte[] fam = Bytes.toBytes("info");
byte[][] families = { fam };
String method = this.getName();
this.region = initHRegion(tableName, method, CONF, families);
try {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
byte[] row = Bytes.toBytes("table_name");
// column names
byte[] serverinfo = Bytes.toBytes("serverinfo");
byte[] splitA = Bytes.toBytes("splitA");
byte[] splitB = Bytes.toBytes("splitB");
// add some data:
Put put = new Put(row);
put.add(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
put = new Put(row);
put.add(fam, splitB, Bytes.toBytes("reference_B"));
region.put(put);
put = new Put(row);
put.add(fam, serverinfo, Bytes.toBytes("ip_address"));
region.put(put);
// ok now delete a split:
Delete delete = new Delete(row);
delete.deleteColumns(fam, splitA);
region.delete(delete);
// assert some things:
Get get = new Get(row).addColumn(fam, serverinfo);
Result result = region.get(get);
assertEquals(1, result.size());
get = new Get(row).addColumn(fam, splitA);
result = region.get(get);
assertEquals(0, result.size());
get = new Get(row).addColumn(fam, splitB);
result = region.get(get);
assertEquals(1, result.size());
// Assert that after a delete, I can put.
put = new Put(row);
put.add(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
get = new Get(row);
result = region.get(get);
assertEquals(3, result.size());
// Now delete all... then test I can add stuff back
delete = new Delete(row);
region.delete(delete);
assertEquals(0, region.get(get).size());
region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A")));
result = region.get(get);
assertEquals(1, result.size());
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 12: doTestDelete_AndPostInsert
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
public void doTestDelete_AndPostInsert(Delete delete) throws IOException, InterruptedException {
TableName tableName = TableName.valueOf(name.getMethodName());
this.region = initHRegion(tableName, getName(), CONF, fam1);
try {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
Put put = new Put(row);
put.add(fam1, qual1, value1);
region.put(put);
// now delete the value:
region.delete(delete);
// ok put data:
put = new Put(row);
put.add(fam1, qual1, value2);
region.put(put);
// ok get:
Get get = new Get(row);
get.addColumn(fam1, qual1);
Result r = region.get(get);
assertEquals(1, r.size());
assertArrayEquals(value2, r.getValue(fam1, qual1));
// next:
Scan scan = new Scan(row);
scan.addColumn(fam1, qual1);
InternalScanner s = region.getScanner(scan);
List<Cell> results = new ArrayList<Cell>();
assertEquals(false, s.next(results));
assertEquals(1, results.size());
Cell kv = results.get(0);
assertArrayEquals(value2, CellUtil.cloneValue(kv));
assertArrayEquals(fam1, CellUtil.cloneFamily(kv));
assertArrayEquals(qual1, CellUtil.cloneQualifier(kv));
assertArrayEquals(row, CellUtil.cloneRow(kv));
} finally {
HRegion.closeHRegion(this.region);
this.region = null;
}
}
Example 13: testDeleteExpiredStoreFiles
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@Test
public void testDeleteExpiredStoreFiles() throws Exception {
int storeFileNum = 4;
int ttl = 4;
IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
EnvironmentEdgeManagerTestHelper.injectEdge(edge);
Configuration conf = HBaseConfiguration.create();
// Enable the expired store file deletion
conf.setBoolean("hbase.store.delete.expired.storefile", true);
HColumnDescriptor hcd = new HColumnDescriptor(family);
hcd.setTimeToLive(ttl);
init(name.getMethodName(), conf, hcd);
long sleepTime = this.store.getScanInfo().getTtl() / storeFileNum;
long timeStamp;
// There are 4 store files and the max time stamp difference among these
// store files will be (this.store.ttl / storeFileNum)
for (int i = 1; i <= storeFileNum; i++) {
LOG.info("Adding some data for the store file #" + i);
timeStamp = EnvironmentEdgeManager.currentTimeMillis();
this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null));
this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null));
this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null));
flush(i);
edge.incrementTime(sleepTime);
}
// Verify the total number of store files
Assert.assertEquals(storeFileNum, this.store.getStorefiles().size());
// Each compaction request will find one expired store file, which the
// compaction then deletes.
for (int i = 1; i <= storeFileNum; i++) {
// verify the expired store file.
CompactionContext compaction = this.store.requestCompaction();
CompactionRequest cr = compaction.getRequest();
// The first file in the request is the expired one. After the first compaction,
// the empty file produced by the previous compaction is also selected, hence Math.min(i, 2).
List<StoreFile> files = new ArrayList<StoreFile>(cr.getFiles());
Assert.assertEquals(Math.min(i, 2), cr.getFiles().size());
for (int j = 0; j < files.size(); j++) {
Assert.assertTrue(files.get(j).getReader().getMaxTimestamp() < (edge
.currentTimeMillis() - this.store.getScanInfo().getTtl()));
}
// Verify that the expired store file is compacted to an empty store file.
// Default compaction policy creates just one and only one compacted file.
StoreFile compactedFile = this.store.compact(compaction).get(0);
// It is an empty store file.
Assert.assertEquals(0, compactedFile.getReader().getEntries());
// Let the next store file expire.
edge.incrementTime(sleepTime);
}
}
Example 14: testDelete_mixed
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
@Test
public void testDelete_mixed() throws IOException, InterruptedException {
byte[] fam = Bytes.toBytes("info");
byte[][] families = { fam };
this.region = initHRegion(tableName, method, CONF, families);
try {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
byte[] row = Bytes.toBytes("table_name");
// column names
byte[] serverinfo = Bytes.toBytes("serverinfo");
byte[] splitA = Bytes.toBytes("splitA");
byte[] splitB = Bytes.toBytes("splitB");
// add some data:
Put put = new Put(row);
put.addColumn(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
put = new Put(row);
put.addColumn(fam, splitB, Bytes.toBytes("reference_B"));
region.put(put);
put = new Put(row);
put.addColumn(fam, serverinfo, Bytes.toBytes("ip_address"));
region.put(put);
// ok now delete a split:
Delete delete = new Delete(row);
delete.addColumns(fam, splitA);
region.delete(delete);
// assert some things:
Get get = new Get(row).addColumn(fam, serverinfo);
Result result = region.get(get);
assertEquals(1, result.size());
get = new Get(row).addColumn(fam, splitA);
result = region.get(get);
assertEquals(0, result.size());
get = new Get(row).addColumn(fam, splitB);
result = region.get(get);
assertEquals(1, result.size());
// Assert that after a delete, I can put.
put = new Put(row);
put.addColumn(fam, splitA, Bytes.toBytes("reference_A"));
region.put(put);
get = new Get(row);
result = region.get(get);
assertEquals(3, result.size());
// Now delete all... then test I can add stuff back
delete = new Delete(row);
region.delete(delete);
assertEquals(0, region.get(get).size());
region.put(new Put(row).addColumn(fam, splitA, Bytes.toBytes("reference_A")));
result = region.get(get);
assertEquals(1, result.size());
} finally {
HBaseTestingUtility.closeRegionAndWAL(this.region);
this.region = null;
}
}
Example 15: doTestDelete_AndPostInsert
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; // import the required package/class
public void doTestDelete_AndPostInsert(Delete delete) throws IOException, InterruptedException {
this.region = initHRegion(tableName, method, CONF, fam1);
try {
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
Put put = new Put(row);
put.addColumn(fam1, qual1, value1);
region.put(put);
// now delete the value:
region.delete(delete);
// ok put data:
put = new Put(row);
put.addColumn(fam1, qual1, value2);
region.put(put);
// ok get:
Get get = new Get(row);
get.addColumn(fam1, qual1);
Result r = region.get(get);
assertEquals(1, r.size());
assertArrayEquals(value2, r.getValue(fam1, qual1));
// next:
Scan scan = new Scan(row);
scan.addColumn(fam1, qual1);
InternalScanner s = region.getScanner(scan);
List<Cell> results = new ArrayList<>();
assertEquals(false, s.next(results));
assertEquals(1, results.size());
Cell kv = results.get(0);
assertArrayEquals(value2, CellUtil.cloneValue(kv));
assertArrayEquals(fam1, CellUtil.cloneFamily(kv));
assertArrayEquals(qual1, CellUtil.cloneQualifier(kv));
assertArrayEquals(row, CellUtil.cloneRow(kv));
} finally {
HBaseTestingUtility.closeRegionAndWAL(this.region);
this.region = null;
}
}