This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.wal.WALEdit.getFlushDescriptor. If you are unsure what WALEdit.getFlushDescriptor does or how to call it, the curated examples below should help; you can also explore the enclosing class org.apache.hadoop.hbase.regionserver.wal.WALEdit for more context.
Six code examples of WALEdit.getFlushDescriptor are shown below, sorted by popularity.
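Before the examples, here is a minimal distilled sketch of the pattern most of them share: read WAL entries, ask getFlushDescriptor for a flush marker (it returns null for ordinary data edits), and dispatch on the marker's action. This is a sketch only; the reader and secondaryRegion fixtures are borrowed from the test examples below, and the replayWALFlush*Marker hooks are package-private in org.apache.hadoop.hbase.regionserver, as in those tests.

import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WAL;

// Sketch only: mirrors the replay loops in Examples 2, 3 and 6 below.
void replayFlushMarkers(WAL.Reader reader, HRegion secondaryRegion) throws IOException {
  for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
    // Returns null unless the first cell is a METAFAMILY flush marker.
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush == null) {
      continue; // a plain data edit; replay it through the normal path instead
    }
    if (flush.getAction() == FlushAction.START_FLUSH) {
      secondaryRegion.replayWALFlushStartMarker(flush);
    } else if (flush.getAction() == FlushAction.COMMIT_FLUSH) {
      secondaryRegion.replayWALFlushCommitMarker(flush);
    }
  }
}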
Example 1: matches
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the package/class this method depends on

@Override
public boolean matches(Object edit) {
  List<Cell> cells = ((WALEdit) edit).getCells();
  if (cells.isEmpty()) {
    return false;
  }
  if (WALEdit.isMetaEditFamily(cells.get(0))) {
    // Meta-family cells may carry a flush descriptor; parse it if present.
    FlushDescriptor desc = null;
    try {
      desc = WALEdit.getFlushDescriptor(cells.get(0));
    } catch (IOException e) {
      LOG.warn(e);
      return false;
    }
    if (desc != null) {
      for (FlushAction action : actions) {
        if (desc.getAction() == action) {
          return true;
        }
      }
    }
  }
  return false;
}
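The matches(Object) override above follows the Mockito 1.x ArgumentMatcher contract, which suggests the matcher is meant for argThat(...) inside a verify(...) call on a mocked WAL. Below is a hedged usage sketch: the WalSink interface and the flushMatcher variable are hypothetical stand-ins introduced here only for illustration (the real WAL.append signature varies across HBase releases, so it is deliberately not spelled out).

import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;

// Hypothetical sink, standing in for the real WAL so that no exact
// append(...) signature has to be assumed.
interface WalSink {
  void append(WALEdit edit);
}

// flushMatcher: an instance of the matcher class defined above (hypothetical variable name).
WalSink sink = Mockito.mock(WalSink.class);
// ... exercise the code under test so it appends edits to sink ...
Mockito.verify(sink).append(Mockito.argThat(flushMatcher)); // passes only for flush-marker edits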
Example 2: testReplayingFlushRequestRestoresReadsEnabledState
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the package/class this method depends on

/**
 * Test the case where the secondary region replica is not in the reads-enabled state because it
 * is waiting for a flush or region-open marker from the primary region. Replaying a CANNOT_FLUSH
 * flush marker entry should restore the reads-enabled status in the region and allow reads to
 * continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);
  // Test case 1: replaying a CANNOT_FLUSH request marker, assuming it came from a
  // triggered flush, restores readsEnabled
  primaryRegion.flushcache(true, true);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum());
    }
  }
  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
Example 3: testOnlyReplayingFlushStartDoesNotHoldUpRegionClose
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the package/class this method depends on

/**
 * Tests a case where we replay only a flush start marker, then the region is closed. The region
 * close should not block indefinitely.
 */
@Test(timeout = 60000)
public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);
  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();
  LOG.info("-- Replaying edits and flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc =
        WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- NOT Replaying flush commit in secondary");
      }
    } else {
      replayEdit(secondaryRegion, entry);
    }
  }
  assertTrue(rss.getRegionServerAccounting().getGlobalMemstoreSize() > 0);
  // now close the region, which should not hang because of the un-committed flush
  secondaryRegion.close();
  // verify that the memstore size is back to what it was
  assertEquals(0, rss.getRegionServerAccounting().getGlobalMemstoreSize());
}
Example 4: testWriteFlushRequestMarker
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the package/class this method depends on

/**
 * Tests the case where a flush-cache request is sent to the region, but the region cannot flush.
 * It should write the flush request marker instead.
 */
@Test
public void testWriteFlushRequestMarker() throws IOException {
  // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false
  FlushResultImpl result = (FlushResultImpl) ((HRegion) primaryRegion).flushcache(true, false);
  assertNotNull(result);
  assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY);
  assertFalse(result.wroteFlushWalMarker);
  // request flush again, but this time with writeFlushRequestWalMarker = true
  result = (FlushResultImpl) ((HRegion) primaryRegion).flushcache(true, true);
  assertNotNull(result);
  assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY);
  assertTrue(result.wroteFlushWalMarker);
  List<FlushDescriptor> flushes = Lists.newArrayList();
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      flushes.add(flush);
    }
  }
  assertEquals(1, flushes.size());
  assertNotNull(flushes.get(0));
  assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction());
}
Example 5: doReplayBatchOp
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the package/class this method depends on

/**
 * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
 * constructing a MultiResponse, to save a possible loop if the caller doesn't need a
 * MultiResponse.
 * @param region the region to replay the mutations against
 * @param mutations the list of WAL mutations to replay
 * @param replaySeqId the sequence id of the replay batch
 * @return an array of OperationStatus, which internally contains the OperationStatusCode and
 *         the exceptionMessage, if any
 * @throws IOException
 */
private OperationStatus[] doReplayBatchOp(final Region region,
    final List<WALSplitter.MutationReplay> mutations, long replaySeqId) throws IOException {
  long before = EnvironmentEdgeManager.currentTime();
  boolean batchContainsPuts = false, batchContainsDelete = false;
  try {
    for (Iterator<WALSplitter.MutationReplay> it = mutations.iterator(); it.hasNext();) {
      WALSplitter.MutationReplay m = it.next();
      if (m.type == MutationType.PUT) {
        batchContainsPuts = true;
      } else {
        batchContainsDelete = true;
      }
      NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
      List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
      if (metaCells != null && !metaCells.isEmpty()) {
        for (Cell metaCell : metaCells) {
          CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
          boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
          HRegion hRegion = (HRegion) region;
          if (compactionDesc != null) {
            // replay the compaction. Remove the files from stores only if we are the primary
            // region replica (thus own the files)
            hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica,
              replaySeqId);
            continue;
          }
          FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
          if (flushDesc != null && !isDefaultReplica) {
            hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
            continue;
          }
          RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
          if (regionEvent != null && !isDefaultReplica) {
            hRegion.replayWALRegionEventMarker(regionEvent);
            continue;
          }
          BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
          if (bulkLoadEvent != null) {
            hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
            continue;
          }
        }
        // meta edits are handled above; drop them from the batch to replay
        it.remove();
      }
    }
    requestCount.add(mutations.size());
    if (!region.getRegionInfo().isMetaTable()) {
      regionServer.cacheFlusher.reclaimMemStoreMemory();
    }
    return region.batchReplay(mutations.toArray(
      new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
  } finally {
    if (regionServer.metricsRegionServer != null) {
      long after = EnvironmentEdgeManager.currentTime();
      if (batchContainsPuts) {
        regionServer.metricsRegionServer.updatePut(after - before);
      }
      if (batchContainsDelete) {
        regionServer.metricsRegionServer.updateDelete(after - before);
      }
    }
  }
}
Example 6: testReplayFlushSeqIds
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the package/class this method depends on
@Test
public void testReplayFlushSeqIds() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);
  // now replay the flush marker
  reader = createWALReaderForPrimary();
  long flushSeqId = -1;
  LOG.info("-- Replaying flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc =
        WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        secondaryRegion.replayWALFlushStartMarker(flushDesc);
        flushSeqId = flushDesc.getFlushSequenceNumber();
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- Replaying flush commit in secondary");
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);
        assertEquals(flushSeqId, flushDesc.getFlushSequenceNumber());
      }
    }
    // else do not replay
  }
  // TODO: what to do with this?
  // assert that the newly picked up flush file is visible
  long readPoint = secondaryRegion.getMVCC().getReadPoint();
  assertEquals(flushSeqId, readPoint);
  // after replay verify that everything is still visible
  verifyData(secondaryRegion, 0, 100, cq, families);
}