This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.wal.WALEdit. If you are wondering what the WALEdit class is for, how to use it, or where to find usage examples, the curated class code examples below may help.
The WALEdit class belongs to the org.apache.hadoop.hbase.regionserver.wal package. 15 code examples of the WALEdit class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: postProcess
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@Override
public void postProcess(HRegion region, WALEdit walEdit, boolean success) throws IOException {
  RegionCoprocessorHost coprocessorHost = region.getCoprocessorHost();
  if (coprocessorHost != null) {
    for (Mutation m : mutations) {
      if (m instanceof Put) {
        coprocessorHost.postPut((Put) m, walEdit, m.getDurability());
      } else if (m instanceof Delete) {
        coprocessorHost.postDelete((Delete) m, walEdit, m.getDurability());
      }
    }
    // At the end call the CP hook postBatchMutateIndispensably
    if (miniBatch != null) {
      // Call this hook directly, without calling pre/postBatchMutate(), when the Processor does
      // a read-only process; in that case there is no need to call the batch-based CP hooks either.
      coprocessorHost.postBatchMutateIndispensably(miniBatch, success);
    }
  }
}
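This postProcess hook belongs to a RowProcessor implementation; once such a processor is built, it is typically handed to the region for atomic execution under row locks. A minimal sketch of that caller side, assuming the HBase 1.x Region API and a hypothetical MyRowProcessor class that collects the `mutations` used above:

// MyRowProcessor and its constructor arguments are illustrative, not part of the example above.
RowProcessor<?, ?> processor = new MyRowProcessor(rowToLock, mutations);
// Execute the processor atomically under row locks; postProcess() above runs after the mutations.
region.processRowsWithLocks(processor);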
Example 2: mockWAL
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * Utility method to set up a WAL mock.
 * Needs to do the bit where we set the mvcc write entry on the WALKey on append, else the test hangs.
 * @return a mocked WAL
 * @throws IOException
 */
private WAL mockWAL() throws IOException {
  WAL wal = mock(WAL.class);
  Mockito.when(wal.append((HTableDescriptor) Mockito.any(), (HRegionInfo) Mockito.any(),
      (WALKey) Mockito.any(), (WALEdit) Mockito.any(), Mockito.anyBoolean()))
      .thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
          WALKey key = invocation.getArgumentAt(2, WALKey.class);
          MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
          key.setWriteEntry(we);
          return 1L;
        }
      });
  return wal;
}
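In the test that defines it, the mocked WAL is typically handed to a freshly created region so that appends resolve their MVCC write entries without a real log. A minimal sketch, assuming an HBase 1.x test with an HBaseTestingUtility-style setup (TEST_UTIL, htd, hri, family and qualifier are illustrative names, not from the example above):

WAL wal = mockWAL();
// Create a region backed by the mock; appends get their write entry from the Answer above.
HRegion region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(),
    TEST_UTIL.getConfiguration(), htd, wal);
try {
  region.put(new Put(Bytes.toBytes("row")).addColumn(family, qualifier, Bytes.toBytes("v")));
} finally {
  HRegion.closeHRegion(region);
}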
Example 3: map
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@Override
public void map(WALKey key, WALEdit value, Context context) throws IOException {
  try {
    // skip all other tables
    if (Bytes.equals(table, key.getTablename().getName())) {
      for (Cell cell : value.getCells()) {
        KeyValue kv = KeyValueUtil.ensureKeyValueTypeForMR(cell);
        if (WALEdit.isMetaEditFamily(kv.getFamily())) continue;
        context.write(new ImmutableBytesWritable(kv.getRow()), kv);
      }
    }
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
}
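A mapper with this signature is meant to be driven by the WAL input format from org.apache.hadoop.hbase.mapreduce, in the style of WALPlayer. A rough sketch of the job wiring, assuming the mapper class is named WALKeyValueMapper (a hypothetical name) and that walDir points at a directory of WAL files:

Configuration conf = HBaseConfiguration.create();
Job job = Job.getInstance(conf, "replay-wal");
job.setJarByClass(WALKeyValueMapper.class);          // hypothetical mapper class name
job.setInputFormatClass(WALInputFormat.class);       // feeds WALKey/WALEdit pairs to map()
job.setMapperClass(WALKeyValueMapper.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(KeyValue.class);
FileInputFormat.addInputPath(job, new Path(walDir)); // walDir: WAL directory, illustrative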
Example 4: testReplayingFlushRequestRestoresReadsEnabledState
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * Test the case where the secondary region replica is not in reads-enabled state because it is
 * waiting for a flush or region open marker from the primary region. Replaying a CANNOT_FLUSH
 * flush marker entry should restore the reads-enabled status in the region and allow reads
 * to continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);
  // Test case 1: Test that replaying a CANNOT_FLUSH request marker, assumed to come from a
  // triggered flush, restores readsEnabled
  primaryRegion.flushcache(true, true);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum());
    }
  }
  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
Example 5: matchesSafely
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@Override
protected boolean matchesSafely(WALEdit item) {
  assertTrue(Arrays.equals(item.getCells().get(0).getQualifier(), typeBytes));
  BulkLoadDescriptor desc;
  try {
    desc = WALEdit.getBulkLoadDescriptor(item.getCells().get(0));
  } catch (IOException e) {
    return false;
  }
  assertNotNull(desc);
  if (tableName != null) {
    assertTrue(Bytes.equals(ProtobufUtil.toTableName(desc.getTableName()).getName(),
        tableName));
  }
  if (storeFileNames != null) {
    int index = 0;
    StoreDescriptor store = desc.getStores(0);
    assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), familyName));
    assertTrue(Bytes.equals(Bytes.toBytes(store.getStoreHomeDir()), familyName));
    assertEquals(storeFileNames.size(), store.getStoreFileCount());
  }
  return true;
}
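matchesSafely is the hook of a Hamcrest TypeSafeMatcher<WALEdit>, so the matcher can be passed to Mockito's argThat() to verify that a bulk-load descriptor was appended to the WAL. A rough sketch, assuming the surrounding matcher class is called BulkLoadWALEditMatcher (a hypothetical name) and that wal is a Mockito mock of WAL as in Example 2:

// Verify that exactly one WAL append carried a bulk-load WALEdit for the expected table and family.
Mockito.verify(wal, Mockito.times(1)).append(
    Mockito.any(HTableDescriptor.class), Mockito.any(HRegionInfo.class),
    Mockito.any(WALKey.class),
    Mockito.argThat(new BulkLoadWALEditMatcher(tableName, familyName, storeFileNames)), // hypothetical ctor
    Mockito.anyBoolean());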
Example 6: appendEmptyEdit
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * Append a faked WALEdit in order to get a long sequence number; the wal syncer will just
 * ignore this WALEdit append later.
 *
 * @param wal the WAL to append to
 * @return the key used for the append, which is not synced and carries no real edits
 * @throws IOException
 */
private WALKey appendEmptyEdit(final WAL wal) throws IOException {
  // we use HLogKey here instead of WALKey directly to support legacy coprocessors.
  @SuppressWarnings("deprecation")
  WALKey key =
      new HLogKey(getRegionInfo().getEncodedNameAsBytes(), getRegionInfo().getTable(),
          WALKey.NO_SEQUENCE_ID, 0, null, HConstants.NO_NONCE, HConstants.NO_NONCE, getMVCC());
  // Call append but with an empty WALEdit. The returned sequence id will not be associated
  // with any edit and we can be sure it went in after all outstanding appends.
  try {
    wal.append(getTableDesc(), getRegionInfo(), key, WALEdit.EMPTY_WALEDIT, false);
  } catch (Throwable t) {
    // If exception, our mvcc won't get cleaned up by client, so do it here.
    getMVCC().complete(key.getWriteEntry());
  }
  return key;
}
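Callers of this helper typically wait on the returned key's MVCC write entry so that everything appended before the empty edit is guaranteed to be visible. A minimal sketch of that caller pattern, assuming the HBase 1.2-era MultiVersionConcurrencyControl API used by this class:

// Append a no-op edit; its sequence number sits after every outstanding append.
WALKey marker = appendEmptyEdit(this.wal);
// Complete our write entry and wait until the mvcc read point has advanced past it.
getMVCC().completeAndWait(marker.getWriteEntry());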
Example 7: preWALRestore
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * @param info the region whose WAL edits are being restored
 * @param logKey the WAL key of the edit being replayed
 * @param logEdit the WAL edit being replayed
 * @return true if default behavior should be bypassed, false otherwise
 * @throws IOException
 */
public boolean preWALRestore(final HRegionInfo info, final WALKey logKey,
    final WALEdit logEdit) throws IOException {
  return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      // Once we don't need to support the legacy call, replace RegionOperation with a version
      // that's ObserverContext<RegionEnvironment> and avoid this cast.
      final RegionEnvironment env = (RegionEnvironment) ctx.getEnvironment();
      if (env.useLegacyPre) {
        if (logKey instanceof HLogKey) {
          oserver.preWALRestore(ctx, info, (HLogKey) logKey, logEdit);
        } else {
          legacyWarning(oserver.getClass(), "There are wal keys present that are not HLogKey.");
        }
      } else {
        oserver.preWALRestore(ctx, info, logKey, logEdit);
      }
    }
  });
}
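These host methods fan out to every RegionObserver loaded on the region, so a coprocessor opts into WAL-restore events simply by overriding the corresponding hook. A small illustrative observer, assuming the HBase 1.x BaseRegionObserver API (the class name and counter field are made up for this sketch):

public class WALRestoreCounter extends BaseRegionObserver {
  private final AtomicLong restoredCells = new AtomicLong();

  @Override
  public void preWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
      HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
    // Count the cells about to be replayed into the region during log replay.
    restoredCells.addAndGet(logEdit.getCells().size());
  }
}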
Example 8: postWALRestore
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * @param info the region whose WAL edits are being restored
 * @param logKey the WAL key of the edit that was replayed
 * @param logEdit the WAL edit that was replayed
 * @throws IOException
 */
public void postWALRestore(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit)
    throws IOException {
  execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      // Once we don't need to support the legacy call, replace RegionOperation with a version
      // that's ObserverContext<RegionEnvironment> and avoid this cast.
      final RegionEnvironment env = (RegionEnvironment) ctx.getEnvironment();
      if (env.useLegacyPost) {
        if (logKey instanceof HLogKey) {
          oserver.postWALRestore(ctx, info, (HLogKey) logKey, logEdit);
        } else {
          legacyWarning(oserver.getClass(), "There are wal keys present that are not HLogKey.");
        }
      } else {
        oserver.postWALRestore(ctx, info, logKey, logEdit);
      }
    }
  });
}
Example 9: matches
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@Override
public boolean matches(Object edit) {
  List<Cell> cells = ((WALEdit) edit).getCells();
  if (cells.isEmpty()) {
    return false;
  }
  if (WALEdit.isMetaEditFamily(cells.get(0))) {
    FlushDescriptor desc = null;
    try {
      desc = WALEdit.getFlushDescriptor(cells.get(0));
    } catch (IOException e) {
      LOG.warn(e);
      return false;
    }
    if (desc != null) {
      for (FlushAction action : actions) {
        if (desc.getAction() == action) {
          return true;
        }
      }
    }
  }
  return false;
}
Example 10: testWriteFlushRequestMarker
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * Tests the case where a request to flush the cache is sent to the region, but the region
 * cannot flush. It should write the flush request marker instead.
 */
@Test
public void testWriteFlushRequestMarker() throws IOException {
  // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false
  FlushResultImpl result = (FlushResultImpl) ((HRegion) primaryRegion).flushcache(true, false);
  assertNotNull(result);
  assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY);
  assertFalse(result.wroteFlushWalMarker);
  // request flush again, but this time with writeFlushRequestWalMarker = true
  result = (FlushResultImpl) ((HRegion) primaryRegion).flushcache(true, true);
  assertNotNull(result);
  assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY);
  assertTrue(result.wroteFlushWalMarker);
  List<FlushDescriptor> flushes = Lists.newArrayList();
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      flushes.add(flush);
    }
  }
  assertEquals(1, flushes.size());
  assertNotNull(flushes.get(0));
  assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction());
}
Example 11: countDistinctRowKeys
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * Count the number of different row keys in the given edit (a single WALEdit can carry several
 * rows because of mini-batching). We assume there is at least one Cell in the WALEdit.
 * @param edit edit to count row keys from
 * @return number of different row keys
 */
private int countDistinctRowKeys(WALEdit edit) {
  List<Cell> cells = edit.getCells();
  int distinctRowKeys = 1;
  Cell lastCell = cells.get(0);
  for (int i = 0; i < edit.size(); i++) {
    if (!CellUtil.matchingRow(cells.get(i), lastCell)) {
      distinctRowKeys++;
    }
    // Advance the comparison point so consecutive cells of the same row are counted only once.
    lastCell = cells.get(i);
  }
  return distinctRowKeys;
}
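A quick way to see what the method counts: build a WALEdit whose cells span two rows and check the result. A throwaway sketch, assuming column family and qualifier byte arrays named fam and qual:

WALEdit edit = new WALEdit();
edit.add(new KeyValue(Bytes.toBytes("row1"), fam, qual, Bytes.toBytes("v1")));
edit.add(new KeyValue(Bytes.toBytes("row1"), fam, qual, Bytes.toBytes("v2")));
edit.add(new KeyValue(Bytes.toBytes("row2"), fam, qual, Bytes.toBytes("v3")));
assertEquals(2, countDistinctRowKeys(edit)); // two distinct rows across three cells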
Example 12: prePut
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@SuppressWarnings("null")
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Put put, final WALEdit edit, final Durability durability) {
  String tableName =
      c.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString();
  if (tableName.equals("observed_table")) {
    // Trigger an NPE to fail the coprocessor
    Integer i = null;
    i = i + 1;
  }
}
Example 13: prePut
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e,
    final Put put, final WALEdit edit, final Durability durability)
    throws IOException {
  Map<byte[], List<Cell>> familyMap = put.getFamilyCellMap();
  if (familyMap.containsKey(test)) {
    e.bypass();
  }
}
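Because the hook calls bypass(), any Put that touches the `test` family is skipped before it reaches the memstore or the WAL. A client-side sketch of the observable effect, assuming `test` is the column family byte array and the coprocessor is loaded on the table (connection and table name are illustrative):

Table table = connection.getTable(TableName.valueOf("t"));
Put p = new Put(Bytes.toBytes("row1"));
p.addColumn(test, Bytes.toBytes("q"), Bytes.toBytes("value"));
table.put(p);                                          // accepted by the client...
Result r = table.get(new Get(Bytes.toBytes("row1")));
// ...but the row stays empty because prePut() bypassed the default put processing.
assertTrue(r.isEmpty());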
Example 14: testOnlyReplayingFlushStartDoesNotHoldUpRegionClose
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
/**
 * Tests a case where we replay only a flush start marker and then the region is closed. The
 * region close should not block indefinitely.
 */
@Test(timeout = 60000)
public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " + start + " to " + (start + 100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);
  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();
  LOG.info("-- Replaying edits and flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- NOT Replaying flush commit in secondary");
      }
    } else {
      replayEdit(secondaryRegion, entry);
    }
  }
  assertTrue(rss.getRegionServerAccounting().getGlobalMemstoreSize() > 0);
  // now close the region, which should not hang because of the un-committed flush
  secondaryRegion.close();
  // verify that the memstore size is back to what it was
  assertEquals(0, rss.getRegionServerAccounting().getGlobalMemstoreSize());
}
Example 15: postPut
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; // import the required package/class
@Override
public void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
    final Put put, final WALEdit edit, final Durability durability) {
  if (aclRegion) {
    updateACL(c.getEnvironment(), put.getFamilyCellMap());
  }
}