This article collects typical usage examples of the Java class org.apache.hadoop.hbase.wal.WAL. If you are wondering what the WAL class is for, how to use it, or what working examples look like, the curated snippets below should help.
The WAL class belongs to the org.apache.hadoop.hbase.wal package. Fifteen code examples of the class are shown below, ordered by popularity by default.
Example 1: addWALEdits
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName,
    final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
    final HTableDescriptor htd, final MultiVersionConcurrencyControl mvcc) throws IOException {
  String familyStr = Bytes.toString(family);
  long txid = -1;
  for (int j = 0; j < count; j++) {
    byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
    byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j));
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
    // Uses WALKey instead of HLogKey on purpose. Will only work for tests where we don't care
    // about legacy coprocessors.
    txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
        ee.currentTime(), mvcc), edit, true);
  }
  if (-1 != txid) {
    wal.sync(txid);
  }
}
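The snippet below is a minimal usage sketch, not taken from the example's source project: it shows how a test might drive this helper against a real WAL. The table name, column family, WALFactory id, and the surrounding conf variable are illustrative assumptions; the WAL is obtained from a WALFactory much like in Example 5.
// Hypothetical test harness (illustrative only): drive addWALEdits against a real WAL.
TableName tableName = TableName.valueOf("testAddWALEdits");   // assumed table name
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor("cf"));
HRegionInfo hri = new HRegionInfo(tableName);
MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
WALFactory walFactory = new WALFactory(conf,                  // conf assumed available in the test
    Collections.<WALActionsListener>emptyList(), "testAddWALEdits");
WAL wal = walFactory.getWAL(hri.getEncodedNameAsBytes());
try {
  // Appends 10 edits to the WAL and syncs on the last transaction id.
  addWALEdits(tableName, hri, Bytes.toBytes("row"), Bytes.toBytes("cf"), 10,
      EnvironmentEdgeManager.getDelegate(), wal, htd, mvcc);
} finally {
  walFactory.close();                                          // closes the underlying WAL(s)
}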
Example 2: addWALEdits
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName,
    final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
    final HTableDescriptor htd, final MultiVersionConcurrencyControl mvcc) throws IOException {
  String familyStr = Bytes.toString(family);
  for (int j = 0; j < count; j++) {
    byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
    byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j));
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
    wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, 999, mvcc),
        edit, true);
  }
  wal.sync();
}
Example 3: getWAL
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
@Override
public WAL getWAL(HRegionInfo regionInfo) throws IOException {
  WAL wal;
  LogRoller roller = walRoller;
  // _ROOT_ and hbase:meta regions have a separate WAL.
  if (regionInfo != null && regionInfo.isMetaTable()
      && regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
    roller = ensureMetaWALRoller();
    wal = walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
  } else if (regionInfo == null) {
    wal = walFactory.getWAL(UNSPECIFIED_REGION);
  } else {
    wal = walFactory.getWAL(regionInfo.getEncodedNameAsBytes());
  }
  roller.addWAL(wal);
  return wal;
}
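As a hedged illustration of the branching above (the regionServer and userRegionInfo variables are assumed), a caller simply passes the region info and gets back either the dedicated meta WAL or the shared default WAL:
// Illustrative only: the meta region (default replica) is served by a dedicated meta WAL,
// while user regions and a null region info fall back to the regular WAL from the factory.
WAL metaWal = regionServer.getWAL(HRegionInfo.FIRST_META_REGIONINFO);
WAL userWal = regionServer.getWAL(userRegionInfo);
WAL defaultWal = regionServer.getWAL(null);   // UNSPECIFIED_REGION branch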
Example 4: writeMarker
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
private static long writeMarker(final WAL wal, final HTableDescriptor htd, final HRegionInfo hri,
    final WALEdit edit, final MultiVersionConcurrencyControl mvcc, final boolean sync)
    throws IOException {
  // TODO: Pass in current time to use?
  WALKey key =
      new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), System.currentTimeMillis(), mvcc);
  // Add it to the log, but the 'false' specifies that we don't need to add it to the memstore.
  long trx = MultiVersionConcurrencyControl.NONE;
  try {
    trx = wal.append(htd, hri, key, edit, false);
    if (sync) wal.sync(trx);
  } finally {
    // If you get hung here, is it a real WAL or a mocked WAL? If the latter, you need to
    // trip the latch that is inside getWriteEntry in your mock. See the append called from
    // onEvent in FSHLog.
    MultiVersionConcurrencyControl.WriteEntry we = key.getWriteEntry();
    if (mvcc != null && we != null) mvcc.complete(we);
  }
  return trx;
}
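For context, a caller hands this method a marker edit to persist. The sketch below is an assumption (it mirrors Example 8's use of ProtobufUtil.toFlushDescriptor and assumes that WALEdit.createFlushWALEdit and the variables hri, htd, wal, mvcc, and flushSeqId are available in the calling context); it shows roughly how such a marker edit could be built and written:
// Illustrative sketch: build a START_FLUSH descriptor, wrap it in a marker WALEdit,
// then persist it through writeMarker with sync enabled.
FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
    hri, flushSeqId, new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR));
WALEdit marker = WALEdit.createFlushWALEdit(hri, desc);
long trx = writeMarker(wal, htd, hri, marker, mvcc, true);   // returns the WAL transaction id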
Example 5: createHRegion
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Convenience method for creating new HRegions. Used by createTable. The {@link WAL} for the
 * created region needs to be closed explicitly, if it is not null. Use {@link HRegion#getWAL()}
 * to get access to it.
 *
 * @param info Info for the region to create.
 * @param rootDir Root directory for the HBase instance
 * @param tableDir table directory
 * @param wal shared WAL
 * @param initialize true to initialize the region
 * @param ignoreWAL true to skip generating a new WAL if the passed wal is null, mostly for
 *          createTable
 * @return new HRegion
 * @throws IOException
 */
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
    final Path tableDir, final Configuration conf, final HTableDescriptor hTableDescriptor,
    final WAL wal, final boolean initialize, final boolean ignoreWAL) throws IOException {
  LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " + hTableDescriptor
      + " RootDir = " + rootDir + " Table name == " + info.getTable().getNameAsString());
  FileSystem fs = FileSystem.get(conf);
  HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info);
  WAL effectiveWAL = wal;
  if (wal == null && !ignoreWAL) {
    // TODO HBASE-11983 There'll be no roller for this wal?
    // The WAL subsystem will use the default rootDir rather than the passed-in rootDir
    // unless I pass it along via the conf.
    Configuration confForWAL = new Configuration(conf);
    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
    effectiveWAL = (new WALFactory(confForWAL,
        Collections.<WALActionsListener>singletonList(new MetricsWAL()),
        "hregion-" + RandomStringUtils.randomNumeric(8))).getWAL(info.getEncodedNameAsBytes());
  }
  HRegion region =
      HRegion.newHRegion(tableDir, effectiveWAL, fs, conf, info, hTableDescriptor, null);
  if (initialize) region.initialize(null);
  return region;
}
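Since the Javadoc requires the created region's WAL to be closed explicitly, a caller would typically do something like the following sketch. It is illustrative only, not part of the original source, and assumes info, rootDir, tableDir, conf, and hTableDescriptor exist in the calling context.
// Illustrative usage: create a region without a shared WAL, use it, then close both the
// region and the WAL that createHRegion built internally.
HRegion region = createHRegion(info, rootDir, tableDir, conf, hTableDescriptor,
    null /* wal */, true /* initialize */, false /* ignoreWAL */);
try {
  // ... write to / read from the region ...
} finally {
  region.close();
  WAL regionWal = region.getWAL();
  if (regionWal != null) {
    regionWal.close();   // required because no shared WAL was passed in
  }
}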
Example 6: warmupHRegion
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
public static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd,
    final WAL wal, final Configuration conf, final RegionServerServices rsServices,
    final CancelableProgressable reporter) throws IOException {
  if (info == null) throw new NullPointerException("Passed region info is null");
  if (LOG.isDebugEnabled()) {
    LOG.debug("HRegion.Warming up region: " + info);
  }
  Path rootDir = FSUtils.getRootDir(conf);
  Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
  FileSystem fs = null;
  if (rsServices != null) {
    fs = rsServices.getFileSystem();
  }
  if (fs == null) {
    fs = FileSystem.get(conf);
  }
  HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null);
  r.initializeWarmup(reporter);
}
Example 7: testReplayingFlushRequestRestoresReadsEnabledState
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Test the case where the secondary region replica is not in the reads-enabled state because it
 * is waiting for a flush or region-open marker from the primary region. Replaying a CANNOT_FLUSH
 * flush marker entry should restore the reads-enabled status in the region and allow reads to
 * continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);
  // Test case 1: Test that replaying a CANNOT_FLUSH request marker, assuming it came from a
  // triggered flush, restores readsEnabled.
  primaryRegion.flushcache(true, true);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum());
    }
  }
  // Now reads should be enabled.
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
Example 8: writeFlushRequestMarkerToWAL
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Writes a marker to the WAL indicating that a flush is requested but cannot be completed for
 * various reasons. Ignores exceptions from the WAL. Returns whether the write succeeded.
 *
 * @param wal the WAL to write the marker to
 * @return whether the WAL write was successful
 */
private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarker) {
  if (writeFlushWalMarker && wal != null && !writestate.readOnly) {
    FlushDescriptor desc = ProtobufUtil
        .toFlushDescriptor(FlushAction.CANNOT_FLUSH, getRegionInfo(), -1,
            new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR));
    try {
      WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, true, mvcc);
      return true;
    } catch (IOException e) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
          + "Received exception while trying to write the flush request to wal", e);
    }
  }
  return false;
}
Example 9: getNextSequenceId
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Method to safely get the next sequence number.
 *
 * @return Next sequence number unassociated with any actual edit.
 * @throws IOException
 */
@VisibleForTesting
protected long getNextSequenceId(final WAL wal) throws IOException {
  // TODO: For review. Putting an empty edit in to get a sequenceid out will not work if the
  // WAL is banjaxed... if it has gotten an exception and the WAL has not yet been rolled or
  // aborted. In this case, we'll just get stuck here. For now, until HBASE-12751, just have
  // a timeout. May happen in tests after we tightened the semantic via HBASE-14317.
  // Also, getSequenceId blocks on a latch. There is no global list of outstanding latches,
  // so if an abort or stop happens, there is no way to call them in.
  WALKey key = this.appendEmptyEdit(wal);
  mvcc.complete(key.getWriteEntry());
  return key.getSequenceId(this.maxWaitForSeqId);
}
Example 10: appendEmptyEdit
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Append a fake WALEdit in order to get a long sequence number; the WAL syncer will simply
 * ignore this WALEdit when it is appended later.
 *
 * @param wal the WAL to append to
 * @return the key used for the append, with no sync performed and no data appended.
 * @throws IOException
 */
private WALKey appendEmptyEdit(final WAL wal) throws IOException {
  // We use HLogKey here instead of WALKey directly to support legacy coprocessors.
  @SuppressWarnings("deprecation")
  WALKey key =
      new HLogKey(getRegionInfo().getEncodedNameAsBytes(), getRegionInfo().getTable(),
          WALKey.NO_SEQUENCE_ID, 0, null, HConstants.NO_NONCE, HConstants.NO_NONCE, getMVCC());
  // Call append but with an empty WALEdit. The returned sequence id will not be associated
  // with any edit, and we can be sure it went in after all outstanding appends.
  try {
    wal.append(getTableDesc(), getRegionInfo(), key, WALEdit.EMPTY_WALEDIT, false);
  } catch (Throwable t) {
    // If there is an exception, our mvcc won't get cleaned up by the client, so do it here.
    getMVCC().complete(key.getWriteEntry());
  }
  return key;
}
Example 11: mergeAndVerify
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
private HRegion mergeAndVerify(final String msg, final String regionName1,
    final String regionName2, final WAL log, final int upperbound) throws Exception {
  Merge merger = new Merge(this.conf);
  LOG.info(msg);
  LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
  int errCode = ToolRunner.run(this.conf, merger,
      new String[] { this.desc.getTableName().getNameAsString(), regionName1, regionName2 });
  assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
  HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
  // Now verify that we can read all the rows from regions 0, 1 in the new merged region.
  HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
  verifyMerge(merged, upperbound);
  merged.close();
  LOG.info("Verified " + msg);
  return merged;
}
Example 12: mockWAL
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Utility method to set up a WAL mock.
 * Needs to do the bit where we close the latch on the WALKey on append, else the test hangs.
 *
 * @return a mocked WAL whose append stub completes the MVCC write entry
 * @throws IOException
 */
private WAL mockWAL() throws IOException {
  WAL wal = mock(WAL.class);
  Mockito.when(wal.append((HTableDescriptor) Mockito.any(), (HRegionInfo) Mockito.any(),
      (WALKey) Mockito.any(), (WALEdit) Mockito.any(), Mockito.anyBoolean()))
      .thenAnswer(new Answer<Long>() {
        @Override
        public Long answer(InvocationOnMock invocation) throws Throwable {
          WALKey key = invocation.getArgumentAt(2, WALKey.class);
          MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin();
          key.setWriteEntry(we);
          return 1L;
        }
      });
  return wal;
}
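A typical use of this mock, sketched below with assumed variables (conf, fs, rootDir, hri, htd, and the row/family/qualifier/value bytes), is to open a region over it and then verify that append was invoked. Because the stubbed answer completes the MVCC write entry, the write path does not hang. This is a sketch, not the original test's exact code.
// Illustrative only: open a region backed by the mocked WAL and verify the append call.
WAL wal = mockWAL();
HRegion region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, wal);
region.put(new Put(row).addColumn(family, qualifier, value));
Mockito.verify(wal, Mockito.atLeastOnce()).append((HTableDescriptor) Mockito.any(),
    (HRegionInfo) Mockito.any(), (WALKey) Mockito.any(), (WALEdit) Mockito.any(),
    Mockito.anyBoolean());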
Example 13: testAtomicBulkLoad
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Atomic bulk load.
 */
@Test
public void testAtomicBulkLoad() throws Exception {
  TableName TABLE_NAME = TableName.valueOf("atomicBulkLoad");
  int millisToRun = 30000;
  int numScanners = 50;
  UTIL.startMiniCluster(1);
  try {
    WAL log = UTIL.getHBaseCluster().getRegionServer(0).getWAL(null);
    FindBulkHBaseListener listener = new FindBulkHBaseListener();
    log.registerWALActionsListener(listener);
    runAtomicBulkloadTest(TABLE_NAME, millisToRun, numScanners);
    assertThat(listener.isFound(), is(true));
  } finally {
    UTIL.shutdownMiniCluster();
  }
}
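FindBulkHBaseListener is a test-local listener that is not shown in this excerpt. A minimal sketch of what such a listener could look like is given below; it is an assumption based on the 1.x WALActionsListener.Base API and the "bulk_load" marker string, not the project's actual implementation.
// Illustrative sketch: flag any WAL entry whose cells mention a bulk-load marker.
static class FindBulkHBaseListener extends WALActionsListener.Base {
  private volatile boolean found = false;

  @Override
  public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {
    for (Cell cell : logEdit.getCells()) {
      KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
      if (kv.toString().toLowerCase().contains("bulk_load")) {
        found = true;
      }
    }
  }

  public boolean isFound() {
    return found;
  }
}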
Example 14: addEdits
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
protected void addEdits(WAL log, HRegionInfo hri, HTableDescriptor htd, int times,
    MultiVersionConcurrencyControl mvcc) throws IOException {
  final byte[] row = Bytes.toBytes("row");
  for (int i = 0; i < times; i++) {
    long timestamp = System.currentTimeMillis();
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, row, row, timestamp, row));
    WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(),
        WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
        HConstants.NO_NONCE, mvcc);
    log.append(htd, hri, key, cols, true);
  }
  log.sync();
}
Example 15: testNoEdits
import org.apache.hadoop.hbase.wal.WAL; // import the required package/class
/**
 * Tests that the LogRoller performs the roll even if there are no edits.
 */
@Test
public void testNoEdits() throws Exception {
  TableName tableName = TableName.valueOf("TestLogRollPeriodNoEdits");
  TEST_UTIL.createTable(tableName, "cf");
  try {
    Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
      WAL log = server.getWAL(null);
      checkMinLogRolls(log, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}