本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.wal.HLogKey类的典型用法代码示例。如果您正苦于以下问题:Java HLogKey类的具体用法?Java HLogKey怎么用?Java HLogKey使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
HLogKey类属于org.apache.hadoop.hbase.regionserver.wal包,在下文中一共展示了HLogKey类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getCurrentKey
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
@Override
public HLogKey getCurrentKey() throws IOException, InterruptedException {
  // Fast path: the entry carries the deprecated HLogKey type we promise callers.
  if (currentEntry.getKey() instanceof HLogKey) {
    return (HLogKey) currentEntry.getKey();
  }
  // Build the exception first so the log line below carries the full stack trace.
  final IllegalStateException badKeyType = new IllegalStateException(
      "HLogInputFormat only works when given entries that have HLogKey for keys. This" +
      " one had '" + currentEntry.getKey().getClass() + "'");
  LOG.error("The deprecated HLogInputFormat has to work with the deprecated HLogKey class, " +
      " but HBase internals read the wal entry using some other class." +
      " This is a bug; please file an issue or email the developer mailing list. It is " +
      "likely that you would not have this problem if you updated to use WALInputFormat. " +
      "You will need the following exception details when seeking help from the HBase " +
      "community.",
      badKeyType);
  throw badKeyType;
}
示例2: appendEmptyEdit
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Append a faked (empty) WALEdit solely to obtain a fresh long sequence number from the WAL;
 * the wal syncer will just ignore the WALEdit when the append is processed later.
 *
 * @param wal the WAL to append the empty edit to
 * @return the key used for the append, done with no sync and no real payload; its write
 *         entry carries the newly assigned sequence id
 * @throws IOException declared for API compatibility; append failures below are handled
 *         locally by completing the mvcc write entry
 */
private WALKey appendEmptyEdit(final WAL wal) throws IOException {
// we use HLogKey here instead of WALKey directly to support legacy
// coprocessors.
@SuppressWarnings("deprecation") WALKey key =
new HLogKey(getRegionInfo().getEncodedNameAsBytes(), getRegionInfo().getTable(),
WALKey.NO_SEQUENCE_ID, 0, null, HConstants.NO_NONCE, HConstants.NO_NONCE, getMVCC());
// Call append but with an empty WALEdit. The returned sequence id will not
// be associated
// with any edit and we can be sure it went in after all outstanding
// appends.
try {
wal.append(getTableDesc(), getRegionInfo(), key, WALEdit.EMPTY_WALEDIT, false);
} catch (Throwable t) {
// If exception, our mvcc won't get cleaned up by client, so do it here.
// NOTE(review): the Throwable is deliberately swallowed — the mvcc entry is
// completed so readers don't block, and the caller still receives the key.
getMVCC().complete(key.getWriteEntry());
}
return key;
}
示例3: preWALRestore
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Run preWALRestore on every registered RegionObserver, honoring the deprecated
 * HLogKey-typed override for legacy coprocessors.
 *
 * @param info region the WAL entry is being restored into
 * @param logKey key of the WAL entry being restored
 * @param logEdit edits being restored
 * @return true if default behavior should be bypassed, false otherwise
 * @throws IOException if an observer throws
 */
public boolean preWALRestore(final HRegionInfo info, final WALKey logKey,
    final WALEdit logEdit) throws IOException {
  if (coprocessors.isEmpty()) {
    return execOperation(null);
  }
  return execOperation(new RegionOperation() {
    @Override
    public void call(RegionObserver observer,
        ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
      // Once we don't need to support the legacy call, replace RegionOperation with a
      // version that's ObserverContext<RegionEnvironment> and avoid this cast.
      final RegionEnvironment regionEnv = (RegionEnvironment) ctx.getEnvironment();
      if (!regionEnv.useLegacyPre) {
        observer.preWALRestore(ctx, info, logKey, logEdit);
      } else if (logKey instanceof HLogKey) {
        // Legacy observers only understand the deprecated HLogKey signature.
        observer.preWALRestore(ctx, info, (HLogKey) logKey, logEdit);
      } else {
        legacyWarning(observer.getClass(), "There are wal keys present that are not HLogKey.");
      }
    }
  });
}
示例4: postWALRestore
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Run postWALRestore on every registered RegionObserver, honoring the deprecated
 * HLogKey-typed override for legacy coprocessors.
 *
 * @param info region the WAL entry was restored into
 * @param logKey key of the WAL entry that was restored
 * @param logEdit edits that were restored
 * @throws IOException if an observer throws
 */
public void postWALRestore(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit)
    throws IOException {
  execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
    @Override
    public void call(RegionObserver observer,
        ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
      // Once we don't need to support the legacy call, replace RegionOperation with a
      // version that's ObserverContext<RegionEnvironment> and avoid this cast.
      final RegionEnvironment regionEnv = (RegionEnvironment) ctx.getEnvironment();
      if (!regionEnv.useLegacyPost) {
        observer.postWALRestore(ctx, info, logKey, logEdit);
      } else if (logKey instanceof HLogKey) {
        // Legacy observers only understand the deprecated HLogKey signature.
        observer.postWALRestore(ctx, info, (HLogKey) logKey, logEdit);
      } else {
        legacyWarning(observer.getClass(), "There are wal keys present that are not HLogKey.");
      }
    }
  });
}
示例5: map
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
@Override
public void map(HLogKey key, WALEdit value,
    Context context)
    throws IOException {
  // Skip entries belonging to any table other than the one we were configured with.
  if (!Bytes.equals(table, key.getTablename())) {
    return;
  }
  try {
    for (KeyValue kv : value.getKeyValues()) {
      // Meta-family cells are WAL bookkeeping, not user data — do not emit them.
      if (HLog.isMetaFamily(kv.getFamily())) continue;
      context.write(new ImmutableBytesWritable(kv.getRow()), kv);
    }
  } catch (InterruptedException e) {
    // Bug fix: this was previously swallowed with printStackTrace(), which both lost
    // the interrupt status and silently dropped edits. Restore the interrupt flag and
    // propagate the failure through the declared IOException.
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted while writing WAL edit to the map context", e);
  }
}
示例6: preWALRestore
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Invoke preWALRestore on each loaded coprocessor that is a RegionObserver.
 *
 * @param info region the WAL entry is being restored into
 * @param logKey key of the WAL entry being restored
 * @param logEdit edits being restored
 * @return true if default behavior should be bypassed, false otherwise
 * @throws IOException if an observer error is rethrown
 */
public boolean preWALRestore(HRegionInfo info, HLogKey logKey,
    WALEdit logEdit) throws IOException {
  boolean bypass = false;
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    // Only RegionObserver instances participate in this hook.
    if (!(env.getInstance() instanceof RegionObserver)) {
      continue;
    }
    ctx = ObserverContext.createAndPrepare(env, ctx);
    try {
      ((RegionObserver) env.getInstance()).preWALRestore(ctx, info, logKey, logEdit);
    } catch (Throwable e) {
      handleCoprocessorThrowable(env, e);
    }
    bypass |= ctx.shouldBypass();
    if (ctx.shouldComplete()) {
      break;
    }
  }
  return bypass;
}
示例7: postWALRestore
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Invoke postWALRestore on each loaded coprocessor that is a RegionObserver.
 *
 * @param info region the WAL entry was restored into
 * @param logKey key of the WAL entry that was restored
 * @param logEdit edits that were restored
 * @throws IOException if an observer error is rethrown
 */
public void postWALRestore(HRegionInfo info, HLogKey logKey,
    WALEdit logEdit) throws IOException {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    // Only RegionObserver instances participate in this hook.
    if (!(env.getInstance() instanceof RegionObserver)) {
      continue;
    }
    ctx = ObserverContext.createAndPrepare(env, ctx);
    try {
      ((RegionObserver) env.getInstance()).postWALRestore(ctx, info, logKey, logEdit);
    } catch (Throwable e) {
      handleCoprocessorThrowable(env, e);
    }
    if (ctx.shouldComplete()) {
      break;
    }
  }
}
示例8: verifyRecoverEdits
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Assert that every region directory under tableDir belongs to the expected table,
 * is not already tracked in regionsMap, and that each recovered-edits log entry
 * carries the matching table and encoded region name.
 */
private void verifyRecoverEdits(final Path tableDir, final byte[] tableName,
    final Map<byte[], byte[]> regionsMap) throws IOException {
  for (FileStatus regionStatus : FSUtils.listStatus(fs, tableDir)) {
    final Path regionDir = regionStatus.getPath();
    assertTrue(regionDir.getName().startsWith(Bytes.toString(tableName)));
    final byte[] regionName = Bytes.toBytes(regionDir.getName());
    assertFalse(regionsMap.containsKey(regionName));
    final Path editsDir = HLog.getRegionDirRecoveredEditsDir(regionDir);
    for (FileStatus logStatus : FSUtils.listStatus(fs, editsDir)) {
      final HLog.Reader reader = HLog.getReader(fs, logStatus.getPath(), conf);
      try {
        for (HLog.Entry entry = reader.next(); entry != null; entry = reader.next()) {
          final HLogKey entryKey = entry.getKey();
          assertArrayEquals(tableName, entryKey.getTablename());
          assertArrayEquals(regionName, entryKey.getEncodedRegionName());
        }
      } finally {
        // Always release the reader, even when an assertion trips.
        reader.close();
      }
    }
  }
}
示例9: writeTestLog
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Populate logFile with a fixed fixture: 7 tables x 10 regions x 50 rows,
 * one WAL entry per row, keyed by a monotonically increasing sequence number.
 */
private void writeTestLog(final Path logFile) throws IOException {
  fs.mkdirs(logFile.getParent());
  final HLog.Writer writer = HLog.createWriter(fs, logFile, conf);
  try {
    for (int tableIdx = 0; tableIdx < 7; ++tableIdx) {
      final byte[] table = getTableName(tableIdx);
      for (int regionIdx = 0; regionIdx < 10; ++regionIdx) {
        final byte[] region = getRegionName(table, regionIdx);
        for (int seq = 0; seq < 50; ++seq) {
          final byte[] row = Bytes.toBytes("row-" + seq);
          final HLogKey key = new HLogKey(region, table, (long) seq,
              System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
          final WALEdit edit = new WALEdit();
          edit.add(new KeyValue(row, TEST_FAMILY, TEST_QUALIFIER, row));
          writer.append(new HLog.Entry(key, edit));
        }
      }
    }
  } finally {
    writer.close();
  }
}
示例10: scopeWALEdits
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Utility method used to set the correct replication scopes on a log key. Keys get no
 * scope entry for compaction/meta WAL edits or for families whose scope is local.
 *
 * @param htd descriptor used to look up each family's configured scope
 * @param logKey key that may receive the computed scopes
 * @param logEdit edits whose families determine the scopes
 */
public static void scopeWALEdits(HTableDescriptor htd, HLogKey logKey,
    WALEdit logEdit) {
  NavigableMap<byte[], Integer> replicationScopes =
      new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
  for (KeyValue kv : logEdit.getKeyValues()) {
    // Meta-family cells are expected here and must never be replicated.
    if (kv.matchingFamily(WALEdit.METAFAMILY)) {
      continue;
    }
    byte[] family = kv.getFamily();
    // An unknown family is unexpected; tends to happen only in unit tests.
    assert htd.getFamily(family) != null;
    int scope = htd.getFamily(family).getScope();
    if (scope != REPLICATION_SCOPE_LOCAL && !replicationScopes.containsKey(family)) {
      replicationScopes.put(family, scope);
    }
  }
  if (!replicationScopes.isEmpty()) {
    logKey.setScopes(replicationScopes);
  }
}
示例11: map
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
@Override
public void map(HLogKey key, WALEdit value,
    Context context)
    throws IOException {
  // Skip entries belonging to any table other than the one we were configured with.
  if (!Bytes.equals(table, key.getTablename().getName())) {
    return;
  }
  try {
    for (KeyValue kv : value.getKeyValues()) {
      // Meta-edit cells are WAL bookkeeping, not user data — do not emit them.
      if (WALEdit.isMetaEditFamily(kv.getFamily())) continue;
      context.write(new ImmutableBytesWritable(kv.getRow()), kv);
    }
  } catch (InterruptedException e) {
    // Bug fix: this was previously swallowed with printStackTrace(), which both lost
    // the interrupt status and silently dropped edits. Restore the interrupt flag and
    // propagate the failure through the declared IOException.
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted while writing WAL edit to the map context", e);
  }
}
示例12: preWALRestore
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Invoke preWALRestore on each loaded RegionObserver, swapping in the coprocessor's own
 * classloader for the duration of each call.
 *
 * @param info region the WAL entry is being restored into
 * @param logKey key of the WAL entry being restored
 * @param logEdit edits being restored
 * @return true if default behavior should be bypassed, false otherwise
 * @throws IOException if an observer error is rethrown
 */
public boolean preWALRestore(final HRegionInfo info, final HLogKey logKey,
    final WALEdit logEdit) throws IOException {
  boolean bypass = false;
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    // Only RegionObserver instances participate in this hook.
    if (!(env.getInstance() instanceof RegionObserver)) {
      continue;
    }
    ctx = ObserverContext.createAndPrepare(env, ctx);
    final Thread self = Thread.currentThread();
    final ClassLoader previousLoader = self.getContextClassLoader();
    try {
      // Run the observer under its own classloader so it resolves its own dependencies.
      self.setContextClassLoader(env.getClassLoader());
      ((RegionObserver) env.getInstance()).preWALRestore(ctx, info, logKey, logEdit);
    } catch (Throwable e) {
      handleCoprocessorThrowable(env, e);
    } finally {
      // Always restore the caller's classloader, even on failure.
      self.setContextClassLoader(previousLoader);
    }
    bypass |= ctx.shouldBypass();
    if (ctx.shouldComplete()) {
      break;
    }
  }
  return bypass;
}
示例13: postWALRestore
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Invoke postWALRestore on each loaded RegionObserver, swapping in the coprocessor's own
 * classloader for the duration of each call.
 *
 * @param info region the WAL entry was restored into
 * @param logKey key of the WAL entry that was restored
 * @param logEdit edits that were restored
 * @throws IOException if an observer error is rethrown
 */
public void postWALRestore(final HRegionInfo info, final HLogKey logKey, final WALEdit logEdit)
    throws IOException {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    // Only RegionObserver instances participate in this hook.
    if (!(env.getInstance() instanceof RegionObserver)) {
      continue;
    }
    ctx = ObserverContext.createAndPrepare(env, ctx);
    final Thread self = Thread.currentThread();
    final ClassLoader previousLoader = self.getContextClassLoader();
    try {
      // Run the observer under its own classloader so it resolves its own dependencies.
      self.setContextClassLoader(env.getClassLoader());
      ((RegionObserver) env.getInstance()).postWALRestore(ctx, info, logKey, logEdit);
    } catch (Throwable e) {
      handleCoprocessorThrowable(env, e);
    } finally {
      // Always restore the caller's classloader, even on failure.
      self.setContextClassLoader(previousLoader);
    }
    if (ctx.shouldComplete()) {
      break;
    }
  }
}
示例14: verifyRecoverEdits
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Assert that every region directory under tableDir belongs to the expected table,
 * is not already tracked in regionsMap, and that each recovered-edits log entry
 * carries the matching table name and encoded region name.
 */
private void verifyRecoverEdits(final Path tableDir, final TableName tableName,
    final Map<byte[], byte[]> regionsMap) throws IOException {
  for (FileStatus regionStatus : FSUtils.listStatus(fs, tableDir)) {
    final Path regionDir = regionStatus.getPath();
    assertTrue(regionDir.getName().startsWith(tableName.getNameAsString()));
    final byte[] regionName = Bytes.toBytes(regionDir.getName());
    assertFalse(regionsMap.containsKey(regionName));
    final Path editsDir = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    for (FileStatus logStatus : FSUtils.listStatus(fs, editsDir)) {
      final HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
      try {
        for (HLog.Entry entry = reader.next(); entry != null; entry = reader.next()) {
          final HLogKey entryKey = entry.getKey();
          assertEquals(tableName, entryKey.getTablename());
          assertArrayEquals(regionName, entryKey.getEncodedRegionName());
        }
      } finally {
        // Always release the reader, even when an assertion trips.
        reader.close();
      }
    }
  }
}
示例15: writeTestLog
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; //导入依赖的package包/类
/**
 * Populate logFile with a fixed fixture: 7 tables x 10 regions x 50 rows,
 * one WAL entry per row, keyed by a monotonically increasing sequence number.
 */
private void writeTestLog(final Path logFile) throws IOException {
  fs.mkdirs(logFile.getParent());
  final HLog.Writer writer = HLogFactory.createWALWriter(fs, logFile, conf);
  try {
    for (int tableIdx = 0; tableIdx < 7; ++tableIdx) {
      final TableName table = getTableName(tableIdx);
      for (int regionIdx = 0; regionIdx < 10; ++regionIdx) {
        final byte[] region = getRegionName(table, regionIdx);
        for (int seq = 0; seq < 50; ++seq) {
          final byte[] row = Bytes.toBytes("row-" + seq);
          final HLogKey key = new HLogKey(region, table, (long) seq,
              System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
          final WALEdit edit = new WALEdit();
          edit.add(new KeyValue(row, TEST_FAMILY, TEST_QUALIFIER, row));
          writer.append(new HLog.Entry(key, edit));
        }
      }
    }
  } finally {
    writer.close();
  }
}