This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.wal.WALUtil. If you are wondering what WALUtil does, how to use it, or where to find working examples, the curated snippets below should help.
The WALUtil class belongs to the org.apache.hadoop.hbase.regionserver.wal package. Ten code examples are shown below, ordered by popularity.
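All of WALUtil's entry points follow the same pattern: they wrap a protobuf descriptor (a FlushDescriptor, CompactionDescriptor, or RegionEventDescriptor) in a metadata-only WALEdit and append it to the WAL with a sync, so that log replay, replication, and region read replicas can observe region lifecycle events without any client-visible data cells being written. Judging by the differing overloads at the call sites, the examples below are drawn from several HBase versions; they exercise writeRegionEventMarker, writeFlushMarker, and writeCompactionMarker.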
Example 1: writeRegionCloseMarker

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void writeRegionCloseMarker(WAL wal) throws IOException {
  Map<byte[], List<Path>> storeFiles = getStoreFiles();
  RegionEventDescriptor regionEventDesc = ProtobufUtil
      .toRegionEventDescriptor(RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(),
          mvcc.getReadPoint(), getRegionServerServices().getServerName(), storeFiles);
  WALUtil.writeRegionEventMarker(wal, getTableDesc(), getRegionInfo(), regionEventDesc, mvcc);

  // Store the sequence id in HDFS when a region closes. Checking that the region
  // folder exists guards against the many tests that delete the table folder
  // while a table is still online.
  if (this.fs.getFileSystem().exists(this.fs.getRegionDir())) {
    WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs.getRegionDir(),
        mvcc.getReadPoint(), 0);
  }
}
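The marker written above never surfaces as user data; it travels through the WAL as a metadata-only edit under WALEdit's reserved METAFAMILY. The sketch below shows how a WAL consumer can recognize and decode such markers. It is assembled from WALEdit's public static helpers rather than taken from this article, and the import paths follow the HBase 1.x layout used by this example (in 2.x, WALEdit moves to org.apache.hadoop.hbase.wal and the protobuf classes are shaded):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class MarkerInspector {
  /** Prints any region event or flush markers contained in a WAL edit. */
  static void inspect(WALEdit edit) throws IOException {
    for (Cell cell : edit.getCells()) {
      if (!WALEdit.isMetaEditFamily(cell)) {
        continue; // an ordinary data cell, not a marker
      }
      RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(cell);
      if (regionEvent != null) {
        System.out.println("region event marker: " + regionEvent.getEventType());
        continue;
      }
      FlushDescriptor flush = WALEdit.getFlushDescriptor(cell);
      if (flush != null) {
        System.out.println("flush marker: " + flush.getAction());
      }
    }
  }
}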
Example 2: writeFlushRequestMarkerToWAL

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

/**
 * Writes a marker to the WAL indicating that a flush was requested but could not
 * be completed, for whatever reason. Exceptions from the WAL are ignored.
 *
 * @param wal the WAL to write the marker to
 * @param writeFlushWalMarker whether the marker should be written at all
 * @return whether the WAL write was successful
 */
private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarker) {
  if (writeFlushWalMarker && wal != null && !writestate.readOnly) {
    FlushDescriptor desc = ProtobufUtil
        .toFlushDescriptor(FlushAction.CANNOT_FLUSH, getRegionInfo(), -1,
            new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR));
    try {
      WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, true, mvcc);
      return true;
    } catch (IOException e) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
          + "Received exception while trying to write the flush request to wal", e);
    }
  }
  return false;
}
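The -1 passed as the flush sequence id records that no flush actually ran: CANNOT_FLUSH exists only to unblock anything waiting on a flush marker, such as region read replicas. For orientation, the sketch below (not code from the class above) lists the four actions a flush marker can carry, as defined by WALProtos.FlushDescriptor.FlushAction:

import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;

public class FlushMarkerLifecycle {
  public static void main(String[] args) {
    // START_FLUSH   - memstore snapshot taken, flush is beginning
    // COMMIT_FLUSH  - flushed files have been committed to the store
    // ABORT_FLUSH   - a flush failed after START_FLUSH was written
    // CANNOT_FLUSH  - a requested flush could not run (this example)
    for (FlushAction action : FlushAction.values()) {
      System.out.println(action);
    }
  }
}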
Example 3: writeRegionOpenMarker

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException {
  Map<byte[], List<Path>> storeFiles
      = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
  for (Map.Entry<byte[], Store> entry : getStores().entrySet()) {
    Store store = entry.getValue();
    ArrayList<Path> storeFileNames = new ArrayList<Path>();
    for (StoreFile storeFile : store.getStorefiles()) {
      storeFileNames.add(storeFile.getPath());
    }
    storeFiles.put(entry.getKey(), storeFileNames);
  }
  RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor(
      RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(), openSeqId,
      getRegionServerServices().getServerName(), storeFiles);
  WALUtil.writeRegionEventMarker(wal, getTableDesc(), getRegionInfo(), regionOpenDesc,
      getSequenceId());
}
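The hand-rolled loop above simply builds a map from column family to the store's file paths (newer HBase versions factor this into a getStoreFiles() helper, as Examples 1, 9, and 10 show). On Java 8+ the same map can be produced with streams; a behavior-equivalent sketch assuming the same HBase types the example uses:

import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;

final class StoreFileMaps {
  /** Builds the family -&gt; store file paths map that the marker methods need. */
  static Map<byte[], List<Path>> toStoreFileMap(Map<byte[], Store> stores) {
    return stores.entrySet().stream()
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            entry -> entry.getValue().getStorefiles().stream()
                .map(StoreFile::getPath)
                .collect(Collectors.toList()),
            (left, right) -> left, // family keys are unique, so the merge never fires
            () -> new TreeMap<>(Bytes.BYTES_COMPARATOR)));
  }
}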
Example 4: writeRegionCloseMarker

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void writeRegionCloseMarker(WAL wal) throws IOException {
  Map<byte[], List<Path>> storeFiles
      = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
  for (Map.Entry<byte[], Store> entry : getStores().entrySet()) {
    Store store = entry.getValue();
    ArrayList<Path> storeFileNames = new ArrayList<Path>();
    for (StoreFile storeFile : store.getStorefiles()) {
      storeFileNames.add(storeFile.getPath());
    }
    storeFiles.put(entry.getKey(), storeFileNames);
  }
  RegionEventDescriptor regionEventDesc = ProtobufUtil.toRegionEventDescriptor(
      RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(), getSequenceId().get(),
      getRegionServerServices().getServerName(), storeFiles);
  WALUtil.writeRegionEventMarker(wal, getTableDesc(), getRegionInfo(), regionEventDesc,
      getSequenceId());

  // Store the sequence id in HDFS when a region closes. Checking that the region
  // folder exists guards against the many tests that delete the table folder
  // while a table is still online.
  if (this.fs.getFileSystem().exists(this.fs.getRegionDir())) {
    WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs.getRegionDir(),
        getSequenceId().get(), 0);
  }
}
Example 5: writeRegionCloseMarker

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void writeRegionCloseMarker(WAL wal) throws IOException {
  Map<byte[], List<Path>> storeFiles = getStoreFiles();
  RegionEventDescriptor regionEventDesc = ProtobufUtil.toRegionEventDescriptor(
      RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(), mvcc.getReadPoint(),
      getRegionServerServices().getServerName(), storeFiles);
  WALUtil.writeRegionEventMarker(wal, getReplicationScope(), getRegionInfo(), regionEventDesc,
      mvcc);

  // Store the sequence id in HDFS when a region closes. Checking that the region
  // folder exists guards against the many tests that delete the table folder
  // while a table is still online.
  if (this.fs.getFileSystem().exists(this.fs.getRegionDir())) {
    WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), this.fs.getRegionDir(),
        mvcc.getReadPoint(), 0);
  }
}
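Examples 1, 4, and 5 are the same method as it evolved across HBase releases: the oldest variant (Example 4) builds the store file map by hand and passes the region's sequence id accumulator from getSequenceId(), Example 1 passes the MultiVersionConcurrencyControl read point together with the table descriptor, and Example 5 swaps the table descriptor for the per-family replication scope map that the call sites here suggest is the 2.x WALUtil API. The surrounding logic never changes: write the REGION_CLOSE marker, then persist the last sequence id next to the region directory so recovery can pick it up.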
Example 6: doAbortFlushToWAL

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
    final Map<byte[], List<Path>> committedFiles) {
  if (wal == null) return;
  try {
    FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
        getRegionInfo(), flushOpSeqId, committedFiles);
    WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
        mvcc);
  } catch (Throwable t) {
    LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
        StringUtils.stringifyException(t));
    // Ignore this; we will be aborting the region server with a DroppedSnapshotException (DSE).
  }
  // We have called wal.startCacheFlush(); now we have to abort it.
  wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
}
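doAbortFlushToWAL is the failure half of a three-step protocol: the flush path first calls wal.startCacheFlush(...) so the WAL freezes its accounting of the region's lowest unflushed sequence id, and then either completes the flush or, as here, writes an ABORT_FLUSH marker and calls wal.abortCacheFlush(...) to roll that accounting back. Swallowing the marker-write exception is deliberate: the caller is about to raise a DroppedSnapshotException that aborts the whole region server, so a failed marker write changes nothing.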
Example 7: writeFlushRequestMarkerToWAL

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

/**
 * Writes a marker to the WAL indicating that a flush was requested but could not
 * be completed, for whatever reason. Exceptions from the WAL are ignored.
 * @param wal the WAL to write the marker to
 * @return whether the WAL write was successful
 */
private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarker) {
  if (writeFlushWalMarker && wal != null && !writestate.readOnly) {
    FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.CANNOT_FLUSH,
        getRegionInfo(), -1, new TreeMap<>(Bytes.BYTES_COMPARATOR));
    try {
      WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, true,
          mvcc);
      return true;
    } catch (IOException e) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
          + "Received exception while trying to write the flush request to wal", e);
    }
  }
  return false;
}
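Apart from the diamond operator, this is Example 2 recompiled against a newer WALUtil.writeFlushMarker overload that takes the region's per-family replication scope map instead of the HTableDescriptor; it is the same API shift seen between Examples 1 and 5.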
Example 8: writeCompactionWalRecord

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

/**
 * Writes the compaction WAL record.
 * @param filesCompacted the files that were compacted (the inputs)
 * @param newFiles the files produced by the compaction
 */
private void writeCompactionWalRecord(Collection<HStoreFile> filesCompacted,
    Collection<HStoreFile> newFiles) throws IOException {
  if (region.getWAL() == null) {
    return;
  }
  List<Path> inputPaths =
      filesCompacted.stream().map(HStoreFile::getPath).collect(Collectors.toList());
  List<Path> outputPaths =
      newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList());
  RegionInfo info = this.region.getRegionInfo();
  CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
      family.getName(), inputPaths, outputPaths,
      fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
  // Fix reaching into Region to get the maxWaitForSeqId.
  // Does this method belong in Region altogether given it is making so many references up there?
  // Could be Region#writeCompactionMarker(compactionDescriptor);
  WALUtil.writeCompactionMarker(this.region.getWAL(), this.region.getReplicationScope(),
      this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC());
}
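On the consumer side, log replay turns this record back into a CompactionDescriptor so the store can drop the compacted inputs and pick up the outputs. A minimal decoding sketch, using a WALEdit helper that exists in the HBase codebase (the import paths follow the 2.x layout this example comes from; the replay plumbing beyond this point differs between versions):

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.wal.WALEdit;

final class CompactionMarkerReader {
  /** Returns the compaction descriptor carried by the cell, or null for ordinary cells. */
  static CompactionDescriptor tryDecode(Cell cell) throws IOException {
    return WALEdit.getCompaction(cell);
  }
}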
Example 9: writeRegionOpenMarker

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException {
  Map<byte[], List<Path>> storeFiles = getStoreFiles();
  RegionEventDescriptor regionOpenDesc = ProtobufUtil
      .toRegionEventDescriptor(RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(),
          openSeqId, getRegionServerServices().getServerName(), storeFiles);
  WALUtil.writeRegionEventMarker(wal, getTableDesc(), getRegionInfo(), regionOpenDesc, mvcc);
}
Example 10: writeRegionOpenMarker

import org.apache.hadoop.hbase.regionserver.wal.WALUtil; // import the package/class this example depends on

private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException {
  Map<byte[], List<Path>> storeFiles = getStoreFiles();
  RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor(
      RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(), openSeqId,
      getRegionServerServices().getServerName(), storeFiles);
  WALUtil.writeRegionEventMarker(wal, getReplicationScope(), getRegionInfo(), regionOpenDesc,
      mvcc);
}