This article collects typical usage examples of the Java class org.apache.hadoop.hbase.monitoring.MonitoredTask. If you are unsure what MonitoredTask is for or how it is used, the curated examples below should help.
MonitoredTask belongs to the org.apache.hadoop.hbase.monitoring package. 15 code examples of the class are shown below, ordered by popularity by default.
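Before diving into the examples, here is a minimal sketch of the MonitoredTask lifecycle that all of the snippets below follow (create, update, complete or abort, clean up); the task name and the work inside the try block are hypothetical:

import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;

public class MonitoredTaskSketch {
  public static void main(String[] args) {
    // Register a task with the process-wide TaskMonitor so it shows up
    // in the server's task list (e.g. the RegionServer web UI).
    MonitoredTask status = TaskMonitor.get().createStatus("Demo task");
    try {
      status.setStatus("Doing some work"); // update the visible status line
      // ... the actual work would go here ...
      status.markComplete("Done");         // record success
    } catch (RuntimeException e) {
      status.abort("Failed: " + e);        // record failure
    } finally {
      status.cleanup();                    // release the monitoring slot
    }
  }
}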
Example 1: initialize

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Initialize this region.
 *
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
private long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqId will be -1 if initialization fails; otherwise it will be at least 0.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + getRegionInfo().getRegionNameAsString()
          + " initialization.");
    }
  }
}
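The reporter argument above is an org.apache.hadoop.hbase.util.CancelableProgressable, a one-method callback that long-running operations tickle periodically. A minimal sketch of one (the heartbeat comment is a placeholder, not code from HBase):

import org.apache.hadoop.hbase.util.CancelableProgressable;

// Returning true tells the long-running operation to keep going;
// returning false asks it to cancel.
CancelableProgressable reporter = new CancelableProgressable() {
  @Override
  public boolean progress() {
    // e.g. renew a lease or log a heartbeat here
    return true;
  }
};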
Example 2: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Close down this HRegion. Flush the cache unless the abort parameter is true, shut down each
 * HStore, and stop servicing calls. This method could take some time to execute, so don't call
 * it from a time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return Map of all the storage files that the HRegion's component HStores make use of, keyed
 *         by column family. Can be null if we are not to close at this time or we are already
 *         closed.
 * @throws IOException e
 * @throws DroppedSnapshotException Thrown when replay of wal is required because a Snapshot was
 *           not properly persisted. The region is put in closing mode, and the caller MUST abort
 *           after this.
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize closers so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status =
      TaskMonitor.get().createStatus("Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
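A hedged caller-side sketch of close(); the region and LOG variables are hypothetical stand-ins for the surrounding server code:

// Close without aborting; a null return means the region refused to close
// (it is already closed, or closing is not allowed right now).
Map<byte[], List<StoreFile>> closedFiles = region.close(false);
if (closedFiles == null) {
  LOG.warn("Region did not close: already closed or close not permitted");
}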
Example 3: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully; move the file into the right spot.
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path, this, true);
  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile sf = createStoreFileAndReader(dstPath);
  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();
  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
        + ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
  }
  return sf;
}
Example 4: becomeActiveMaster

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Try becoming the active master.
 * @param startupStatus monitored task to update with startup progress
 * @return True if we could successfully become the active master.
 * @throws InterruptedException
 */
private boolean becomeActiveMaster(MonitoredTask startupStatus)
    throws InterruptedException {
  // TODO: This is wrong!!!! Should have new servername if we restart ourselves,
  // if we come back to life.
  this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
  this.zooKeeper.registerListener(activeMasterManager);
  stallIfBackupMaster(this.conf, this.activeMasterManager);
  // The ClusterStatusTracker is set up before the other ZKBasedSystemTrackers
  // because the activeMasterManager needs it to check whether the cluster
  // should be shut down.
  this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this);
  this.clusterStatusTracker.start();
  return this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus,
      this.clusterStatusTracker);
}
Example 5: initialize

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Initialize this region.
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
public long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqId will be -1 if initialization fails; otherwise it will be at least 0.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + this.getRegionNameAsString()
          + " initialization.");
    }
  }
}
Example 6: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Close down this HRegion. Flush the cache unless the abort parameter is true, shut down each
 * HStore, and stop servicing calls. This method could take some time to execute, so don't call
 * it from a time-sensitive thread.
 * @param abort true if server is aborting (only during testing)
 * @return List of all the storage files that the HRegion's component HStores make use of. Can
 *         be null if we are not to close at this time or we are already closed.
 * @throws IOException e
 */
public List<StoreFile> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize closers so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status =
      TaskMonitor.get().createStatus("Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
Example 7: commit

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

@Override
public boolean commit(MonitoredTask status) throws IOException {
  if (storeFilePath == null) {
    return false;
  }
  storeFile = Store.this.commitFile(storeFilePath, cacheFlushId, snapshotTimeRangeTracker,
      flushedSize, status);
  if (Store.this.getHRegion().getCoprocessorHost() != null) {
    Store.this.getHRegion().getCoprocessorHost().postFlush(Store.this, storeFile);
  }
  // Add the new file to the store files. Clear the snapshot too while we
  // still hold the Store write lock.
  return Store.this.updateStorefiles(storeFile, snapshot);
}
Example 8: RestoreSnapshotHelper

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

public RestoreSnapshotHelper(final Configuration conf,
    final FileSystem fs,
    final SnapshotManifest manifest,
    final HTableDescriptor tableDescriptor,
    final Path rootDir,
    final ForeignExceptionDispatcher monitor,
    final MonitoredTask status) {
  this.fs = fs;
  this.conf = conf;
  this.snapshotManifest = manifest;
  this.snapshotDesc = manifest.getSnapshotDescription();
  this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
}
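A hedged sketch of wiring up this constructor. SnapshotDescriptionUtils.readSnapshotInfo, SnapshotManifest.open, and restoreHdfsRegions are assumed from HBase sources of this vintage and may differ between versions; conf, fs, rootDir, snapshotDir, and tableDescriptor are assumed to come from the surrounding restore code:

// Load the snapshot description and manifest from the completed snapshot dir.
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
MonitoredTask status = TaskMonitor.get().createStatus(
    "Restoring snapshot " + desc.getName());

RestoreSnapshotHelper helper = new RestoreSnapshotHelper(
    conf, fs, manifest, tableDescriptor, rootDir, monitor, status);
helper.restoreHdfsRegions(); // applies the region/file changes on HDFS
status.markComplete("Restore finished");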
Example 9: flushSnapshot

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Turns a snapshot of the memstore into a set of store files.
 *
 * @param snapshot {@link PMemStoreSnapshot} snapshot.
 * @param cacheFlushSeqNum Log cache flush sequence number.
 * @param status Task that represents the flush operation and may be updated with status.
 * @return List of files written. Can be empty; must not be null.
 */
@Override
public List<Path> flushSnapshot(PMemStoreSnapshot snapshot, long cacheFlushSeqNum,
    MonitoredTask status) throws IOException {
  if (snapshot.getMutationCount() == 0) {
    return new ArrayList<>();
  }
  ArrayList<Path> result = new ArrayList<>();
  Map<String, String> meta = new HashMap<>();
  meta.put(HConstants.START_KEY, Bytes.toString(snapshot.getStartKey()));
  meta.put(HConstants.END_KEY, Bytes.toString(snapshot.getEndKey()));
  PStoreFile.Writer writer = ((HStore) store).createParquetWriter(meta);
  if (writer == null) {
    return result;
  }
  RowScanner scanner = snapshot.getScanner();
  while (scanner.hasNext()) {
    Mutation m = scanner.nextRow();
    writer.append(m);
  }
  writer.close();
  result.add(writer.getFilePath());
  return result;
}
Example 10: initialize

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Initialize this region.
 *
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
private long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqId will be -1 if initialization fails; otherwise it will be at least 0.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + this.getRegionNameAsString()
          + " initialization.");
    }
  }
}
Example 11: close

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Close down this HRegion. Flush the cache unless the abort parameter is true, shut down each
 * HStore, and stop servicing calls.
 * <p/>
 * This method could take some time to execute, so don't call it from a time-sensitive thread.
 *
 * @param abort true if server is aborting (only during testing)
 * @return Map of all the storage files that the HRegion's component HStores make use of, keyed
 *         by column family. Can be null if we are not to close at this time or we are already
 *         closed.
 * @throws IOException e
 */
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
  // Only allow one thread to close at a time. Serialize closers so dual
  // threads attempting to close will run up against each other.
  MonitoredTask status = TaskMonitor.get().createStatus(
      "Closing region " + this + (abort ? " due to abort" : ""));
  status.setStatus("Waiting for close lock");
  try {
    synchronized (closeLock) {
      return doClose(abort, status);
    }
  } finally {
    status.cleanup();
  }
}
Example 12: commitFile

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully; move the file into the right spot.
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile sf = createStoreFileAndReader(dstPath);
  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();
  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() +
        ", sequenceid=" + logCacheFlushId +
        ", filesize=" + StringUtils.humanReadableInt(r.length()));
  }
  return sf;
}
Example 13: becomeActiveMaster

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Try becoming the active master.
 * @param startupStatus monitored task to update with startup progress
 * @return True if we could successfully become the active master.
 * @throws InterruptedException
 */
private boolean becomeActiveMaster(MonitoredTask startupStatus)
    throws InterruptedException {
  // TODO: This is wrong!!!! Should have new servername if we restart ourselves,
  // if we come back to life.
  this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
  this.zooKeeper.registerListener(activeMasterManager);
  stallIfBackupMaster(this.conf, this.activeMasterManager);
  // The ClusterStatusTracker is set up before the other ZKBasedSystemTrackers
  // because the activeMasterManager needs it to check whether the cluster
  // should be shut down.
  this.clusterStatusTracker = new ClusterStatusTracker(getZooKeeper(), this);
  this.clusterStatusTracker.start();
  return this.activeMasterManager.blockUntilBecomingActiveMaster(startupStatus);
}
Example 14: RestoreSnapshotHelper

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

public RestoreSnapshotHelper(final Configuration conf,
    final FileSystem fs,
    final SnapshotDescription snapshotDescription,
    final Path snapshotDir,
    final HTableDescriptor tableDescriptor,
    final Path rootDir,
    final ForeignExceptionDispatcher monitor,
    final MonitoredTask status) {
  this.fs = fs;
  this.conf = conf;
  this.snapshotDesc = snapshotDescription;
  this.snapshotTable = TableName.valueOf(snapshotDescription.getTable());
  this.snapshotDir = snapshotDir;
  this.tableDesc = tableDescriptor;
  this.rootDir = rootDir;
  this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
  this.monitor = monitor;
  this.status = status;
}
Example 15: initialize

import org.apache.hadoop.hbase.monitoring.MonitoredTask; // import the required package/class

/**
 * Initialize this region.
 *
 * @param reporter Tickle every so often if initialize is taking a while.
 * @return What the next sequence (edit) id should be.
 * @throws IOException e
 */
private long initialize(final CancelableProgressable reporter) throws IOException {
  MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
  long nextSeqId = -1;
  try {
    nextSeqId = initializeRegionInternals(reporter, status);
    return nextSeqId;
  } finally {
    // nextSeqId will be -1 if initialization fails; otherwise it will be at least 0.
    if (nextSeqId == -1) {
      status.abort("Exception during region " + this.getRegionNameAsString()
          + " initialization.");
    }
  }
}