This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.beginStep. If you are looking for concrete answers to questions such as what StartupProgress.beginStep does or how to call it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.
The following 13 code examples of StartupProgress.beginStep are listed in order of popularity.
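All of the examples share the same lifecycle around beginStep. The sketch below is a minimal illustration, not Hadoop source: trackStep, itemCount, and the commented-out loadItem call are hypothetical placeholders, while NameNode, StartupProgress, Phase, Step, StepType, and Counter are the real classes used throughout the examples.

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

// Minimal sketch of the recurring pattern: begin the step, declare a total,
// obtain a counter, increment it once per item processed, then end the step.
void trackStep() {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);           // pick the step type matching the work
  prog.beginStep(Phase.LOADING_FSIMAGE, step);              // mark the step as started
  int itemCount = 100;                                      // placeholder total
  prog.setTotal(Phase.LOADING_FSIMAGE, step, itemCount);    // declare how many items will be processed
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  try {
    for (int i = 0; i < itemCount; i++) {
      // loadItem(i);                                       // hypothetical per-item work
      counter.increment();                                  // advance the reported progress
    }
  } finally {
    prog.endStep(Phase.LOADING_FSIMAGE, step);              // always close the step
  }
}

The real examples that follow apply this pattern while saving and loading delegation tokens, delegation keys, cache pools, cache directives, edit logs, and the fsimage itself.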
Example 1: saveCurrentTokens
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper methods to save delegation keys and tokens in fsimage
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Example 2: saveAllKeys
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Example 3: loadCurrentTokens
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper methods to load Delegation tokens from fsimage
 */
private synchronized void loadCurrentTokens(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfTokens = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfTokens; i++) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    long expiryTime = in.readLong();
    addPersistedDelegationToken(id, expiryTime);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Example 4: loadAllKeys
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper method to load delegation keys from fsimage.
 * @throws IOException on error
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Example 5: loadFSEdits
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load an edit log, and apply the changes to the in-memory structure
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = monotonicNow();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName()
        + " of size " + edits.length() + " edits # " + numEdits
        + " loaded in " + (monotonicNow()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
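Note where loadFSEdits calls endStep: it sits in the finally block, after the edit stream is closed and the namesystem write lock is released, so the LOADING_EDITS step is always marked as finished even if loadEditRecords fails partway through.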
Example 6: loadDirectives
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load cache directives from the fsimage
 */
private void loadDirectives(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numDirectives = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numDirectives; i++) {
    CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
    // Get pool reference by looking it up in the map
    final String poolName = info.getPool();
    CacheDirective directive =
        new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
            info.getReplication(), info.getExpiration().getAbsoluteMillis());
    addCacheDirective(poolName, directive);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Example 7: loadFSEdits
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load an edit log, and apply the changes to the in-memory structure
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = now();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName()
        + " of size " + edits.length() + " edits # " + numEdits
        + " loaded in " + (now()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
Example 8: loadAllKeys
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper method to load delegation keys from fsimage.
 * @param in
 * @throws IOException
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Example 9: loadFSEdits
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load an edit log, and apply the changes to the in-memory structure
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = now();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false,
        expectedStartingTxId, recovery);
    FSImage.LOG.info("Edits file " + edits.getName()
        + " of size " + edits.length() + " edits # " + numEdits
        + " loaded in " + (now()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
Example 10: savePools
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Save cache pools to fsimage
 */
private void savePools(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(cachePools.size());
  for (CachePool pool: cachePools.values()) {
    FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Example 11: saveDirectives
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
private void saveDirectives(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(directivesById.size());
  for (CacheDirective directive : directivesById.values()) {
    FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
Example 12: loadPools
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load cache pools from fsimage
 */
private void loadPools(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfPools = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfPools; i++) {
    addCachePool(FSImageSerialization.readCachePoolInfo(in));
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
Example 13: save
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();
  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  FSDirectory fsDir = sourceNamesystem.dir;
  String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
  Step step = new Step(StepType.INODES, sdPath);
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step,
      fsDir.rootDir.numItemsInTree());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  long startTime = now();
  //
  // Write out data
  //
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = new FileOutputStream(newFile);
  DigestOutputStream fos = new DigestOutputStream(fout, digester);
  DataOutputStream out = new DataOutputStream(fos);
  try {
    out.writeInt(HdfsConstants.LAYOUT_VERSION);
    // We use the non-locked version of getNamespaceInfo here since
    // the coordinating thread of saveNamespace already has read-locked
    // the namespace for us. If we attempt to take another readlock
    // from the actual saver thread, there's a potential of a
    // fairness-related deadlock. See the comments on HDFS-2223.
    out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
        .getNamespaceID());
    out.writeLong(fsDir.rootDir.numItemsInTree());
    out.writeLong(sourceNamesystem.getGenerationStampV1());
    out.writeLong(sourceNamesystem.getGenerationStampV2());
    out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
    out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.getLastInodeId());
    sourceNamesystem.getSnapshotManager().write(out);
    // write compression info and set up compressed stream
    out = compression.writeHeaderAndWrapStream(fos);
    LOG.info("Saving image file " + newFile +
        " using " + compression);
    // save the root
    saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
    // save the rest of the nodes
    saveImage(fsDir.rootDir, out, true, counter);
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
    // Now that the step is finished, set counter equal to total to adjust
    // for possible under-counting due to reference inodes.
    prog.setCount(Phase.SAVING_CHECKPOINT, step,
        fsDir.rootDir.numItemsInTree());
    // save files under construction
    sourceNamesystem.saveFilesUnderConstruction(out);
    context.checkCancelled();
    sourceNamesystem.saveSecretManagerState(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    out.close();
  }
  saved = true;
  // set md5 of the saved image
  savedDigest = new MD5Hash(digester.digest());
  LOG.info("Image file " + newFile + " of size " + newFile.length() +
      " bytes saved in " + (now() - startTime)/1000 + " seconds.");
}