This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.Storage.StorageState: what the class is for and how it is used in practice. The curated examples below should help answer those questions.
The StorageState class belongs to the org.apache.hadoop.hdfs.server.common.Storage package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
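For orientation: StorageState is the enum returned by StorageDirectory.analyzeStorage() and describes what that analysis found on disk. The listing below reflects the constants in mainline Apache Hadoop source; exact membership can vary slightly between releases.

public enum StorageState {
  NON_EXISTENT,         // directory is missing or inaccessible
  NOT_FORMATTED,        // directory exists but has no current/ or VERSION
  COMPLETE_UPGRADE,     // interrupted upgrade/rollback/finalize/checkpoint
  RECOVER_UPGRADE,      //   transitions; doRecover() rolls these forward
  COMPLETE_FINALIZE,    //   or back to a consistent state
  COMPLETE_ROLLBACK,
  RECOVER_ROLLBACK,
  COMPLETE_CHECKPOINT,
  RECOVER_CHECKPOINT,
  NORMAL                // consistent and ready to use
}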
Example 1: recoverStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
private boolean recoverStorageDirs(StartupOption startOpt,
Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
boolean isFormatted = false;
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
isFormatted |= NNStorage.recoverDirectory(sd, startOpt, curState, true);
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}
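A minimal sketch of how a caller might consume this method's two outputs; the surrounding FSImage wiring (the storage field, the startup option, and a java.util.HashMap import) is assumed rather than shown in the source:

Map<StorageDirectory, StorageState> dataDirStates =
    new HashMap<StorageDirectory, StorageState>();
boolean isFormatted = recoverStorageDirs(startOpt, dataDirStates);
if (!isFormatted && startOpt != StartupOption.FORMAT
    && startOpt != StartupOption.IMPORT) {
  // hypothetical policy: refuse to start on a completely unformatted image
  throw new IOException("NameNode is not formatted.");
}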
Example 2: recoverCreateRead
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* Analyze backup storage directories for consistency.<br>
* Recover from incomplete checkpoints if required.<br>
* Read VERSION and fstime files if exist.<br>
* Do not load image or edits.
*
* @throws IOException if the node should shutdown.
*/
void recoverCreateRead() throws IOException {
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// fail if any of the configured storage dirs are inaccessible
throw new InconsistentFSStateException(sd.getRoot(),
"checkpoint directory does not exist or is not accessible.");
case NOT_FORMATTED:
// for backup node all directories may be unformatted initially
LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
LOG.info("Formatting ...");
sd.clearDirectory(); // create empty current
break;
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
if(curState != StorageState.NOT_FORMATTED) {
// read and verify consistency with other directories
storage.readProperties(sd);
}
} catch(IOException ioe) {
sd.unlock();
throw ioe;
}
}
}
Example 3: checkAllowedNonFileState
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* Check if remote image/journal storage is in allowed state.
*/
private void checkAllowedNonFileState(StorageState curState, Object name)
throws IOException {
switch (curState) {
case NON_EXISTENT:
case NOT_FORMATTED:
case NORMAL:
break;
default:
throwIOException("ImageManager bad state: " + curState + " for: "
+ name.toString());
}
}
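A short usage sketch: before starting a transition, each remote manager's reported state could be validated with this check. RemoteStorageState and analyzeJournalStorage() appear in examples 6-8 below; the imageManagers collection and the loop itself are assumptions for illustration:

for (ImageManager im : imageManagers) {  // hypothetical collection of managers
  RemoteStorageState remote = im.analyzeJournalStorage();
  checkAllowedNonFileState(remote.getStorageState(), im);
}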
Example 4: recoverStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
public static boolean recoverStorageDirs(StartupOption startOpt,
NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates)
throws IOException {
boolean isFormatted = false;
// This loop needs to be over all storage dirs, even shared dirs, to make
// sure that we properly examine their state, but we make sure we don't
// mutate the shared dir below in the actual loop.
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
if (startOpt == StartupOption.METADATAVERSION) {
/* All we need is the layout version. */
storage.readProperties(sd);
return true;
}
try {
curState = sd.analyzeStorage(startOpt, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// name-node fails if any of the configured storage dirs are missing
throw new InconsistentFSStateException(sd.getRoot(),
"storage directory does not exist or is not accessible.");
case NOT_FORMATTED:
break;
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
if (curState != StorageState.NOT_FORMATTED
&& startOpt != StartupOption.ROLLBACK) {
// read and verify consistency with other directories
storage.readProperties(sd, startOpt);
isFormatted = true;
}
if (startOpt == StartupOption.IMPORT && isFormatted)
// import of a checkpoint is allowed only into empty image directories
throw new IOException("Cannot import image from a checkpoint. "
+ " NameNode already contains an image in " + sd.getRoot());
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}
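Because of the METADATAVERSION short-circuit above, a caller that only needs the on-disk layout version can reuse this method without running full recovery. A hedged sketch, written as if from within the enclosing class (getLayoutVersion() is inherited by NNStorage from StorageInfo):

Map<StorageDirectory, StorageState> states =
    new HashMap<StorageDirectory, StorageState>();
recoverStorageDirs(StartupOption.METADATAVERSION, storage, states);
// With METADATAVERSION the method returns after reading the first
// directory's VERSION file, so `states` remains empty here.
System.out.println("Layout version: " + storage.getLayoutVersion());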
Example 5: recoverCreate
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* Analyze checkpoint directories.
* Create directories if they do not exist.
* Recover from an unsuccessful checkpoint if necessary.
*
* @throws IOException
*/
void recoverCreate(boolean format) throws IOException {
storage.attemptRestoreRemovedStorage();
storage.unlockAll();
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
boolean isAccessible = true;
try { // create directories if they don't exist yet
if(!sd.getRoot().mkdirs()) {
// do nothing, directory is already created
}
} catch(SecurityException se) {
isAccessible = false;
}
if(!isAccessible)
throw new InconsistentFSStateException(sd.getRoot(),
"cannot access checkpoint directory.");
if (format) {
// Don't confirm, since this is just the secondary namenode.
LOG.info("Formatting storage directory " + sd);
sd.clearDirectory();
}
StorageState curState;
try {
curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// fail if any of the configured checkpoint dirs are inaccessible
throw new InconsistentFSStateException(sd.getRoot(),
"checkpoint directory does not exist or is not accessible.");
case NOT_FORMATTED:
break; // it's ok since initially there is no current and VERSION
case NORMAL:
// Read the VERSION file. This verifies that:
// (a) the VERSION file for each of the directories is the same,
// and (b) when we connect to a NN, we can verify that the remote
// node matches the same namespace that we ran on previously.
storage.readProperties(sd);
break;
default: // recovery is possible
sd.doRecover(curState);
}
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
}
Example 6: analyzeJournalStorage
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
@Override
public RemoteStorageState analyzeJournalStorage() {
return new RemoteStorageState(StorageState.NON_EXISTENT, new StorageInfo());
}
Example 7: RemoteStorageState
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
public RemoteStorageState(StorageState state, StorageInfo storageInfo) {
this.state = state;
this.storageInfo = storageInfo;
}
Example 8: getStorageState
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
public StorageState getStorageState() {
return state;
}
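Examples 7 and 8 are fragments of the same small value class. Assembled into a self-contained form, it might look like this; the field declarations and the getStorageInfo() companion getter are inferred, not shown in the source:

public class RemoteStorageState {
  private final StorageState state;
  private final StorageInfo storageInfo;

  public RemoteStorageState(StorageState state, StorageInfo storageInfo) {
    this.state = state;
    this.storageInfo = storageInfo;
  }

  public StorageState getStorageState() {
    return state;
  }

  public StorageInfo getStorageInfo() {  // assumed companion getter
    return storageInfo;
  }
}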
Example 9: recoverCreate
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* Analyze checkpoint directories.
* Create directories if they do not exist.
* Recover from an unsuccessful checkpoint if necessary.
*
* @param dataDirs
* @param editsDirs
* @throws IOException
*/
void recoverCreate(Collection<URI> dataDirs,
Collection<URI> editsDirs) throws IOException {
Collection<URI> tempDataDirs = new ArrayList<URI>(dataDirs);
Collection<URI> tempEditsDirs = new ArrayList<URI>(editsDirs);
storage.setStorageDirectories(tempDataDirs, tempEditsDirs, null);
imageSet = new ImageSet(this, tempDataDirs, tempEditsDirs, null);
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
boolean isAccessible = true;
try { // create directories if they don't exist yet
if(!sd.getRoot().mkdirs()) {
// do nothing, directory is already created
}
} catch(SecurityException se) {
isAccessible = false;
}
if(!isAccessible)
throw new InconsistentFSStateException(sd.getRoot(),
"cannot access checkpoint directory.");
StorageState curState;
try {
curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// fail if any of the configured checkpoint dirs are inaccessible
throw new InconsistentFSStateException(sd.getRoot(),
"checkpoint directory does not exist or is not accessible.");
case NOT_FORMATTED:
break; // it's ok since initially there is no current and VERSION
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
}
Example 10: analyzeStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
ArrayList<StorageDirectory> analyzeStorageDirs(NamespaceInfo nsInfo,
Collection<File> dataDirs,
StartupOption startOpt
) throws IOException {
if (storageID == null)
this.storageID = "";
if (storageDirs == null) {
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
} else {
((ArrayList<StorageDirectory>) storageDirs)
.ensureCapacity(storageDirs.size() + dataDirs.size());
}
ArrayList<StorageDirectory> newDirs = new ArrayList<StorageDirectory>(
dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next();
StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt);
// sd is locked but not opened
switch(curState) {
case NORMAL:
break;
case NON_EXISTENT:
// ignore this storage
LOG.info("Storage directory " + dataDir + " does not exist.");
it.remove();
continue;
case NOT_FORMATTED: // format
LOG.info("Storage directory " + dataDir + " is not formatted.");
if (!sd.isEmpty()) {
LOG.error("Storage directory " + dataDir
+ " is not empty, and will not be formatted! Exiting.");
throw new IOException(
"Storage directory " + dataDir + " is not empty!");
}
LOG.info("Formatting ...");
format(sd, nsInfo);
break;
default: // recovery part is common
sd.doRecover(curState);
}
} catch (IOException ioe) {
try {
sd.unlock();
}
catch (IOException e) {
LOG.warn("Exception when unlocking storage directory", e);
}
LOG.warn("Ignoring storage directory " + dataDir, ioe);
//continue with other good dirs
continue;
}
// add to the storage list
addStorageDir(sd);
newDirs.add(sd);
dataDirStates.add(curState);
}
if (dataDirs.size() == 0) // none of the data dirs exist
throw new IOException(
"All specified directories are not accessible or do not exist.");
return newDirs;
}
Example 11: GetStorageStateProto
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
public GetStorageStateProto(StorageState state, StorageInfo storageInfo) {
this.state = state;
this.storageInfo = storageInfo;
}
Example 12: readFields
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
@Override
public void readFields(DataInput in) throws IOException {
state = WritableUtils.readEnum(in, StorageState.class);
storageInfo = new StorageInfo();
storageInfo.readFields(in);
}
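readFields() implies a matching write() on the same Writable; below is a sketch of that counterpart using WritableUtils.writeEnum. The pairing follows Hadoop's standard Writable contract, but this particular write() is not shown in the source:

@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeEnum(out, state);
  storageInfo.write(out);
}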
Example 13: recoverStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
private boolean recoverStorageDirs(StartupOption startOpt,
Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
boolean isFormatted = false;
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt, storage);
String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
if (curState != StorageState.NORMAL && HAUtil.isHAEnabled(conf, nameserviceId)) {
throw new IOException("Cannot start an HA namenode with name dirs " +
"that need recovery. Dir: " + sd + " state: " + curState);
}
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// name-node fails if any of the configured storage dirs are missing
throw new InconsistentFSStateException(sd.getRoot(),
"storage directory does not exist or is not accessible.");
case NOT_FORMATTED:
break;
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
if (curState != StorageState.NOT_FORMATTED
&& startOpt != StartupOption.ROLLBACK) {
// read and verify consistency with other directories
storage.readProperties(sd);
isFormatted = true;
}
if (startOpt == StartupOption.IMPORT && isFormatted)
// import of a checkpoint is allowed only into empty image directories
throw new IOException("Cannot import image from a checkpoint. "
+ " NameNode already contains an image in " + sd.getRoot());
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}
Example 14: recoverStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
private boolean recoverStorageDirs(StartupOption startOpt,
Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
boolean isFormatted = false;
// This loop needs to be over all storage dirs, even shared dirs, to make
// sure that we properly examine their state, but we make sure we don't
// mutate the shared dir below in the actual loop.
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
if (startOpt == StartupOption.METADATAVERSION) {
/* All we need is the layout version. */
storage.readProperties(sd);
return true;
}
try {
curState = sd.analyzeStorage(startOpt, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// name-node fails if any of the configured storage dirs are missing
throw new InconsistentFSStateException(sd.getRoot(),
"storage directory does not exist or is not accessible.");
case NOT_FORMATTED:
break;
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
if (curState != StorageState.NOT_FORMATTED
&& startOpt != StartupOption.ROLLBACK) {
// read and verify consistency with other directories
storage.readProperties(sd, startOpt);
isFormatted = true;
}
if (startOpt == StartupOption.IMPORT && isFormatted)
// import of a checkpoint is allowed only into empty image directories
throw new IOException("Cannot import image from a checkpoint. "
+ " NameNode already contains an image in " + sd.getRoot());
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}
Example 15: recoverStorageDirs
import org.apache.hadoop.hdfs.server.common.Storage.StorageState; // import the required package/class
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
private boolean recoverStorageDirs(StartupOption startOpt,
Map<StorageDirectory, StorageState> dataDirStates) throws IOException {
boolean isFormatted = false;
// This loop needs to be over all storage dirs, even shared dirs, to make
// sure that we properly examine their state, but we make sure we don't
// mutate the shared dir below in the actual loop.
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// name-node fails if any of the configured storage dirs are missing
throw new InconsistentFSStateException(sd.getRoot(),
"storage directory does not exist or is not accessible.");
case NOT_FORMATTED:
break;
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
if (curState != StorageState.NOT_FORMATTED
&& startOpt != StartupOption.ROLLBACK) {
// read and verify consistency with other directories
storage.readProperties(sd);
isFormatted = true;
}
if (startOpt == StartupOption.IMPORT && isFormatted)
// import of a checkpoint is allowed only into empty image directories
throw new IOException("Cannot import image from a checkpoint. "
+ " NameNode already contains an image in " + sd.getRoot());
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}