This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.common.StorageInfo. If you have been wondering what StorageInfo is for or how to use it, the selected code examples below should help.
The StorageInfo class belongs to the org.apache.hadoop.hdfs.server.common package. A total of 15 code examples are shown below, ordered by popularity by default.
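Before diving into the individual examples, here is a minimal, self-contained sketch of how a StorageInfo instance is typically constructed and inspected. It is only an orientation aid: the five-argument constructor and the example values are assumptions based on Hadoop 2.x and may differ in other releases.

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class StorageInfoSketch {
  public static void main(String[] args) {
    // Hypothetical values chosen purely for illustration.
    StorageInfo info = new StorageInfo(
        -63,                 // layout version
        1234567890,          // namespace ID
        "CID-example",       // cluster ID
        0L,                  // creation time (cTime)
        NodeType.NAME_NODE); // which kind of node owns this storage

    // The accessors that appear throughout the examples below.
    System.out.println(info.getLayoutVersion());
    System.out.println(info.getNamespaceID());
    System.out.println(info.getClusterID());
    System.out.println(info.getCTime());
  }
}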
Example 1: canRollBack
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
/**
 * Return true if this storage dir can roll back to the previous storage
 * state, false otherwise. The NN will refuse to run the rollback operation
 * unless at least one JM or fsimage storage directory can roll back.
 *
 * @param sd the storage directory to check
 * @param storage the storage info for the current state
 * @param prevStorage the storage info for the previous (unupgraded) state
 * @param targetLayoutVersion the layout version we intend to roll back to
 * @return true if this JM can roll back, false otherwise.
 * @throws IOException in the event of an error
 */
static boolean canRollBack(StorageDirectory sd, StorageInfo storage,
    StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) { // use current directory then
    LOG.info("Storage directory " + sd.getRoot()
        + " does not contain previous fs state.");
    // read and verify consistency with other directories
    storage.readProperties(sd);
    return false;
  }
  // read and verify consistency of the prev dir
  prevStorage.readPreviousVersionProperties(sd);
  if (prevStorage.getLayoutVersion() != targetLayoutVersion) {
    throw new IOException(
        "Cannot rollback to storage version " +
        prevStorage.getLayoutVersion() +
        " using this version of the NameNode, which uses storage version " +
        targetLayoutVersion + ". " +
        "Please use the previous version of HDFS to perform the rollback.");
  }
  return true;
}
Example 2: checkStorageInfoOrSendError
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
private boolean checkStorageInfoOrSendError(JNStorage storage,
    HttpServletRequest request, HttpServletResponse response)
    throws IOException {
  int myNsId = storage.getNamespaceID();
  String myClusterId = storage.getClusterID();

  String theirStorageInfoString = StringEscapeUtils.escapeHtml(
      request.getParameter(STORAGEINFO_PARAM));

  if (theirStorageInfoString != null) {
    int theirNsId = StorageInfo.getNsIdFromColonSeparatedString(
        theirStorageInfoString);
    String theirClusterId = StorageInfo.getClusterIdFromColonSeparatedString(
        theirStorageInfoString);
    if (myNsId != theirNsId || !myClusterId.equals(theirClusterId)) {
      String msg = "This node has namespaceId '" + myNsId + "' and clusterId '"
          + myClusterId + "' but the requesting node expected '" + theirNsId
          + "' and '" + theirClusterId + "'";
      response.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
      LOG.warn("Received an invalid file transfer request from " +
          request.getRemoteAddr() + ": " + msg);
      return false;
    }
  }
  return true;
}
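The check above relies on the colon-separated wire form of StorageInfo that the requesting node sends in STORAGEINFO_PARAM (see Examples 6 and 7, which build it with toColonSeparatedString()). The snippet below sketches the receiving side in isolation; the literal string and its assumed layoutVersion:namespaceID:cTime:clusterID field order are illustrative assumptions based on Hadoop 2.x.

import org.apache.hadoop.hdfs.server.common.StorageInfo;

public class ColonSeparatedParseSketch {
  public static void main(String[] args) {
    // Hypothetical wire form, as would be produced by toColonSeparatedString().
    String theirStorageInfoString = "-63:1234567890:0:CID-example";

    // The same static helpers used in Example 2 to extract the IDs to compare.
    int theirNsId = StorageInfo.getNsIdFromColonSeparatedString(
        theirStorageInfoString);
    String theirClusterId = StorageInfo.getClusterIdFromColonSeparatedString(
        theirStorageInfoString);

    System.out.println(theirNsId + " / " + theirClusterId);
  }
}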
Example 3: canRollBack
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
@Override
public Boolean canRollBack(String journalId, StorageInfo storage,
    StorageInfo prevStorage, int targetLayoutVersion) throws IOException {
  try {
    CanRollBackResponseProto response = rpcProxy.canRollBack(
        NULL_CONTROLLER,
        CanRollBackRequestProto.newBuilder()
            .setJid(convertJournalId(journalId))
            .setStorage(PBHelper.convert(storage))
            .setPrevStorage(PBHelper.convert(prevStorage))
            .setTargetLayoutVersion(targetLayoutVersion)
            .build());
    return response.getCanRollBack();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example 4: testConvertNamenodeRegistration
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
@Test
public void testConvertNamenodeRegistration() {
  StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration reg = new NamenodeRegistration("address:999",
      "http:1000", info, NamenodeRole.NAMENODE);
  NamenodeRegistrationProto regProto = PBHelper.convert(reg);
  NamenodeRegistration reg2 = PBHelper.convert(regProto);
  assertEquals(reg.getAddress(), reg2.getAddress());
  assertEquals(reg.getClusterID(), reg2.getClusterID());
  assertEquals(reg.getCTime(), reg2.getCTime());
  assertEquals(reg.getHttpAddress(), reg2.getHttpAddress());
  assertEquals(reg.getLayoutVersion(), reg2.getLayoutVersion());
  assertEquals(reg.getNamespaceID(), reg2.getNamespaceID());
  assertEquals(reg.getRegistrationID(), reg2.getRegistrationID());
  assertEquals(reg.getRole(), reg2.getRole());
  assertEquals(reg.getVersion(), reg2.getVersion());
}
Example 5: createDataNodeVersionFile
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
/**
 * Create a <code>version</code> file for a datanode inside each of the
 * specified parent directories. If such a file already exists, it will be
 * overwritten. The given version string will be written to the file as the
 * layout version. None of the parameters may be null.
 *
 * @param parent directories where the datanode VERSION files are stored
 * @param version StorageInfo to create VERSION file from
 * @param bpid Block pool Id
 * @param bpidToWrite Block pool Id to write into the version file
 */
public static void createDataNodeVersionFile(File[] parent,
    StorageInfo version, String bpid, String bpidToWrite) throws IOException {
  DataStorage storage = new DataStorage(version);
  storage.setDatanodeUuid("FixedDatanodeUuid");

  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    File versionFile = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.createStorageID(sd, false);
    storage.writeProperties(versionFile, sd);
    versionFiles[i] = versionFile;
    File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
    createBlockPoolVersionFile(bpDir, version, bpidToWrite);
  }
}
Example 6: getParamStringForImage
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
static String getParamStringForImage(NameNodeFile nnf, long txid,
    StorageInfo remoteStorageInfo) {
  final String imageType = nnf == null ? "" : "&" + IMAGE_FILE_TYPE + "="
      + nnf.name();
  return "getimage=1&" + TXID_PARAM + "=" + txid
      + imageType
      + "&" + STORAGEINFO_PARAM + "=" +
      remoteStorageInfo.toColonSeparatedString();
}
Example 7: getParamStringForLog
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
static String getParamStringForLog(RemoteEditLog log,
    StorageInfo remoteStorageInfo) {
  return "getedit=1&" + START_TXID_PARAM + "=" + log.getStartTxId()
      + "&" + END_TXID_PARAM + "=" + log.getEndTxId()
      + "&" + STORAGEINFO_PARAM + "=" +
      remoteStorageInfo.toColonSeparatedString();
}
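Both helpers append the remote node's StorageInfo in its colon-separated form, so that the servlet serving the image or edit-log file can run the same kind of storage-consistency check shown in Example 2 before any data is transferred.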
Example 8: CheckpointSignature
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public CheckpointSignature(StorageInfo info, String blockpoolID,
    long mostRecentCheckpointTxId, long curSegmentTxId) {
  super(info);
  this.blockpoolID = blockpoolID;
  this.mostRecentCheckpointTxId = mostRecentCheckpointTxId;
  this.curSegmentTxId = curSegmentTxId;
}
Example 9: canRollBackSharedLog
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public synchronized boolean canRollBackSharedLog(StorageInfo prevStorage,
    int targetLayoutVersion) throws IOException {
  for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
    if (jas.isShared()) {
      return jas.getManager().canRollBack(storage, prevStorage,
          targetLayoutVersion);
    }
  }
  throw new IOException("No shared log found.");
}
Example 10: NamenodeRegistration
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public NamenodeRegistration(String address,
    String httpAddress,
    StorageInfo storageInfo,
    NamenodeRole role) {
  super(storageInfo);
  this.rpcAddress = address;
  this.httpAddress = httpAddress;
  this.role = role;
}
Example 11: DatanodeRegistration
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
    ExportedBlockKeys keys, String softwareVersion) {
  super(dn);
  this.storageInfo = info;
  this.exportedKeys = keys;
  this.softwareVersion = softwareVersion;
}
Example 12: initStorage
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
/**
 * Initializes the {@link #data}. The initialization is done only once, when
 * the handshake with the first namenode is completed.
 */
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
  final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory
      = FsDatasetSpi.Factory.getFactory(conf);

  if (!factory.isSimulated()) {
    final StartupOption startOpt = getStartupOption(conf);
    if (startOpt == null) {
      throw new IOException("Startup option not set.");
    }
    final String bpid = nsInfo.getBlockPoolID();
    // read storage info, lock data dirs and transition fs state if necessary
    synchronized (this) {
      storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
    }
    final StorageInfo bpStorage = storage.getBPStorage(bpid);
    LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
        + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
        + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
  }

  // If this is a newly formatted DataNode then assign a new DatanodeUuid.
  checkDatanodeUuid();

  synchronized (this) {
    if (data == null) {
      data = factory.newInstance(this, storage, conf);
    }
  }
}
Example 13: convert
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public static CheckpointSignatureProto convert(CheckpointSignature s) {
  return CheckpointSignatureProto.newBuilder()
      .setBlockPoolId(s.getBlockpoolID())
      .setCurSegmentTxId(s.getCurSegmentTxId())
      .setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId())
      .setStorageInfo(PBHelper.convert((StorageInfo) s)).build();
}
Example 14: doUpgrade
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public QuorumCall<AsyncLogger, Void> doUpgrade(StorageInfo sInfo) {
  Map<AsyncLogger, ListenableFuture<Void>> calls =
      Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<Void> future =
        logger.doUpgrade(sInfo);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}
Example 15: canRollBack
import org.apache.hadoop.hdfs.server.common.StorageInfo; // import the required package/class
public QuorumCall<AsyncLogger, Boolean> canRollBack(StorageInfo storage,
    StorageInfo prevStorage, int targetLayoutVersion) {
  Map<AsyncLogger, ListenableFuture<Boolean>> calls =
      Maps.newHashMap();
  for (AsyncLogger logger : loggers) {
    ListenableFuture<Boolean> future =
        logger.canRollBack(storage, prevStorage, targetLayoutVersion);
    calls.put(logger, future);
  }
  return QuorumCall.create(calls);
}