This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.LayoutVersion. If you have been wondering what the LayoutVersion class is for or how to use it, the curated examples below may help.
The LayoutVersion class belongs to the org.apache.hadoop.hdfs.protocol package. Twelve code examples are shown below, ordered by popularity.
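Every example on this page follows the same guard pattern: before choosing a code path, it asks whether the layout version of the on-disk data already includes a given feature. As a minimal sketch of that pattern in isolation (the class and method here are hypothetical; the HDFS calls are the same ones the examples use):

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

public class LayoutVersionGateSketch {
  // Hypothetical helper: decide whether an edit log written with logVersion
  // uses the optimized op encoding.
  static boolean useOptimizedOpFormat(int logVersion) {
    // supports() returns true when the given layout version already includes
    // the feature; layout versions grow more negative with each release.
    return NameNodeLayoutVersion.supports(
        LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion);
  }
}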
Example 1: getAndUpdateLastInodeId
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
long lastInodeId) throws IOException {
long inodeId = inodeIdFromOp;
if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
throw new IOException("The layout version " + logVersion
+ " supports inodeId but gave bogus inodeId");
}
inodeId = fsNamesys.dir.allocateNewInodeId();
} else {
      // Need to reset lastInodeId: fsnamesys initially reads lastInodeId from
      // the fsimage, but the editlog may record more recent inodeId allocations.
if (inodeId > lastInodeId) {
fsNamesys.dir.resetLastInodeId(inodeId);
}
}
return inodeId;
}
Example 2: setFieldsFromProperties
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
@Override // Storage
protected void setFieldsFromProperties(
Properties props, StorageDirectory sd) throws IOException {
super.setFieldsFromProperties(props, sd);
if (layoutVersion == 0) {
throw new IOException("NameNode directory "
+ sd.getRoot() + " is not formatted.");
}
// Set Block pool ID in version with federation support
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
setDeprecatedPropertiesForUpgrade(props);
}
Example 3: readFields
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (this.length != 3) {
throw new IOException("Incorrect data format. "
+ "Old rename operation.");
}
}
this.src = FSImageSerialization.readString(in);
this.dst = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
} else {
this.timestamp = readLong(in);
}
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
Example 4: Reader
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
/**
* Construct the reader
* @param in The stream to read from.
* @param logVersion The version of the data coming from the stream.
*/
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHECKSUM, logVersion)) {
this.checksum = DataChecksum.newCrc32();
} else {
this.checksum = null;
}
// It is possible that the logVersion is actually a future layoutversion
// during the rolling upgrade (e.g., the NN gets upgraded first). We
// assume future layout will also support length of editlog op.
this.supportEditLogLength = NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
|| logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
if (this.checksum != null) {
this.in = new DataInputStream(
new CheckedInputStream(in, this.checksum));
} else {
this.in = in;
}
this.limiter = limiter;
this.cache = new OpInstanceCache();
this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
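The constructor above treats any logVersion smaller than CURRENT_LAYOUT_VERSION as data from a future release. A short sketch of that ordering convention, using made-up version numbers (the real constants differ by release):

public class LayoutVersionOrderingSketch {
  public static void main(String[] args) {
    // Illustrative values only: HDFS layout versions are negative integers,
    // and newer releases use smaller (more negative) numbers.
    final int currentLayoutVersion = -60; // stand-in for CURRENT_LAYOUT_VERSION
    final int olderLog = -55;             // written by an older release
    final int futureLog = -63;            // written by a newer release
    // The "logVersion < CURRENT_LAYOUT_VERSION" test above therefore matches
    // only data written by a newer release.
    System.out.println(olderLog < currentLayoutVersion);  // false
    System.out.println(futureLog < currentLayoutVersion); // true
  }
}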
Example 5: loadINodeFileAttributes
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asFile();
}
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
final long accessTime = in.readLong();
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
accessTime, replication, preferredBlockSize, (byte) 0, null);
}
Example 6: loadINodeDirectoryAttributes
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asDirectory();
}
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
// Read quotas: quota by storage type does not need to be processed below.
// It is handled only in protobuf based FsImagePBINode class for newer
// fsImages. Tools using this class such as legacy-mode of offline image viewer
// should only load legacy FSImages without newer features.
final long nsQuota = in.readLong();
final long dsQuota = in.readLong();
return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
name, permissions, null, modificationTime, null)
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
null, modificationTime, nsQuota, dsQuota, null, null);
}
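In the method above, -1 stored in both quota fields means no quota was set on the directory, which is why a plain SnapshotCopy is returned instead of a CopyWithQuota. A tiny hedged sketch isolating that sentinel check (the helper name is hypothetical):

public class QuotaSentinelSketch {
  // Hypothetical helper: in the legacy fsimage format, -1 in both the
  // namespace and diskspace quota fields marks "no quota recorded".
  static boolean directoryHasQuota(long nsQuota, long dsQuota) {
    return !(nsQuota == -1L && dsQuota == -1L);
  }
}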
Example 7: cleanupDetachDir
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
/**
* Cleanup the detachDir.
*
* If the directory is not empty report an error; Otherwise remove the
* directory.
*
* @param detachDir detach directory
* @throws IOException if the directory is not empty or it can not be removed
*/
private void cleanupDetachDir(File detachDir) throws IOException {
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
&& detachDir.exists() && detachDir.isDirectory()) {
if (FileUtil.list(detachDir).length != 0) {
throw new IOException("Detached directory " + detachDir
+ " is not empty. Please manually move each file under this "
+ "directory to the finalized directory if the finalized "
+ "directory tree does not have the file.");
} else if (!detachDir.delete()) {
throw new IOException("Cannot remove directory " + detachDir);
}
}
}
Example 8: setPropertiesFromFields
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
@Override
protected void setPropertiesFromFields(Properties props,
StorageDirectory sd
) throws IOException {
props.setProperty("storageType", storageType.toString());
props.setProperty("clusterID", clusterID);
props.setProperty("cTime", String.valueOf(cTime));
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("storageID", sd.getStorageUuid());
String datanodeUuid = getDatanodeUuid();
if (datanodeUuid != null) {
props.setProperty("datanodeUuid", datanodeUuid);
}
// Set NamespaceID in version before federation
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
props.setProperty("namespaceID", String.valueOf(namespaceID));
}
}
Example 9: getAndUpdateLastInodeId
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
long lastInodeId) throws IOException {
long inodeId = inodeIdFromOp;
if (inodeId == HdfsConstants.GRANDFATHER_INODE_ID) {
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
throw new IOException("The layout version " + logVersion
+ " supports inodeId but gave bogus inodeId");
}
inodeId = fsNamesys.dir.allocateNewInodeId();
} else {
      // Need to reset lastInodeId: fsnamesys initially reads lastInodeId from
      // the fsimage, but the editlog may record more recent inodeId allocations.
if (inodeId > lastInodeId) {
fsNamesys.dir.resetLastInodeId(inodeId);
}
}
return inodeId;
}
Example 10: create
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
public static Reader create(DataInputStream in, StreamLimiter limiter,
int logVersion) {
if (logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) {
// Use the LengthPrefixedReader on edit logs which are newer than what
// we can parse. (Newer layout versions are represented by smaller
// negative integers, for historical reasons.) Even though we can't
// parse the Ops contained in them, we should still be able to call
// scanOp on them. This is important for the JournalNode during rolling
// upgrade.
return new LengthPrefixedReader(in, limiter, logVersion);
} else if (NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)) {
return new LengthPrefixedReader(in, limiter, logVersion);
} else if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITS_CHECKSUM, logVersion)) {
Checksum checksum = DataChecksum.newCrc32();
return new ChecksummedReader(checksum, in, limiter, logVersion);
} else {
return new LegacyReader(in, limiter, logVersion);
}
}
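A hedged usage sketch for the factory above: given an edit log stream, a limiter, and the stream's declared version, create() hands back whichever Reader variant can parse it. The file name below is an assumption, and Reader and StreamLimiter are package-internal NameNode types, so a caller like this would live in the same package:

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class ReaderFactorySketch {
  // logVersion would normally be read from the edit log file's header.
  static Reader openReader(String editsFile, StreamLimiter limiter,
      int logVersion) throws IOException {
    DataInputStream in = new DataInputStream(
        new BufferedInputStream(new FileInputStream(editsFile)));
    // create() picks LengthPrefixedReader, ChecksummedReader, or LegacyReader
    // depending on which features logVersion supports.
    return Reader.create(in, limiter, logVersion);
  }
}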
Example 11: loadINodeFileAttributes
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asFile();
}
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
final long accessTime = in.readLong();
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
accessTime, replication, preferredBlockSize, (byte) 0, null, false);
}
Example 12: getAndUpdateLastInodeId
import org.apache.hadoop.hdfs.protocol.LayoutVersion; //import the required package/class
private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion,
long lastInodeId) throws IOException {
long inodeId = inodeIdFromOp;
if (inodeId == INodeId.GRANDFATHER_INODE_ID) {
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
throw new IOException("The layout version " + logVersion
+ " supports inodeId but gave bogus inodeId");
}
inodeId = fsNamesys.allocateNewInodeId();
} else {
      // Need to reset lastInodeId: fsnamesys initially reads lastInodeId from
      // the fsimage, but the editlog may record more recent inodeId allocations.
if (inodeId > lastInodeId) {
fsNamesys.resetLastInodeId(inodeId);
}
}
return inodeId;
}