当前位置: 首页>>代码示例>>Java>>正文


Java Snapshot类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot的典型用法代码示例。如果您正苦于以下问题:Java Snapshot类的具体用法?Java Snapshot怎么用?Java Snapshot使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


Snapshot类属于org.apache.hadoop.hdfs.server.namenode.snapshot包,在下文中一共展示了Snapshot类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getPriorSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Determine the snapshot that immediately precedes the creation of a
 * reference node (WithName or DstReference). Called when such a reference
 * node is being destroyed.
 *
 * @param ref the reference node being destroyed
 * @return the id of the latest snapshot taken before the reference node's
 *         creation, or {@link Snapshot#NO_SNAPSHOT_ID} if there is none
 */
static int getPriorSnapshot(INodeReference ref) {
  final WithCount withCount = (WithCount) ref.getReferredINode();
  final WithName withName;
  if (ref instanceof DstReference) {
    withName = withCount.getLastWithName();
  } else if (ref instanceof WithName) {
    withName = withCount.getPriorWithName((WithName) ref);
  } else {
    withName = null;
  }
  if (withName == null) {
    return Snapshot.NO_SNAPSHOT_ID;
  }
  final INode referred = withCount.getReferredINode();
  if (referred.isFile() && referred.asFile().isWithSnapshot()) {
    return referred.asFile().getDiffs().getPrior(withName.lastSnapshotId);
  }
  if (referred.isDirectory()) {
    final DirectoryWithSnapshotFeature sf =
        referred.asDirectory().getDirectoryWithSnapshotFeature();
    if (sf != null) {
      return sf.getDiffs().getPrior(withName.lastSnapshotId);
    }
  }
  return Snapshot.NO_SNAPSHOT_ID;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:28,代码来源:INodeReference.java

示例2: computeQuotaUsage

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Compute the quota usage of the referred subtree along this WithName
 * node's snapshot time line.
 */
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
    byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
    int lastSnapshotId) {
  // A WithName node only counts its children at the time of the rename, so
  // the rename of the referred node can never post-date the rename of its
  // ancestor: this.lastSnapshotId < lastSnapshotId is impossible.
  Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID
      || this.lastSnapshotId >= lastSnapshotId);
  final INode referred =
      this.getReferredINode().asReference().getReferredINode();
  // Continue the computation on the same snapshot time line when the given
  // id is valid; otherwise fall back to this node's own last snapshot id.
  final int snapshotId;
  if (lastSnapshotId == Snapshot.CURRENT_STATE_ID) {
    snapshotId = this.lastSnapshotId;
  } else {
    snapshotId = lastSnapshotId;
  }
  // Never use the referred node's cached quota here: changes in the current
  // tree may already have updated that cache.
  return referred.computeQuotaUsage(bsps, blockStoragePolicyId, counts,
      false, snapshotId);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:INodeReference.java

示例3: getSelfSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Find the snapshot, other than {@code prior}, in which this reference's
 * referred inode was last captured.
 *
 * @param prior a snapshot id to exclude from the answer
 * @return the referred inode's latest snapshot id when it differs from
 *         {@code prior}; {@link Snapshot#CURRENT_STATE_ID} otherwise
 */
private int getSelfSnapshot(final int prior) {
  final WithCount withCount = (WithCount) getReferredINode().asReference();
  final INode referred = withCount.getReferredINode();
  int latest = Snapshot.CURRENT_STATE_ID;
  if (referred.isFile() && referred.asFile().isWithSnapshot()) {
    latest = referred.asFile().getDiffs().getLastSnapshotId();
  } else if (referred.isDirectory()) {
    final DirectoryWithSnapshotFeature sf =
        referred.asDirectory().getDirectoryWithSnapshotFeature();
    if (sf != null) {
      latest = sf.getLastSnapshotId();
    }
  }
  return (latest != Snapshot.CURRENT_STATE_ID && latest != prior)
      ? latest : Snapshot.CURRENT_STATE_ID;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:INodeReference.java

示例4: cleanDst

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Clean up the destination inode that a rename with the OVERWRITE option
 * replaced, releasing its blocks and leases.
 *
 * @param bsps block storage policy suite used during deletion
 * @param collectedBlocks accumulator for blocks that must be removed
 * @return true if files were actually deleted
 * @throws QuotaExceededException if the cleanup violates quota limits
 */
boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks)
    throws QuotaExceededException {
  Preconditions.checkState(oldDstChild != null);
  final List<INode> removedINodes = new ChunkedArrayList<>();
  final boolean filesDeleted;
  if (oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
    // Still captured by a snapshot: only clean the current state and keep
    // the snapshotted copies.
    filesDeleted = oldDstChild.cleanSubtree(bsps, Snapshot.CURRENT_STATE_ID,
        dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes)
        .getNameSpace() >= 0;
  } else {
    // Not in any snapshot: destroy the whole subtree outright.
    oldDstChild.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
    filesDeleted = true;
  }
  fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
  return filesDeleted;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:FSDirRenameOp.java

示例5: checkSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Check that the given INode and all of its descendants can be deleted:
 * none of them may be a snapshottable directory that already has snapshots.
 *
 * @param target the root of the subtree to check
 * @param snapshottableDirs output list collecting directories that are
 *                          snapshottable but do not have snapshots yet;
 *                          may be null if the caller does not need them
 * @throws SnapshotException if the subtree contains a snapshottable
 *                           directory that already has snapshots
 */
static void checkSnapshot(
    INode target, List<INodeDirectory> snapshottableDirs)
    throws SnapshotException {
  // Guard clause: only directories can carry the snapshottable feature or
  // have children to recurse into.
  if (!target.isDirectory()) {
    return;
  }
  final INodeDirectory targetDir = target.asDirectory();
  final DirectorySnapshottableFeature sf = targetDir
      .getDirectorySnapshottableFeature();
  if (sf != null) {
    if (sf.getNumSnapshots() > 0) {
      String fullPath = targetDir.getFullPathName();
      throw new SnapshotException("The directory " + fullPath
          + " cannot be deleted since " + fullPath
          + " is snapshottable and already has snapshots");
    }
    // Snapshottable but snapshot-free: report it to the caller if asked.
    if (snapshottableDirs != null) {
      snapshottableDirs.add(targetDir);
    }
  }
  // Recurse into the current (non-snapshot) children of the directory.
  for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
    checkSnapshot(child, snapshottableDirs);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:FSDirSnapshotOp.java

示例6: closeFileCommitBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Commit the last block of a file under construction and close the file.
 *
 * @param pendingFile open file that needs to be closed
 * @param storedBlock last block of the file
 * @return path of the file that was closed
 * @throws IOException on error
 */
@VisibleForTesting
String closeFileCommitBlocks(INodeFile pendingFile, BlockInfoContiguous storedBlock)
    throws IOException {
  final INodesInPath iip = INodesInPath.fromINode(pendingFile);
  final String path = iip.getPath();

  // Commit the last block; complete it if it has the minimum replica count.
  commitOrCompleteLastBlock(pendingFile, iip, storedBlock);

  // Release the lease and close the file in the latest-snapshot context.
  finalizeINodeFileUnderConstruction(path, pendingFile,
      Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID));

  return path;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:FSNamesystem.java

示例7: renameSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Rename an existing snapshot.
 *
 * @param path the snapshottable directory the snapshot was taken in
 * @param snapshotOldName current name of the snapshot
 * @param snapshotNewName new name for the snapshot
 * @param logRetryCache whether to record this operation in the retry cache
 * @throws SafeModeException if the NameNode is in safe mode
 * @throws IOException on other errors
 */
void renameSnapshot(
    String path, String snapshotOldName, String snapshotNewName,
    boolean logRetryCache) throws IOException {
  checkOperation(OperationCategory.WRITE);
  boolean success = false;
  writeLock();
  try {
    // Re-check under the write lock: state may have changed since above.
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot rename snapshot for " + path);
    FSDirSnapshotOp.renameSnapshot(dir, snapshotManager, path,
        snapshotOldName, snapshotNewName, logRetryCache);
    success = true;
  } finally {
    writeUnlock();
  }
  // Sync the edit log outside the lock, then audit-log the outcome.
  getEditLog().logSync();
  logAuditEvent(success, "renameSnapshot",
      Snapshot.getSnapshotPath(path, snapshotOldName),
      Snapshot.getSnapshotPath(path, snapshotNewName), null);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:FSNamesystem.java

示例8: isInLatestSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/** Is this inode captured by the given latest snapshot? */
public final boolean isInLatestSnapshot(final int latestSnapshotId) {
  if (latestSnapshotId == Snapshot.CURRENT_STATE_ID
      || latestSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
    return false;
  }
  // A reference parent means this node was renamed; the check stops at the
  // reference node.
  if (parent != null && parent.isReference()) {
    return true;
  }
  final INodeDirectory parentDir = getParent();
  if (parentDir == null) {
    // The root directory is always part of the latest snapshot.
    return true;
  }
  if (!parentDir.isInLatestSnapshot(latestSnapshotId)) {
    return false;
  }
  final INode child = parentDir.getChild(getLocalNameBytes(), latestSnapshotId);
  if (child == this) {
    return true;
  }
  // The snapshot may record a reference to this node instead of the node
  // itself (rename case).
  return child != null && child.isReference()
      && child.asReference().getReferredINode() == this;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:INode.java

示例9: shouldRecordInSrcSnapshot

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * When {@link #recordModification} is called on a referred node, decide
 * which snapshot the modification should be associated with: one belonging
 * to the SRC tree of the rename operation, or one belonging to the DST
 * tree.
 *
 * @param latestInDst
 *          id of the latest snapshot in the DST tree above the reference
 *          node
 * @return true if the modification should be recorded in the snapshot that
 *         belongs to the SRC tree; false if it should be recorded in the
 *         snapshot that belongs to the DST tree
 */
public final boolean shouldRecordInSrcSnapshot(final int latestInDst) {
  Preconditions.checkState(!isReference());

  // No snapshot in the DST tree covers this node: record in SRC.
  if (latestInDst == Snapshot.CURRENT_STATE_ID) {
    return true;
  }
  final INodeReference withCount = getParentReference();
  if (withCount == null) {
    return false;
  }
  // If the rename happened at or after the latest DST snapshot, that
  // snapshot cannot contain this node: record in SRC.
  final int dstSnapshotId = withCount.getParentReference().getDstSnapshotId();
  return dstSnapshotId != Snapshot.CURRENT_STATE_ID
      && dstSnapshotId >= latestInDst;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:29,代码来源:INode.java

示例10: replaceChild4ReferenceWithName

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Replace the given child with an {@link INodeReference.WithName} node that
 * refers to it, creating (or reusing) the shared WithCount node as needed.
 *
 * @param oldChild the child inode to wrap
 * @param latestSnapshotId snapshot id to record on the WithName node; must
 *        not be {@link Snapshot#CURRENT_STATE_ID}
 * @return the WithName node now stored in this directory's children list
 */
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
    int latestSnapshotId) {
  Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
  // Already wrapped: nothing to do.
  if (oldChild instanceof INodeReference.WithName) {
    return (INodeReference.WithName) oldChild;
  }

  final INodeReference.WithCount withCount;
  if (oldChild.isReference()) {
    // A DstReference already shares a WithCount node; reuse it.
    Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
    withCount = (INodeReference.WithCount) oldChild.asReference()
        .getReferredINode();
  } else {
    // First reference to this inode: create the shared WithCount node.
    withCount = new INodeReference.WithCount(null, oldChild);
  }
  final INodeReference.WithName withName = new INodeReference.WithName(this,
      withCount, oldChild.getLocalNameBytes(), latestSnapshotId);
  replaceChild(oldChild, withName, null);
  return withName;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:21,代码来源:INodeDirectory.java

示例11: undoRename4DstParent

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Undo a rename (with the OVERWRITE option) in the dst tree: put the
 * removed file/dir back and drop any record of it in the deleted list.
 *
 * @param bsps block storage policy suite used for quota computation
 * @param deletedChild the inode that the rename removed from the dst tree
 * @param latestSnapshotId id of the latest snapshot covering this directory
 * @throws QuotaExceededException if re-adding the child exceeds quota
 */
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
    final INode deletedChild,
    int latestSnapshotId) throws QuotaExceededException {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  Preconditions.checkState(sf != null,
      "Directory does not have snapshot feature");
  final boolean wasInDeletedList =
      sf.getDiffs().removeChild(ListType.DELETED, deletedChild);
  // A child found on the deleted list belongs to the current state;
  // otherwise re-add it under the latest snapshot.
  final int sid =
      wasInDeletedList ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
  final boolean added = addChild(deletedChild, true, sid);
  // Only charge quota when the add succeeded and the child was not already
  // accounted for via the deleted list.
  if (added && !wasInDeletedList) {
    addSpaceConsumed(deletedChild.computeQuotaUsage(bsps), false);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:24,代码来源:INodeDirectory.java

示例12: cleanSubtreeRecursively

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Call cleanSubtree(..) recursively on every child of this directory and
 * accumulate the freed quota.
 *
 * @param bsps block storage policy suite used during deletion
 * @param snapshot the snapshot being cleaned, or
 *        {@link Snapshot#CURRENT_STATE_ID} for the current state
 * @param prior the snapshot id immediately before {@code snapshot}
 * @param collectedBlocks accumulator for blocks that must be removed
 * @param removedINodes accumulator for inodes that must be removed
 * @param excludedNodes children to skip when cleaning a snapshot; may be
 *        null
 * @return the total quota counts released from the subtree
 */
public QuotaCounts cleanSubtreeRecursively(final BlockStoragePolicySuite bsps,
    final int snapshot,
    int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes, final Map<INode, INode> excludedNodes) {
  final QuotaCounts counts = new QuotaCounts.Builder().build();
  // In case of snapshot deletion, this call happens after the diff list has
  // been modified, so the snapshot to be deleted has already been combined
  // into or renamed to its latest previous snapshot. Nodes created after
  // prior but before snapshot are handled by DirectoryWithSnapshotFeature.
  final int s = snapshot != Snapshot.CURRENT_STATE_ID
      && prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
  // Exclusions only apply when cleaning an actual snapshot; hoist the
  // loop-invariant part of the test out of the loop.
  final boolean checkExcluded =
      snapshot != Snapshot.CURRENT_STATE_ID && excludedNodes != null;
  for (INode child : getChildrenList(s)) {
    if (checkExcluded && excludedNodes.containsKey(child)) {
      continue;
    }
    counts.add(child.cleanSubtree(bsps, snapshot, prior,
        collectedBlocks, removedINodes));
  }
  return counts;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:26,代码来源:INodeDirectory.java

示例13: destroyAndCollectBlocks

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Destroy this directory and its whole subtree, collecting the blocks and
 * inodes that must be removed from the namespace.
 */
@Override
public void destroyAndCollectBlocks(final BlockStoragePolicySuite bsps,
    final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
  if (sf != null) {
    // Release all snapshot diff data held by this directory first.
    sf.clear(bsps, this, collectedBlocks, removedINodes);
  }
  // Recursively destroy every child in the current state.
  for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
    child.destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  // Detach the ACL feature, if any, before clearing this inode.
  if (getAclFeature() != null) {
    AclStorage.removeAclFeature(getAclFeature());
  }
  clear();
  removedINodes.add(this);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:INodeDirectory.java

示例14: testReset

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * Verify that FSNamesystem#clear() resets the image-loaded flag and empties
 * the root directory, and that image loading can complete again afterwards.
 */
@Test
public void testReset() throws Exception {
  final Configuration conf = new Configuration();
  final FSEditLog editLog = Mockito.mock(FSEditLog.class);
  final FSImage image = Mockito.mock(FSImage.class);
  Mockito.when(image.getEditLog()).thenReturn(editLog);
  final FSNamesystem fsn = new FSNamesystem(conf, image);
  fsn.imageLoadComplete();
  assertTrue(fsn.isImageLoaded());
  // Reset and check that the namesystem forgot the loaded image.
  fsn.clear();
  assertFalse(fsn.isImageLoaded());
  final INodeDirectory root =
      (INodeDirectory) fsn.getFSDirectory().getINode("/");
  assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  // Image loading can be completed again after a reset.
  fsn.imageLoadComplete();
  assertTrue(fsn.isImageLoaded());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:18,代码来源:TestFSNamesystem.java

示例15: createTreeOfInodes

import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; //导入依赖的package包/类
/**
 * For a given path, build a chain of INodeDirectory objects — one per path
 * component — and return the last (deepest) node.
 *
 * @param path an absolute path such as "/a/b/c"
 * @return the INode for the final path component, or null when the path has
 *         no non-empty components
 * @throws QuotaExceededException if adding a child exceeds quota
 */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
  final byte[][] components = INode.getPathComponents(path);
  final FsPermission perm = FsPermission.createImmutable((short)0755);
  final PermissionStatus permstatus =
      PermissionStatus.createImmutable("", "", perm);

  long id = 0;
  INodeDirectory parent = new INodeDirectory(++id, new byte[0], permstatus, 0);
  INodeDirectory current = null;
  for (byte[] component : components) {
    if (component.length == 0) {
      // Skip the empty component produced by the leading '/'.
      continue;
    }
    System.out.println("Adding component " + DFSUtil.bytes2String(component));
    current = new INodeDirectory(++id, component, permstatus, 0);
    parent.addChild(current, false, Snapshot.CURRENT_STATE_ID);
    parent = current;
  }
  return current; // last inode in the chain
}
 
开发者ID:naver,项目名称:hadoop,代码行数:23,代码来源:TestINodeFile.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。