

Java SnapshotException Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.SnapshotException. If you are wondering what the SnapshotException class does, how to use it, or where to find usage examples, the curated class code examples below may help.


The SnapshotException class belongs to the org.apache.hadoop.hdfs.protocol package. A total of 15 code examples of the SnapshotException class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
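
Before the examples, a quick orientation: SnapshotException is the checked exception the HDFS NameNode raises when a snapshot operation is invalid, for example creating a snapshot on a directory that has not been made snapshottable, deleting a snapshot that does not exist, or resetting a directory that still has snapshots (see Examples 3, 6, and 7 below). The following client-side sketch is not one of the collected examples; it is a minimal, hypothetical illustration (the cluster URI, directory path, and snapshot name are made up) of how application code might catch the exception:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotException;

public class SnapshotClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; replace with your cluster's fs.defaultFS.
    conf.set("fs.defaultFS", "hdfs://namenode:8020");
    Path dir = new Path("/data/reports"); // hypothetical directory

    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Mark the directory snapshottable (typically requires administrator privileges).
      dfs.allowSnapshot(dir);
      // Returns the snapshot path, e.g. /data/reports/.snapshot/s0
      Path snap = dfs.createSnapshot(dir, "s0");
      System.out.println("Created snapshot at " + snap);
      dfs.deleteSnapshot(dir, "s0");
    } catch (SnapshotException e) {
      // Raised for invalid snapshot operations, e.g. the directory is not
      // snapshottable or the named snapshot does not exist.
      System.err.println("Snapshot operation rejected: " + e.getMessage());
    }
  }
}

Because SnapshotException extends IOException, any snapshot error that is not handled specifically still propagates as an ordinary IOException.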

Example 1: checkSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 *
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable
 *                          but do not have snapshots yet
 */
static void checkSnapshot(
    INode target, List<INodeDirectory> snapshottableDirs)
    throws SnapshotException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    DirectorySnapshottableFeature sf = targetDir
        .getDirectorySnapshottableFeature();
    if (sf != null) {
      if (sf.getNumSnapshots() > 0) {
        String fullPath = targetDir.getFullPathName();
        throw new SnapshotException("The directory " + fullPath
            + " cannot be deleted since " + fullPath
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(targetDir);
        }
      }
    }
    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: FSDirSnapshotOp.java

Example 2: checkNestedSnapshottable

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
private void checkNestedSnapshottable(INodeDirectory dir, String path)
    throws SnapshotException {
  if (allowNestedSnapshots) {
    return;
  }

  for(INodeDirectory s : snapshottables.values()) {
    if (s.isAncestorDirectory(dir)) {
      throw new SnapshotException(
          "Nested snapshottable directories not allowed: path=" + path
          + ", the subdirectory " + s.getFullPathName()
          + " is already a snapshottable directory.");
    }
    if (dir.isAncestorDirectory(s)) {
      throw new SnapshotException(
          "Nested snapshottable directories not allowed: path=" + path
          + ", the ancestor " + s.getFullPathName()
          + " is already a snapshottable directory.");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: SnapshotManager.java

Example 3: resetSnapshottable

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Set the given snapshottable directory to non-snapshottable.
 * 
 * @throws SnapshotException if there are snapshots in the directory.
 */
public void resetSnapshottable(final String path) throws IOException {
  final INodesInPath iip = fsdir.getINodesInPath4Write(path);
  final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
  DirectorySnapshottableFeature sf = d.getDirectorySnapshottableFeature();
  if (sf == null) {
    // the directory is already non-snapshottable
    return;
  }
  if (sf.getNumSnapshots() > 0) {
    throw new SnapshotException("The directory " + path + " has snapshot(s). "
        + "Please redo the operation after removing all the snapshots.");
  }

  if (d == fsdir.getRoot()) {
    d.setSnapshotQuota(0);
  } else {
    d.removeSnapshottableFeature();
  }
  removeSnapshottable(d);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source file: SnapshotManager.java

Example 4: createSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Create a snapshot of the given path.
 * It is assumed that the caller will perform synchronization.
 *
 * @param iip the INodes resolved from the snapshottable directory's path
 * @param snapshotName
 *          The name of the snapshot.
 * @throws IOException
 *           Throw IOException when 1) the given path does not lead to an
 *           existing snapshottable directory, and/or 2) there exists a
 *           snapshot with the given name for the directory, and/or 3)
 *           snapshot number exceeds quota
 */
public String createSnapshot(final INodesInPath iip, String snapshotRoot,
    String snapshotName) throws IOException {
  INodeDirectory srcRoot = getSnapshottableRoot(iip);

  if (snapshotCounter == getMaxSnapshotID()) {
    // We have reached the maximum allowable snapshot ID and since we don't
    // handle rollover we will fail all subsequent snapshot creation
    // requests.
    //
    throw new SnapshotException(
        "Failed to create the snapshot. The FileSystem has run out of " +
        "snapshot IDs and ID rollover is not supported.");
  }

  srcRoot.addSnapshot(snapshotCounter, snapshotName);
    
  //create success, update id
  snapshotCounter++;
  numSnapshots.getAndIncrement();
  return Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: SnapshotManager.java

Example 5: createSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Create a snapshot of the given path.
 * It is assumed that the caller will perform synchronization.
 *
 * @param iip the INodes resolved from the snapshottable directory's path
 * @param snapshotName
 *          The name of the snapshot.
 * @throws IOException
 *           Throw IOException when 1) the given path does not lead to an
 *           existing snapshottable directory, and/or 2) there exists a
 *           snapshot with the given name for the directory, and/or 3)
 *           snapshot number exceeds quota
 */
public String createSnapshot(final INodesInPath iip, String snapshotRoot,
    String snapshotName) throws IOException {
  INodeDirectory srcRoot = getSnapshottableRoot(iip);

  if (snapshotCounter == getMaxSnapshotID()) {
    // We have reached the maximum allowable snapshot ID and since we don't
    // handle rollover we will fail all subsequent snapshot creation
    // requests.
    throw new SnapshotException(
        "Failed to create the snapshot. The FileSystem has run out of " +
        "snapshot IDs and ID rollover is not supported.");
  }

  srcRoot.addSnapshot(snapshotCounter, snapshotName);
    
  //create success, update id
  snapshotCounter++;
  numSnapshots.getAndIncrement();
  return Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 34, Source file: SnapshotManager.java

Example 6: removeSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Remove the snapshot with the given name from {@link #snapshotsByNames},
 * and delete all the corresponding DirectoryDiff.
 *
 * @param reclaimContext records blocks and inodes that need to be reclaimed
 * @param snapshotRoot The directory where we take snapshots
 * @param snapshotName The name of the snapshot to be removed
 * @return The removed snapshot. Null if no snapshot with the given name
 *         exists.
 */
public Snapshot removeSnapshot(
    INode.ReclaimContext reclaimContext, INodeDirectory snapshotRoot,
    String snapshotName) throws SnapshotException {
  final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
  if (i < 0) {
    throw new SnapshotException("Cannot delete snapshot " + snapshotName
        + " from path " + snapshotRoot.getFullPathName()
        + ": the snapshot does not exist.");
  } else {
    final Snapshot snapshot = snapshotsByNames.get(i);
    int prior = Snapshot.findLatestSnapshot(snapshotRoot, snapshot.getId());
    snapshotRoot.cleanSubtree(reclaimContext, snapshot.getId(), prior);
    // remove from snapshotsByNames after successfully cleaning the subtree
    snapshotsByNames.remove(i);
    return snapshot;
  }
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 28, Source file: DirectorySnapshottableFeature.java

Example 7: testCreateAndDeleteSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
@Test(timeout = 60000)
public void testCreateAndDeleteSnapshot() throws Exception {
  DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
  // disallow snapshot on dir
  dfs.disallowSnapshot(snapRootPath);
  try {
    fileContext.createSnapshot(snapRootPath, "s1");
  } catch (SnapshotException e) {
    GenericTestUtils.assertExceptionContains(
        "Directory is not a snapshottable directory: " + snapRootPath, e);
  }

  // allow snapshot on dir
  dfs.allowSnapshot(snapRootPath);
  Path ssPath = fileContext.createSnapshot(snapRootPath, "s1");
  assertTrue("Failed to create snapshot", dfs.exists(ssPath));
  fileContext.deleteSnapshot(snapRootPath, "s1");
  assertFalse("Failed to delete snapshot", dfs.exists(ssPath));
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 20, Source file: TestFileContextSnapshot.java

Example 8: checkSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Check if the given INode (or one of its descendants) is snapshottable and
 * already has snapshots.
 * 
 * @param target The given INode
 * @param snapshottableDirs The list of directories that are snapshottable 
 *                          but do not have snapshots yet
 */
private static void checkSnapshot(INode target,
    List<INodeDirectory> snapshottableDirs) throws SnapshotException {
  if (target.isDirectory()) {
    INodeDirectory targetDir = target.asDirectory();
    DirectorySnapshottableFeature sf = targetDir
        .getDirectorySnapshottableFeature();
    if (sf != null) {
      if (sf.getNumSnapshots() > 0) {
        String fullPath = targetDir.getFullPathName();
        throw new SnapshotException("The directory " + fullPath
            + " cannot be deleted since " + fullPath
            + " is snapshottable and already has snapshots");
      } else {
        if (snapshottableDirs != null) {
          snapshottableDirs.add(targetDir);
        }
      }
    } 
    for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
      checkSnapshot(child, snapshottableDirs);
    }
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 32, Source file: FSDirectory.java

Example 9: createSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Create a snapshot of the given path.
 * It is assumed that the caller will perform synchronization.
 *
 * @param path
 *          The directory path where the snapshot will be taken.
 * @param snapshotName
 *          The name of the snapshot.
 * @throws IOException
 *           Throw IOException when 1) the given path does not lead to an
 *           existing snapshottable directory, and/or 2) there exists a
 *           snapshot with the given name for the directory, and/or 3)
 *           snapshot number exceeds quota
 */
public String createSnapshot(final String path, String snapshotName
    ) throws IOException {
  INodeDirectory srcRoot = getSnapshottableRoot(path);

  if (snapshotCounter == getMaxSnapshotID()) {
    // We have reached the maximum allowable snapshot ID and since we don't
    // handle rollover we will fail all subsequent snapshot creation
    // requests.
    //
    throw new SnapshotException(
        "Failed to create the snapshot. The FileSystem has run out of " +
        "snapshot IDs and ID rollover is not supported.");
  }

  AuthorizationProvider.get().createSnapshot(srcRoot, snapshotCounter);
  srcRoot.addSnapshot(snapshotCounter, snapshotName);
    
  //create success, update id
  snapshotCounter++;
  numSnapshots.getAndIncrement();
  return Snapshot.getSnapshotPath(path, snapshotName);
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 37, Source file: SnapshotManager.java

Example 10: concat

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Concat all the blocks from srcs to trg and delete the srcs files
 */
void concat(String target, String [] srcs, boolean supportRetryCache) 
    throws UnresolvedLinkException, QuotaExceededException,
    SnapshotAccessControlException, SnapshotException {
  writeLock();
  try {
    // actual move
    waitForReady();
    long timestamp = now();
    unprotectedConcat(target, srcs, timestamp);
    // do the commit
    fsImage.getEditLog().logConcat(target, srcs, timestamp, 
        supportRetryCache);
  } finally {
    writeUnlock();
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 20, Source file: FSDirectory.java

Example 11: checkNestedSnapshottable

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
private void checkNestedSnapshottable(INodeDirectory dir, String path)
    throws SnapshotException {
  if (allowNestedSnapshots) {
    return;
  }

  for(INodeDirectorySnapshottable s : snapshottables.values()) {
    if (s.isAncestorDirectory(dir)) {
      throw new SnapshotException(
          "Nested snapshottable directories not allowed: path=" + path
          + ", the subdirectory " + s.getFullPathName()
          + " is already a snapshottable directory.");
    }
    if (dir.isAncestorDirectory(s)) {
      throw new SnapshotException(
          "Nested snapshottable directories not allowed: path=" + path
          + ", the ancestor " + s.getFullPathName()
          + " is already a snapshottable directory.");
    }
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 22, Source file: SnapshotManager.java

Example 12: resetSnapshottable

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Set the given snapshottable directory to non-snapshottable.
 * 
 * @throws SnapshotException if there are snapshots in the directory.
 */
public void resetSnapshottable(final String path) throws IOException {
  final INodesInPath iip = fsdir.getINodesInPath4Write(path);
  final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
  if (!d.isSnapshottable()) {
    // the directory is already non-snapshottable
    return;
  }
  final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
  if (s.getNumSnapshots() > 0) {
    throw new SnapshotException("The directory " + path + " has snapshot(s). "
        + "Please redo the operation after removing all the snapshots.");
  }

  if (s == fsdir.getRoot()) {
    s.setSnapshotQuota(0); 
  } else {
    s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
  }
  removeSnapshottable(s);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 26, Source file: SnapshotManager.java

Example 13: createSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Create a snapshot of the given path.
 * It is assumed that the caller will perform synchronization.
 *
 * @param path
 *          The directory path where the snapshot will be taken.
 * @param snapshotName
 *          The name of the snapshot.
 * @throws IOException
 *           Throw IOException when 1) the given path does not lead to an
 *           existing snapshottable directory, and/or 2) there exists a
 *           snapshot with the given name for the directory, and/or 3)
 *           snapshot number exceeds quota
 */
public String createSnapshot(final String path, String snapshotName
    ) throws IOException {
  INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);

  if (snapshotCounter == getMaxSnapshotID()) {
    // We have reached the maximum allowable snapshot ID and since we don't
    // handle rollover we will fail all subsequent snapshot creation
    // requests.
    //
    throw new SnapshotException(
        "Failed to create the snapshot. The FileSystem has run out of " +
        "snapshot IDs and ID rollover is not supported.");
  }

  srcRoot.addSnapshot(snapshotCounter, snapshotName);
    
  //create success, update id
  snapshotCounter++;
  numSnapshots.getAndIncrement();
  return Snapshot.getSnapshotPath(path, snapshotName);
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 36, Source file: SnapshotManager.java

Example 14: createSnapshot

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Create a snapshot of the given path.
 * It is assumed that the caller will perform synchronization.
 *
 * @param path
 *          The directory path where the snapshot will be taken.
 * @param snapshotName
 *          The name of the snapshot.
 * @throws IOException
 *           Throw IOException when 1) the given path does not lead to an
 *           existing snapshottable directory, and/or 2) there exists a
 *           snapshot with the given name for the directory, and/or 3)
 *           snapshot number exceeds quota
 */
public String createSnapshot(final String path, String snapshotName
    ) throws IOException {
  INodeDirectory srcRoot = getSnapshottableRoot(path);

  if (snapshotCounter == getMaxSnapshotID()) {
    // We have reached the maximum allowable snapshot ID and since we don't
    // handle rollover we will fail all subsequent snapshot creation
    // requests.
    //
    throw new SnapshotException(
        "Failed to create the snapshot. The FileSystem has run out of " +
        "snapshot IDs and ID rollover is not supported.");
  }

  srcRoot.addSnapshot(snapshotCounter, snapshotName);
    
  //create success, update id
  snapshotCounter++;
  numSnapshots.getAndIncrement();
  return Snapshot.getSnapshotPath(path, snapshotName);
}
 
Developer ID: yncxcw, Project: FlexMap, Lines of code: 36, Source file: SnapshotManager.java

Example 15: resetSnapshottable

import org.apache.hadoop.hdfs.protocol.SnapshotException; // import the required package/class
/**
 * Set the given snapshottable directory to non-snapshottable.
 * 
 * @throws SnapshotException if there are snapshots in the directory.
 */
public void resetSnapshottable(final String path) throws IOException {
  final INodesInPath iip = fsdir.getINodesInPath4Write(path);
  final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
  if (!d.isSnapshottable()) {
    // the directory is already non-snapshottable
    return;
  }
  final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
  if (s.getNumSnapshots() > 0) {
    throw new SnapshotException("The directory " + path + " has snapshot(s). "
        + "Please redo the operation after removing all the snapshots.");
  }

  if (s == fsdir.getRoot()) {
    s.setSnapshotQuota(0); 
  } else {
    s.replaceSelf(iip.getLatestSnapshotId(), fsdir.getINodeMap());
  }
  removeSnapshottable(s);
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 26, Source file: SnapshotManager.java


Note: The org.apache.hadoop.hdfs.protocol.SnapshotException class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.