

Java UnresolvedLinkException Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.UnresolvedLinkException. If you are wondering how the UnresolvedLinkException class is used in practice, or are looking for concrete examples of it, the curated code samples below may help.


The UnresolvedLinkException class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, sorted by popularity by default.
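Before the examples, here is a minimal, self-contained sketch (not taken from the examples below) showing where this exception typically appears: it is declared or thrown by lower-level AbstractFileSystem/HDFS calls when a path contains a symlink that the caller must resolve itself, while the higher-level FileContext API normally resolves links before the exception reaches user code. The path "/tmp/maybe-a-symlink" and the class name UnresolvedLinkExample are placeholders chosen for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;

public class UnresolvedLinkExample {
  public static void main(String[] args) throws IOException {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path p = new Path("/tmp/maybe-a-symlink"); // placeholder path
    try {
      // getFileLinkStatus does not follow a trailing symlink, so the link
      // itself can be inspected without resolving it.
      FileStatus stat = fc.getFileLinkStatus(p);
      if (stat.isSymlink()) {
        System.out.println(p + " -> " + stat.getSymlink());
      }
    } catch (UnresolvedLinkException e) {
      // Surfaces when a symlink in the path could not be resolved for the
      // caller; FileContext normally handles this internally, so reaching
      // this branch means the underlying file system gave up resolving.
      System.err.println("Unresolved symlink while accessing " + p + ": " + e);
    }
  }
}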

Example 1: createAFileWithCorruptedBlockReplicas

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // Corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: TestClientReportBadBlock.java

Example 2: mkdirs

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public boolean mkdirs(String src, FsPermission masked, boolean createParent)
    throws AccessControlException, FileAlreadyExistsException,
    FileNotFoundException, NSQuotaExceededException,
    ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
    IOException {
  MkdirsRequestProto req = MkdirsRequestProto.newBuilder()
      .setSrc(src)
      .setMasked(PBHelper.convert(masked))
      .setCreateParent(createParent).build();

  try {
    return rpcProxy.mkdirs(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: ClientNamenodeProtocolTranslatorPB.java

Example 3: getAdditionalDatanode

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public LocatedBlock getAdditionalDatanode(String src, long fileId,
    ExtendedBlock blk, DatanodeInfo[] existings, String[] existingStorageIDs,
    DatanodeInfo[] excludes,
    int numAdditionalNodes, String clientName) throws AccessControlException,
    FileNotFoundException, SafeModeException, UnresolvedLinkException,
    IOException {
  GetAdditionalDatanodeRequestProto req = GetAdditionalDatanodeRequestProto
      .newBuilder()
      .setSrc(src)
      .setFileId(fileId)
      .setBlk(PBHelper.convert(blk))
      .addAllExistings(PBHelper.convert(existings))
      .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
      .addAllExcludes(PBHelper.convert(excludes))
      .setNumAdditionalNodes(numAdditionalNodes)
      .setClientName(clientName)
      .build();
  try {
    return PBHelper.convert(rpcProxy.getAdditionalDatanode(null, req)
        .getBlock());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: ClientNamenodeProtocolTranslatorPB.java

Example 4: createInternal

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public FSDataOutputStream createInternal(final Path f,
    final EnumSet<CreateFlag> flag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException,
    UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(f), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("create", f);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  return res.targetFileSystem.createInternal(res.remainingPath, flag,
      absolutePermission, bufferSize, replication,
      blockSize, progress, checksumOpt,
      createParent);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 26, Source: ViewFs.java

Example 5: listStatus

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
    fsState.resolve(getUriPath(f), true);
  
  FileStatus[] statusLst = res.targetFileSystem.listStatus(res.remainingPath);
  if (!res.isInternalDir()) {
    // We need to change the name in the FileStatus as described in
    // {@link #getFileStatus }
    ChRootedFs targetFs;
    targetFs = (ChRootedFs) res.targetFileSystem;
    int i = 0;
    for (FileStatus status : statusLst) {
        String suffix = targetFs.stripOutRoot(status.getPath());
        statusLst[i++] = new ViewFsFileStatus(status, this.makeQualified(
            suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)));
    }
  }
  return statusLst;
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 22, Source: ViewFs.java

Example 6: createSymlink

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public void createSymlink(final Path target, final Path link,
    final boolean createParent) throws IOException, UnresolvedLinkException {
  InodeTree.ResolveResult<AbstractFileSystem> res;
  try {
    res = fsState.resolve(getUriPath(link), false);
  } catch (FileNotFoundException e) {
    if (createParent) {
      throw readOnlyMountTable("createSymlink", link);
    } else {
      throw e;
    }
  }
  assert(res.remainingPath != null);
  res.targetFileSystem.createSymlink(target, res.remainingPath,
      createParent);  
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 18, Source: ViewFs.java

Example 7: testGetFileChecksum

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Test
public void testGetFileChecksum() throws AccessControlException
  , UnresolvedLinkException, IOException {
  AbstractFileSystem mockAFS = Mockito.mock(AbstractFileSystem.class);
  InodeTree.ResolveResult<AbstractFileSystem> res =
    new InodeTree.ResolveResult<AbstractFileSystem>(null, mockAFS , null,
      new Path("someFile"));
  @SuppressWarnings("unchecked")
  InodeTree<AbstractFileSystem> fsState = Mockito.mock(InodeTree.class);
  Mockito.when(fsState.resolve(Mockito.anyString()
    , Mockito.anyBoolean())).thenReturn(res);
  ViewFs vfs = Mockito.mock(ViewFs.class);
  vfs.fsState = fsState;

  Mockito.when(vfs.getFileChecksum(new Path("/tmp/someFile")))
    .thenCallRealMethod();
  vfs.getFileChecksum(new Path("/tmp/someFile"));

  Mockito.verify(mockAFS).getFileChecksum(new Path("someFile"));
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 21, Source: ViewFsBaseTest.java

Example 8: complete

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public boolean complete(String src, String clientName,
                        ExtendedBlock last, long fileId)
    throws AccessControlException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  CompleteRequestProto.Builder req = CompleteRequestProto.newBuilder()
      .setSrc(src)
      .setClientName(clientName)
      .setFileId(fileId);
  if (last != null)
    req.setLast(PBHelper.convert(last));
  try {
    return rpcProxy.complete(null, req.build()).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: ClientNamenodeProtocolTranslatorPB.java

Example 9: getFileStatus

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public FileStatus getFileStatus(final Path f) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res = 
    fsState.resolve(getUriPath(f), true);

  // FileStatus#getPath is a fully qualified path relative to the root of the
  // target file system.
  // We need to change it to a viewfs URI - relative to the root of the mount table.

  // The implementors of RawLocalFileSystem were trying to be very smart.
  // They implement FileStatus#getOwner lazily -- the object
  // returned is really a RawLocalFileSystem that expects
  // FileStatus#getPath to be unchanged so that it can get the owner when needed.
  // Hence we need to interpose a new ViewFsFileStatus that works around this.

  FileStatus status = res.targetFileSystem.getFileStatus(res.remainingPath);
  return new ViewFsFileStatus(status, this.makeQualified(f));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 21, Source: ViewFs.java

Example 10: primitiveCreate

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e., it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source: DFSClient.java

Example 11: addBlock

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
    String[] favoredNodes)
    throws AccessControlException, FileNotFoundException,
    NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
    IOException {
  AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
      .setSrc(src).setClientName(clientName).setFileId(fileId);
  if (previous != null) 
    req.setPrevious(PBHelper.convert(previous)); 
  if (excludeNodes != null) 
    req.addAllExcludeNodes(PBHelper.convert(excludeNodes));
  if (favoredNodes != null) {
    req.addAllFavoredNodes(Arrays.asList(favoredNodes));
  }
  try {
    return PBHelper.convert(rpcProxy.addBlock(null, req.build()).getBlock());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: ClientNamenodeProtocolTranslatorPB.java

Example 12: unprotectedSetOwner

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
static void unprotectedSetOwner(
    FSDirectory fsd, String src, String username, String groupname)
    throws FileNotFoundException, UnresolvedLinkException,
    QuotaExceededException, SnapshotAccessControlException {
  assert fsd.hasWriteLock();
  final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
  INode inode = inodesInPath.getLastINode();
  if (inode == null) {
    throw new FileNotFoundException("File does not exist: " + src);
  }
  if (username != null) {
    inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
  }
  if (groupname != null) {
    inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: FSDirAttrOp.java

Example 13: getListing

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  GetListingRequestProto req = GetListingRequestProto.newBuilder()
      .setSrc(src)
      .setStartAfter(ByteString.copyFrom(startAfter))
      .setNeedLocation(needLocation).build();
  try {
    GetListingResponseProto result = rpcProxy.getListing(null, req);
    
    if (result.hasDirList()) {
      return PBHelper.convert(result.getDirList());
    }
    return null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: ClientNamenodeProtocolTranslatorPB.java

Example 14: delete

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public boolean delete(String src, boolean recursive)
    throws AccessControlException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  DeleteRequestProto req = DeleteRequestProto.newBuilder().setSrc(src)
      .setRecursive(recursive).build();
  try {
    return rpcProxy.delete(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 12, Source: ClientNamenodeProtocolTranslatorPB.java

Example 15: renameInternal

import org.apache.hadoop.fs.UnresolvedLinkException; // import the required package/class
@Override
public void renameInternal(final Path src, final Path dst)
  throws IOException, UnresolvedLinkException {
  // note fullPath will check that paths are relative to this FileSystem.
  // Hence both are in same file system and a rename is valid
  myFs.renameInternal(fullPath(src), fullPath(dst));
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 8, Source: ChRootedFs.java


Note: The org.apache.hadoop.fs.UnresolvedLinkException class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.