

Java HdfsFileStatus Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.HdfsFileStatus. If you are wondering what HdfsFileStatus does, how to use it, or are looking for working examples, the curated code samples below should help.


The HdfsFileStatus class belongs to the org.apache.hadoop.hdfs.protocol package. The sections below show 15 code examples that use this class, sorted by popularity.
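Before the examples, here is a minimal standalone sketch of how an HdfsFileStatus is typically obtained and inspected through DFSClient. The class name HdfsFileStatusDemo, the NameNode URI hdfs://localhost:8020, and the path /tmp/bar are placeholders chosen for illustration only; the getter calls mirror the ones used in the examples that follow.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class HdfsFileStatusDemo {
  public static void main(String[] args) throws IOException {
    // Placeholder NameNode URI and path; adjust for your cluster.
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    try {
      // getFileInfo returns null when the path does not exist.
      HdfsFileStatus status = client.getFileInfo("/tmp/bar");
      if (status == null) {
        System.out.println("Path not found");
        return;
      }
      System.out.println("fileId = " + status.getFileId());
      System.out.println("length = " + status.getLen());
      System.out.println("isDir  = " + status.isDir());
      System.out.println("owner  = " + status.getOwner());
      System.out.println("group  = " + status.getGroup());
      System.out.println("perm   = " + status.getPermission());
      System.out.println("mtime  = " + status.getModificationTime());
    } finally {
      client.close();
    }
  }
}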

Example 1: getNfs3FileAttrFromFileStatus

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus(
    HdfsFileStatus fs, IdMappingServiceProvider iug) {
  /**
   * Some 32-bit Linux clients have a problem with the 64-bit fileId: the
   * client appears to take only the lower 32 bits of the fileId and treat
   * them as a signed int, so when the 32nd bit is 1 the value is considered
   * invalid.
   */
  NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
  fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
  int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
  long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
      .getChildrenNum()) : fs.getLen();
  return new Nfs3FileAttributes(fileType, nlink,
      fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()),
      iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */,
      fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(),
      new Nfs3FileAttributes.Specdata3());
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: Nfs3Utils.java

Example 2: testPolicyPersistenceInFsImage

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test
public void testPolicyPersistenceInFsImage() throws IOException {
  startUpCluster(false, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, 0, true);
  // checkpoint
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(true);

  // Stat the file and check that the lazyPersist flag is returned back.
  HdfsFileStatus status = client.getFileInfo(path.toString());
  assertThat(status.getStoragePolicy(), is(LAZY_PERSIST_POLICY_ID));
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestLazyPersistFiles.java

Example 3: testGetattr

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testGetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  GETATTR3Request req = new GETATTR3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java

Example 4: testSetattr

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testSetattr() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
      EnumSet.of(SetAttrField.UID));
  SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestRpcProgramNfs3.java

Example 5: testLookup

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testLookup() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
  XDR xdr_req = new XDR();
  lookupReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java

Example 6: testRead

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testRead() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  READ3Request readReq = new READ3Request(handle, 0, 5);
  XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestRpcProgramNfs3.java

Example 7: getFileContentsUsingNfs

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
private byte[] getFileContentsUsingNfs(String fileName, int len)
    throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);

  final READ3Request readReq = new READ3Request(handle, 0, len);
  final XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code: ", Nfs3Status.NFS3_OK,
      response.getStatus());
  assertTrue("expected full read", response.isEof());
  return response.getData().array();
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestRpcProgramNfs3.java

Example 8: testMkdir

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testMkdir() throws Exception {//FixME
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
  req.serialize(xdr_req);
  
  // Attempt to mkdir by an unprivileged user should fail.
  MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  XDR xdr_req2 = new XDR();
  MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
  req2.serialize(xdr_req2);
  
  // Attempt to mkdir by a privileged user should pass.
  MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestRpcProgramNfs3.java

Example 9: testSymlink

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testSymlink() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
      "bar");
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestRpcProgramNfs3.java

Example 10: testRmdir

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testRmdir() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  RMDIR3Request req = new RMDIR3Request(handle, "foo");
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java

Example 11: append

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: ClientNamenodeProtocolTranslatorPB.java

Example 12: testReaddir

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testReaddir() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
  req.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java

Example 13: testReaddirplus

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testReaddirplus() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java

Example 14: testFsstat

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testFsstat() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  FSSTAT3Request req = new FSSTAT3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java

Example 15: testFsinfo

import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; // import the required package/class
@Test(timeout = 60000)
public void testFsinfo() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);
  XDR xdr_req = new XDR();
  FSINFO3Request req = new FSINFO3Request(handle);
  req.serialize(xdr_req);
  
  // Attempt by an unprivileged user should fail.
  FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestRpcProgramNfs3.java


Note: The org.apache.hadoop.hdfs.protocol.HdfsFileStatus class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.