

Java READ3Response Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.nfs.nfs3.response.READ3Response. If you are wondering what READ3Response is for, how to use it, or are looking for working examples, the hand-picked code samples below should help.


The READ3Response class belongs to the org.apache.hadoop.nfs.nfs3.response package. Five code examples of the class are shown below, ordered by popularity.
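Before the examples, here is a minimal sketch (not taken from the projects below) of how calling code might inspect a READ3Response returned by RpcProgramNfs3.read(...). It uses only the accessors that appear in the examples (getStatus(), isEof(), getData()); the helper class name and its error handling are illustrative assumptions, not part of the Hadoop API.

import java.nio.ByteBuffer;

import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READ3Response;

// Hypothetical helper, for illustration only: extract the payload of a
// READ3Response, failing fast on a non-OK NFS status.
final class Read3ResponseUtil {
  static byte[] dataOrThrow(READ3Response response) {
    if (response.getStatus() != Nfs3Status.NFS3_OK) {
      throw new IllegalStateException(
          "READ3 failed with NFS status " + response.getStatus());
    }
    // response.isEof() tells the caller whether the read reached end of
    // file; the tests below assert it when they expect a full read.
    ByteBuffer data = response.getData();
    // Copy only the bytes the server actually returned instead of assuming
    // the backing array is exactly the requested length.
    byte[] bytes = new byte[data.remaining()];
    data.duplicate().get(bytes);
    return bytes;
  }
}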

Example 1: testRead
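This test from TestRpcProgramNfs3 builds a READ3Request for /tmp/bar, serializes it to XDR, and verifies that an unprivileged caller gets NFS3ERR_ACCES while a privileged caller gets NFS3_OK.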

import org.apache.hadoop.nfs.nfs3.response.READ3Response; // import the required package/class
@Test(timeout = 60000)
public void testRead() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  READ3Request readReq = new READ3Request(handle, 0, 5);
  XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
 
Developer: naver, Project: hadoop, Lines of code: 24, Source: TestRpcProgramNfs3.java

Example 2: getFileContentsUsingNfs
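This helper issues a READ3 request for the first len bytes of the given file, asserts an NFS3_OK status and that EOF was reached, and returns the response data as a byte array.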

import org.apache.hadoop.nfs.nfs3.response.READ3Response; // import the required package/class
private byte[] getFileContentsUsingNfs(String fileName, int len)
    throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);

  final READ3Request readReq = new READ3Request(handle, 0, len);
  final XDR xdr_req = new XDR();
  readReq.serialize(xdr_req);

  final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code: ", Nfs3Status.NFS3_OK,
      response.getStatus());
  assertTrue("expected full read", response.isEof());
  return response.getData().array();
}
 
Developer: naver, Project: hadoop, Lines of code: 18, Source: TestRpcProgramNfs3.java

Example 3: read
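This override in RpcProgramNfs3 simply dispatches the incoming XDR request to the read overload that takes an explicit SecurityHandler and the client's remote address.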

import org.apache.hadoop.nfs.nfs3.response.READ3Response; // import the required package/class
@Override
public READ3Response read(XDR xdr, RpcInfo info) {
  return read(xdr, getSecurityHandler(info), info.remoteAddress());
}
 
Developer: naver, Project: hadoop, Lines of code: 5, Source: RpcProgramNfs3.java

Example 4: testOOOWrites
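This test writes three 32-byte buffers to a freshly created file in reverse offset order through the NFS gateway, waits for the writes to complete, then reads back the buffer at offset 32 and checks that it matches the data written there, confirming that out-of-order writes are reassembled correctly.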

import org.apache.hadoop.nfs.nfs3.response.READ3Response; // import the required package/class
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 76, Source: TestWrites.java

Example 5: testOOOWrites
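This is the same out-of-order write test as Example 4, taken from the aliyun-oss-hadoop-fs fork; the only difference is that the NameNode address is resolved via DFSUtilClient.getNNAddress instead of NameNode.getAddress.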

import org.apache.hadoop.nfs.nfs3.response.READ3Response; // import the required package/class
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
        config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 77, Source: TestWrites.java


Note: The org.apache.hadoop.nfs.nfs3.response.READ3Response class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and the source code copyright remains with those authors; refer to each project's license before distributing or using the code. Do not reproduce this article without permission.