当前位置: 首页>>代码示例>>Java>>正文


Java WRITE3Request.serialize方法代码示例

本文整理汇总了Java中org.apache.hadoop.nfs.nfs3.request.WRITE3Request.serialize方法的典型用法代码示例。如果您正苦于以下问题:Java WRITE3Request.serialize方法的具体用法?Java WRITE3Request.serialize怎么用?Java WRITE3Request.serialize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.nfs.nfs3.request.WRITE3Request的用法示例。


在下文中一共展示了WRITE3Request.serialize方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createFileUsingNfs

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
/**
 * Creates an empty file through HDFS, then fills it with {@code buffer}
 * by issuing a WRITE3 request directly against the NFS gateway.
 */
private void createFileUsingNfs(String fileName, byte[] buffer)
    throws Exception {
  // Create a zero-length file first; the NFS write below supplies the data.
  DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0);

  final HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo(fileName);
  final FileHandle fileHandle = new FileHandle(fileStatus.getFileId());

  final WRITE3Request writeRequest = new WRITE3Request(fileHandle, 0,
      buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  final XDR serialized = new XDR();
  writeRequest.serialize(serialized);

  final WRITE3Response writeResponse = nfsd.write(serialized.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  // A null response indicates the write was accepted and queued
  // asynchronously by the gateway.
  assertEquals("Incorrect response: ", null, writeResponse);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:TestRpcProgramNfs3.java

示例2: write

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
/**
 * Builds a fully serialized NFS WRITE RPC request: RPC call header,
 * AUTH_NONE credentials and verifier, then the WRITE3 body.
 */
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR xdr = new XDR();
  RpcCall.write(xdr, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3_WRITE);

  // AUTH_NONE credentials: flavor 0 followed by a zero-length body.
  xdr.writeInt(0);
  xdr.writeInt(0);
  // AUTH_NONE verifier: flavor 0 followed by a zero-length body.
  xdr.writeInt(0);
  xdr.writeInt(0);

  WRITE3Request writeReq = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  writeReq.serialize(xdr);
  return xdr;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:18,代码来源:TestOutOfOrderWrite.java

示例3: write

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
/**
 * Builds a fully serialized NFS WRITE RPC request with AUTH_NONE
 * credentials/verifier followed by the WRITE3 body.
 */
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  // Fix: this helper serializes a WRITE3Request, so the RPC call header
  // must carry the WRITE procedure number — the original stamped CREATE,
  // which would make the server dispatch the payload to the wrong handler.
  RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.WRITE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  WRITE3Request write1 = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:TestOutOfOrderWrite.java

示例4: testWrite

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
/**
 * Verifies WRITE3 access control: an unprivileged caller is rejected with
 * NFS3ERR_ACCES while a privileged caller's write is accepted.
 */
@Test(timeout = 60000)
public void testWrite() throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  final FileHandle handle = new FileHandle(status.getFileId());

  final byte[] buffer = new byte[10];
  for (int i = 0; i < buffer.length; i++) {
    buffer[i] = (byte) i;
  }

  final WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
      WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  final XDR xdrReq = new XDR();
  writeReq.serialize(xdrReq);

  // An unprivileged user must be denied with NFS3ERR_ACCES.
  final WRITE3Response response1 = nfsd.write(xdrReq.asReadOnlyWrap(),
      null, 1, securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // A privileged user succeeds; a null response means the write was
  // accepted and queued asynchronously.
  final WRITE3Response response2 = nfsd.write(xdrReq.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect response:", null, response2);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:TestRpcProgramNfs3.java

示例5: write

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
/**
 * Builds a fully serialized NFS WRITE RPC request with AUTH_NONE
 * credentials/verifier followed by the WRITE3 body.
 */
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  // Fix: a WRITE request must carry the WRITE procedure number in its RPC
  // call header — the original stamped CREATE, which mismatches the
  // WRITE3Request payload serialized below.
  RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.WRITE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  WRITE3Request write1 =
      new WRITE3Request(handle, offset, count, WriteStableHow.UNSTABLE,
          ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:14,代码来源:TestOutOfOrderWrite.java

示例6: testOOOWrites

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
// Verifies that the NFS gateway correctly reassembles out-of-order WRITE
// requests: numOOO chunks are written back-to-front against a fresh file
// on a mini DFS cluster, then the middle chunk is read back and compared.
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  // Mock security handler that reports the current OS user as the caller.
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  // Allow the current user to proxy for any group and any host, since the
  // NFS gateway impersonates the calling user.
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    // Create a uniquely named target file under the root handle.
    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    // Chunk i is filled entirely with byte value i so that the read-back
    // check below can tell the chunks apart.
    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    // Send the chunks in reverse file order: chunk 0 at the highest
    // offset, the last chunk at offset 0.
    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    // Wait for the gateway to flush the pending writes, then read back the
    // middle chunk (offset bufSize, length bufSize) and verify its content.
    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:76,代码来源:TestWrites.java

示例7: testOOOWrites

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; //导入方法依赖的package包/类
// Verifies that the NFS gateway correctly reassembles out-of-order WRITE
// requests: numOOO chunks are written back-to-front against a fresh file
// on a mini DFS cluster, then the middle chunk is read back and compared.
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  // Mock security handler that reports the current OS user as the caller.
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  // Allow the current user to proxy for any group and any host, since the
  // NFS gateway impersonates the calling user.
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
        config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    // Create a uniquely named target file under the root handle.
    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    // Chunk i is filled entirely with byte value i so that the read-back
    // check below can tell the chunks apart.
    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    // Send the chunks in reverse file order: chunk 0 at the highest
    // offset, the last chunk at offset 0.
    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    // Wait for the gateway to flush the pending writes, then read back the
    // middle chunk (offset bufSize, length bufSize) and verify its content.
    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:77,代码来源:TestWrites.java


注:本文中的org.apache.hadoop.nfs.nfs3.request.WRITE3Request.serialize方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。