

Java WRITE3Request Class Code Examples

This page collects typical usage examples of the Java class org.apache.hadoop.nfs.nfs3.request.WRITE3Request, drawn from open-source projects. If you are unsure what WRITE3Request is for or how to use it, the curated examples below should help.


The WRITE3Request class belongs to the org.apache.hadoop.nfs.nfs3.request package. Fifteen code examples of the class are collected below, sorted by popularity by default.
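Before the examples, here is a minimal, self-contained sketch of the construct-and-serialize pattern that most of the snippets below share. The file-handle id, offset, and payload are placeholder values; the constructor and serialize signatures are the ones used by the test code in the examples that follow.

import java.nio.ByteBuffer;

import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.oncrpc.XDR;

public class Write3RequestSketch {
  public static void main(String[] args) {
    byte[] payload = "hello".getBytes();
    FileHandle handle = new FileHandle(1234L); // placeholder file id

    // Write payload.length bytes at offset 0; UNSTABLE lets the server
    // reply before syncing the data to stable storage.
    WRITE3Request req = new WRITE3Request(handle, 0L, payload.length,
        WriteStableHow.UNSTABLE, ByteBuffer.wrap(payload));

    // Marshal the request into an XDR buffer, as the tests below do
    // before handing it to the NFS3 server program.
    XDR xdr = new XDR();
    req.serialize(xdr);
  }
}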

Example 1: checkRepeatedWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
private WriteCtx checkRepeatedWriteRequest(WRITE3Request request,
    Channel channel, int xid) {
  OffsetRange range = new OffsetRange(request.getOffset(),
      request.getOffset() + request.getCount());
  WriteCtx writeCtx = pendingWrites.get(range);
  if (writeCtx == null) {
    return null;
  } else {
    if (xid != writeCtx.getXid()) {
      LOG.warn("Got a repeated request, same range, with a different xid: "
          + xid + " xid in old request: " + writeCtx.getXid());
      //TODO: better handling.
    }
    return writeCtx;  
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: OpenFileCtx.java

Example 2: alterWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request, long cachedOffset) {
  long offset = request.getOffset();
  int count = request.getCount();
  long smallerCount = offset + count - cachedOffset;
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("Got overwrite with appended data (%d-%d),"
        + " current offset %d," + " drop the overlapped section (%d-%d)"
        + " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset
            + count - 1)));
  }
  
  ByteBuffer data = request.getData();
  Preconditions.checkState(data.position() == 0,
      "The write request data has non-zero position");
  data.position((int) (cachedOffset - offset));
  Preconditions.checkState(data.limit() - data.position() == smallerCount,
      "The write request buffer has wrong limit/position regarding count");
  
  request.setOffset(cachedOffset);
  request.setCount((int) smallerCount);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: OpenFileCtx.java
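To make the trimming behavior concrete, here is a short usage sketch with hypothetical values (it reuses the imports from the sketch at the top of this page and relies on alterWriteRequest being public static and @VisibleForTesting, as declared above):

// A 10-byte write at offset 100 arrives while the cache already holds data
// up to (but not including) offset 105, i.e. cachedOffset == 105.
byte[] buf = new byte[10];
WRITE3Request req = new WRITE3Request(new FileHandle(1L), 100L, buf.length,
    WriteStableHow.UNSTABLE, ByteBuffer.wrap(buf));
OpenFileCtx.alterWriteRequest(req, 105L);

// Afterwards: req.getOffset() == 105, req.getCount() == 5, and
// req.getData().position() == 5, so only the 5 non-overlapped bytes
// (the appended data) remain to be written.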

Example 3: createFileUsingNfs

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
private void createFileUsingNfs(String fileName, byte[] buffer)
    throws Exception {
  DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0);

  final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName);
  final long dirId = status.getFileId();
  final FileHandle handle = new FileHandle(dirId);

  final WRITE3Request writeReq = new WRITE3Request(handle, 0,
      buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  final XDR xdr_req = new XDR();
  writeReq.serialize(xdr_req);

  final WRITE3Response response = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect response: ", null, response);
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestRpcProgramNfs3.java

Example 4: alterWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request, long cachedOffset) {
  long offset = request.getOffset();
  int count = request.getCount();
  long smallerCount = offset + count - cachedOffset;
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("Got overwrite with appended data [%d-%d),"
        + " current offset %d," + " drop the overlapped section [%d-%d)"
        + " and append new data [%d-%d).", offset, (offset + count),
        cachedOffset, offset, cachedOffset, cachedOffset, (offset
            + count)));
  }
  
  ByteBuffer data = request.getData();
  Preconditions.checkState(data.position() == 0,
      "The write request data has non-zero position");
  data.position((int) (cachedOffset - offset));
  Preconditions.checkState(data.limit() - data.position() == smallerCount,
      "The write request buffer has wrong limit/position regarding count");
  
  request.setOffset(cachedOffset);
  request.setCount((int) smallerCount);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: OpenFileCtx.java

Example 5: checkRepeatedWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
private WriteCtx checkRepeatedWriteRequest(WRITE3Request request,
    Channel channel, int xid) {
  OffsetRange range = new OffsetRange(request.getOffset(),
      request.getOffset() + request.getCount());
  WriteCtx writeCtx = pendingWrites.get(range);
  if (writeCtx == null) {
    return null;
  } else {
    if (xid != writeCtx.getXid()) {
      LOG.warn("Got a repeated request, same range, with a different xid:"
          + xid + " xid in old request:" + writeCtx.getXid());
      //TODO: better handling.
    }
    return writeCtx;  
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 17, Source: OpenFileCtx.java

Example 6: write

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3_WRITE);

  // credentials
  request.writeInt(0); // auth null
  request.writeInt(0); // length zero
  // verifier
  request.writeInt(0); // auth null
  request.writeInt(0); // length zero
  WRITE3Request write1 = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: TestOutOfOrderWrite.java

Example 7: checkRepeatedWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
private WriteCtx checkRepeatedWriteRequest(WRITE3Request request,
    Channel channel, int xid) {
  OffsetRange range = new OffsetRange(request.getOffset(),
      request.getOffset() + request.getCount());
  WriteCtx writeCtx = pendingWrites.get(range);
  if (writeCtx == null) {
    return null;
  } else {
    if (xid != writeCtx.getXid()) {
      LOG.warn(
          "Got a repeated request, same range, with a different xid:" + xid +
              " xid in old request:" + writeCtx.getXid());
      //TODO: better handling.
    }
    return writeCtx;
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 18, Source: OpenFileCtx.java

Example 8: alterWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request,
    long cachedOffset) {
  long offset = request.getOffset();
  int count = request.getCount();
  long smallerCount = offset + count - cachedOffset;
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format(
        "Got overwrite with appended data (%d-%d)," + " current offset %d," +
            " drop the overlapped section (%d-%d)" +
            " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset,
        (offset + count - 1)));
  }
  
  ByteBuffer data = request.getData();
  Preconditions.checkState(data.position() == 0,
      "The write request data has non-zero position");
  data.position((int) (cachedOffset - offset));
  Preconditions.checkState(data.limit() - data.position() == smallerCount,
      "The write request buffer has wrong limit/position regarding count");
  
  request.setOffset(cachedOffset);
  request.setCount((int) smallerCount);
}
 
Developer: hopshadoop, Project: hops, Lines: 26, Source: OpenFileCtx.java

Example 9: write

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  WRITE3Request write1 = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestOutOfOrderWrite.java

Example 10: testWrite

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
@Test(timeout = 60000)
public void testWrite() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId = status.getFileId();
  FileHandle handle = new FileHandle(dirId);

  byte[] buffer = new byte[10];
  for (int i = 0; i < 10; i++) {
    buffer[i] = (byte) i;
  }

  WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
      WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  XDR xdr_req = new XDR();
  writeReq.serialize(xdr_req);

  // Attempt by an unprivileged user should fail.
  WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // Attempt by a privileged user should pass.
  WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect response:", null, response2);
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestRpcProgramNfs3.java

Example 11: write

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
      new VerifierNone()).write(request);

  WRITE3Request write1 =
      new WRITE3Request(handle, offset, count, WriteStableHow.UNSTABLE,
          ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
 
Developer: hopshadoop, Project: hops, Lines: 14, Source: TestOutOfOrderWrite.java

Example 12: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write is added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      checkDump();
      
      // In testing, we noticed that some Linux clients send a batch (e.g., 1MB)
      // of reordered writes and won't send more writes until they get
      // responses to the previous batch. So send the response immediately
      // for an unstable non-sequential write
      if (request.getStableHow() == WriteStableHow.UNSTABLE) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("UNSTABLE write request, send response for offset: "
              + writeCtx.getOffset());
        }
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils
            .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
                xid, new VerifierNone()), xid);
        writeCtx.setReplied(true);
      }
    }
  }
}
 
Developer: chendave, Project: hadoop-TCP, Lines: 40, Source: OpenFileCtx.java

Example 13: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write is added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      waitForDump();
      
      // In testing, we noticed that some Linux clients send a batch (e.g., 1MB)
      // of reordered writes and won't send more writes until they get
      // responses to the previous batch. So send the response immediately
      // for an unstable non-sequential write
      if (stableHow != WriteStableHow.UNSTABLE) {
        LOG.info("Have to change stable write to unstable write: "
            + request.getStableHow());
        stableHow = WriteStableHow.UNSTABLE;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("UNSTABLE write request, send response for offset: "
            + writeCtx.getOffset());
      }
      WccData fileWcc = new WccData(preOpAttr, latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
          fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
      RpcProgramNfs3.metrics.addWrite(Nfs3Utils
          .getElapsedTime(writeCtx.startTime));
      Nfs3Utils
          .writeChannel(channel, response.serialize(new XDR(),
              xid, new VerifierNone()), xid);
      writeCtx.setReplied(true);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: OpenFileCtx.java
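For reference, a hedged sketch of the reply-construction step at the end of this method, lifted out of its original context (the no-argument Nfs3FileAttributes constructor and the free-standing count/xid variables are assumptions for illustration; the remaining calls appear verbatim above):

// Weak cache consistency (WCC) data built from the file's latest attributes.
Nfs3FileAttributes latestAttr = new Nfs3FileAttributes(); // assumed no-arg ctor
WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);

// An NFS3_OK reply echoing the byte count and the (downgraded) stability level.
WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc,
    count, WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);

// Marshal header and body for transaction xid, ready for Nfs3Utils.writeChannel.
XDR reply = response.serialize(new XDR(), xid, new VerifierNone());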

Example 14: testOOOWrites

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 76, Source: TestWrites.java

Example 15: testOOOWrites

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the required package/class
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use an ephemeral port in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
        config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 77, Source: TestWrites.java


Note: The org.apache.hadoop.nfs.nfs3.request.WRITE3Request examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by the community; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not repost without permission.