当前位置: 首页>>代码示例>>Java>>正文


Java WRITE3Response类代码示例

本文整理汇总了Java中org.apache.hadoop.nfs.nfs3.response.WRITE3Response的典型用法代码示例。如果您正苦于以下问题:Java WRITE3Response类的具体用法?Java WRITE3Response怎么用?Java WRITE3Response使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


WRITE3Response类属于org.apache.hadoop.nfs.nfs3.response包,在下文中一共展示了WRITE3Response类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createFileUsingNfs

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * Creates an empty HDFS file and then pushes {@code buffer} into it through
 * the NFS gateway via a DATA_SYNC WRITE3 call, asserting that the
 * in-progress write produces no immediate response.
 *
 * @param fileName path of the file to create
 * @param buffer   payload to write through the NFS WRITE3 request
 * @throws Exception if file creation or the RPC round trip fails
 */
private void createFileUsingNfs(String fileName, byte[] buffer)
    throws Exception {
  DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0);

  // Resolve the file id through the NameNode and wrap it in an NFS handle.
  final long fileId = nn.getRpcServer().getFileInfo(fileName).getFileId();
  final FileHandle fileHandle = new FileHandle(fileId);

  // Serialize a stable (DATA_SYNC) write of the whole buffer at offset 0.
  final WRITE3Request request = new WRITE3Request(fileHandle, 0,
      buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
  final XDR serialized = new XDR();
  request.serialize(serialized);

  final WRITE3Response response = nfsd.write(serialized.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  // The write is handled asynchronously, so no response is returned here.
  assertEquals("Incorrect response: ", null, response);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:TestRpcProgramNfs3.java

示例2: write

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * NFS WRITE entry point: pulls the transaction id, security handler, and
 * remote address out of the RPC call metadata and delegates to the internal
 * write implementation.
 */
@Override
public WRITE3Response write(XDR xdr, RpcInfo info) {
  final RpcCall call = (RpcCall) info.header();
  return write(xdr, info.channel(), call.getXid(), getSecurityHandler(info),
      info.remoteAddress());
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:RpcProgramNfs3.java

示例3: testWrite

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * Verifies access control on the NFS WRITE handler: an unprivileged caller
 * is rejected with NFS3ERR_ACCES, while a privileged caller's write is
 * accepted (and, being handled asynchronously, yields no immediate reply).
 */
@Test(timeout = 60000)
public void testWrite() throws Exception {
  final HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  final FileHandle handle = new FileHandle(status.getFileId());

  // Payload of ten sequential byte values 0..9.
  final byte[] data = new byte[10];
  for (int i = 0; i < data.length; i++) {
    data[i] = (byte) i;
  }

  final WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
      WriteStableHow.DATA_SYNC, ByteBuffer.wrap(data));
  final XDR xdr_req = new XDR();
  writeReq.serialize(xdr_req);

  // An unprivileged caller must be rejected.
  final WRITE3Response denied = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      denied.getStatus());

  // A privileged caller succeeds; the write is still pending, so the
  // handler returns no immediate response.
  final WRITE3Response allowed = nfsd.write(xdr_req.asReadOnlyWrap(),
      null, 1, securityHandler,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect response:", null, allowed);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:TestRpcProgramNfs3.java

示例4: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * Processes a WRITE request for a range not previously seen. The write is
 * either an overwrite of already-flushed data, starts the writeback
 * pipeline, or is cached as an out-of-order write — in which case an
 * UNSTABLE request is acknowledged immediately so the client keeps sending.
 */
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  final WriteStableHow stableHow = request.getStableHow();
  final WccAttr preOpAttr = latestAttr.getWccAttr();
  final int count = request.getCount();

  final WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // A null context means offset < nextOffset: the range was already
    // written, so handle it as an overwrite of existing data.
    processOverWrite(dfsClient, request, channel, xid, iug);
    return;
  }

  // The write now sits in pendingWrites; start writing back if possible.
  if (checkAndStartWrite(asyncDataService, writeCtx)) {
    return;
  }

  // offset > nextOffset: out-of-order write. Dump cached data to disk if
  // the in-memory cache has grown too large.
  checkDump();

  // In test, noticed some Linux clients send a batch (e.g., 1MB) of
  // reordered writes and won't send more until the previous batch is
  // answered, so reply immediately to unstable non-sequential writes.
  if (request.getStableHow() != WriteStableHow.UNSTABLE) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("UNSTABLE write request, send response for offset: "
        + writeCtx.getOffset());
  }
  final WccData fileWcc = new WccData(preOpAttr, latestAttr);
  final WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
      fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
  Nfs3Utils.writeChannel(channel,
      response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
      xid);
  writeCtx.setReplied(true);
}
 
开发者ID:chendave,项目名称:hadoop-TCP,代码行数:40,代码来源:OpenFileCtx.java

示例5: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * Processes a WRITE request for a range not previously seen. The write is
 * either an overwrite of already-flushed data, starts the writeback
 * pipeline, or is cached as an out-of-order write — in which case it is
 * acknowledged immediately (downgraded to UNSTABLE, since the data has not
 * been synced yet) so the client keeps sending.
 */
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
  WriteStableHow stableHow = request.getStableHow();
  final WccAttr preOpAttr = latestAttr.getWccAttr();
  final int count = request.getCount();

  final WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // A null context means offset < nextOffset: the range was already
    // written, so handle it as an overwrite of existing data.
    processOverWrite(dfsClient, request, channel, xid, iug);
    return;
  }

  // The write now sits in pendingWrites; start writing back if possible.
  if (checkAndStartWrite(asyncDataService, writeCtx)) {
    return;
  }

  // offset > nextOffset: out-of-order write. Block until cached data has
  // been dumped if the in-memory cache is over its limit.
  waitForDump();

  // In test, noticed some Linux clients send a batch (e.g., 1MB) of
  // reordered writes and won't send more until the previous batch is
  // answered, so reply immediately. The data is not synced yet, so a
  // stable request must be answered as unstable.
  if (stableHow != WriteStableHow.UNSTABLE) {
    LOG.info("Have to change stable write to unstable write: "
        + request.getStableHow());
    stableHow = WriteStableHow.UNSTABLE;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("UNSTABLE write request, send response for offset: "
        + writeCtx.getOffset());
  }
  final WccData fileWcc = new WccData(preOpAttr, latestAttr);
  final WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
      fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
  RpcProgramNfs3.metrics.addWrite(
      Nfs3Utils.getElapsedTime(writeCtx.startTime));
  Nfs3Utils.writeChannel(channel,
      response.serialize(new XDR(), xid, new VerifierNone()), xid);
  writeCtx.setReplied(true);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:46,代码来源:OpenFileCtx.java

示例6: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * Processes a WRITE request for a range not previously seen. The write is
 * either an overwrite of already-flushed data, starts the writeback
 * pipeline, or is cached as an out-of-order write — in which case it is
 * acknowledged immediately (downgraded to UNSTABLE, since the data has not
 * been synced yet) so the client keeps sending.
 */
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  final WccAttr preOpAttr = latestAttr.getWccAttr();
  final int count = request.getCount();

  final WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // A null context means offset < nextOffset: the range was already
    // written, so handle it as an overwrite of existing data.
    processOverWrite(dfsClient, request, channel, xid, iug);
    return;
  }

  // The write now sits in pendingWrites; start writing back if possible.
  if (checkAndStartWrite(asyncDataService, writeCtx)) {
    return;
  }

  // offset > nextOffset: out-of-order write. Dump cached data to disk if
  // the in-memory cache has grown too large.
  checkDump();

  // In test, noticed some Linux clients send a batch (e.g., 1MB) of
  // reordered writes and won't send more until the previous batch is
  // answered, so reply immediately. The data is not synced yet, so a
  // stable request must be answered as unstable.
  if (stableHow != WriteStableHow.UNSTABLE) {
    LOG.info("Have to change stable write to unstable write:" +
        request.getStableHow());
    stableHow = WriteStableHow.UNSTABLE;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("UNSTABLE write request, send response for offset: " +
        writeCtx.getOffset());
  }
  final WccData fileWcc = new WccData(preOpAttr, latestAttr);
  final WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
      fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
  Nfs3Utils.writeChannel(channel,
      response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
      xid);
  writeCtx.setReplied(true);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:45,代码来源:OpenFileCtx.java

示例7: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; //导入依赖的package包/类
/**
 * Processes a WRITE request for a range not previously seen: an overwrite
 * of already-flushed data is delegated to {@code processOverWrite}; a
 * sequential write starts the writeback pipeline; an out-of-order write is
 * cached and acknowledged immediately, downgraded to UNSTABLE since the
 * data has not been synced yet.
 */
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  final WccAttr preOpAttr = latestAttr.getWccAttr();
  final int count = request.getCount();

  final WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // Null context: offset < nextOffset, i.e. this range was already
    // written — treat it as an overwrite of existing data.
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else if (!checkAndStartWrite(asyncDataService, writeCtx)) {
    // The write is in pendingWrites but writeback could not start:
    // offset > nextOffset (out of order). Dump cached data if the
    // in-memory cache has grown too large.
    checkDump();

    // In test, noticed some Linux clients send a batch (e.g., 1MB) of
    // reordered writes and won't send more until the previous batch is
    // answered, so reply immediately. The data is not synced yet, so a
    // stable request must be answered as unstable.
    if (stableHow != WriteStableHow.UNSTABLE) {
      LOG.info("Have to change stable write to unstable write:"
          + request.getStableHow());
      stableHow = WriteStableHow.UNSTABLE;
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("UNSTABLE write request, send response for offset: "
          + writeCtx.getOffset());
    }
    final WccData fileWcc = new WccData(preOpAttr, latestAttr);
    final WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
        fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    Nfs3Utils.writeChannel(channel,
        response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
        xid);
    writeCtx.setReplied(true);
  }
}
 
开发者ID:Seagate,项目名称:hadoop-on-lustre2,代码行数:44,代码来源:OpenFileCtx.java


注:本文中的org.apache.hadoop.nfs.nfs3.response.WRITE3Response类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。