

Java WRITE3Request.getCount Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.nfs.nfs3.request.WRITE3Request.getCount. If you are unsure what WRITE3Request.getCount does or how to call it, the curated code examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.nfs.nfs3.request.WRITE3Request.


A total of 12 code examples of the WRITE3Request.getCount method are shown below, sorted by popularity by default.
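
As orientation before the project examples, here is a minimal, self-contained sketch of the method itself: getCount() returns the number of bytes an NFSv3 WRITE call carries, and together with getOffset() it defines the byte range the request covers. The WRITE3Request and FileHandle constructors used here match the Hadoop 2.x API the examples below rely on; treat them as assumptions if you are on a different version.

import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;

public class GetCountDemo {
  public static void main(String[] args) {
    byte[] payload = "hello, nfs3".getBytes();
    // A WRITE3Request carries the target file handle, the file offset,
    // the byte count, the stability mode, and the data buffer.
    WRITE3Request request = new WRITE3Request(new FileHandle(1L), 0L,
        payload.length, WriteStableHow.UNSTABLE, ByteBuffer.wrap(payload));
    // getCount() reports how many bytes this write covers; with getOffset()
    // it yields the half-open range [offset, offset + count).
    System.out.println("count = " + request.getCount());        // count = 11
    System.out.println("range = [" + request.getOffset() + ", "
        + (request.getOffset() + request.getCount()) + ")");    // range = [0, 11)
  }
}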

Example 1: checkRepeatedWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private WriteCtx checkRepeatedWriteRequest(WRITE3Request request,
    Channel channel, int xid) {
  OffsetRange range = new OffsetRange(request.getOffset(),
      request.getOffset() + request.getCount());
  WriteCtx writeCtx = pendingWrites.get(range);
  if (writeCtx == null) {
    return null;
  } else {
    if (xid != writeCtx.getXid()) {
      LOG.warn("Got a repeated request, same range, with a different xid: "
          + xid + " xid in old request: " + writeCtx.getXid());
      // TODO: better handling.
    }
    return writeCtx;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: OpenFileCtx.java

Example 2: alterWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request, long cachedOffset) {
  long offset = request.getOffset();
  int count = request.getCount();
  long smallerCount = offset + count - cachedOffset;
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("Got overwrite with appended data (%d-%d),"
        + " current offset %d," + " drop the overlapped section (%d-%d)"
        + " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset
            + count - 1)));
  }
  
  ByteBuffer data = request.getData();
  Preconditions.checkState(data.position() == 0,
      "The write request data has non-zero position");
  data.position((int) (cachedOffset - offset));
  Preconditions.checkState(data.limit() - data.position() == smallerCount,
      "The write request buffer has wrong limit/position regarding count");
  
  request.setOffset(cachedOffset);
  request.setCount((int) smallerCount);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: OpenFileCtx.java
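
To make the trimming arithmetic concrete, the following is a hypothetical hand-run of the same computation (not taken from any of the projects above): a 100-byte write at offset 0 arrives when the file has already been written up to offset 40, so the first 40 bytes overlap the cached data and only the last 60 are genuinely appended. The WRITE3Request constructor is the same Hadoop 2.x signature assumed earlier.

import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;

public class AlterWriteRequestDemo {
  public static void main(String[] args) {
    long cachedOffset = 40L;                    // file already written up to byte 40
    ByteBuffer data = ByteBuffer.allocate(100); // 100-byte payload starting at offset 0
    WRITE3Request request = new WRITE3Request(new FileHandle(1L), 0L, 100,
        WriteStableHow.UNSTABLE, data);

    // The same steps alterWriteRequest performs: compute how many bytes lie
    // past cachedOffset, skip the overlapped prefix in the buffer, and
    // rewrite the request so it describes only the appended tail.
    long smallerCount = request.getOffset() + request.getCount() - cachedOffset;
    data.position((int) (cachedOffset - request.getOffset()));
    request.setOffset(cachedOffset);
    request.setCount((int) smallerCount);

    // The request now covers [40, 100) instead of [0, 100).
    System.out.println(request.getOffset() + "/" + request.getCount()); // 40/60
  }
}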

Example 3: alterWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request, long cachedOffset) {
  long offset = request.getOffset();
  int count = request.getCount();
  long smallerCount = offset + count - cachedOffset;
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format("Got overwrite with appended data [%d-%d),"
        + " current offset %d," + " drop the overlapped section [%d-%d)"
        + " and append new data [%d-%d).", offset, (offset + count),
        cachedOffset, offset, cachedOffset, cachedOffset, (offset
            + count)));
  }
  
  ByteBuffer data = request.getData();
  Preconditions.checkState(data.position() == 0,
      "The write request data has non-zero position");
  data.position((int) (cachedOffset - offset));
  Preconditions.checkState(data.limit() - data.position() == smallerCount,
      "The write request buffer has wrong limit/position regarding count");
  
  request.setOffset(cachedOffset);
  request.setCount((int) smallerCount);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 24, Source file: OpenFileCtx.java

Example 4: checkRepeatedWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private WriteCtx checkRepeatedWriteRequest(WRITE3Request request,
    Channel channel, int xid) {
  OffsetRange range = new OffsetRange(request.getOffset(),
      request.getOffset() + request.getCount());
  WriteCtx writeCtx = pendingWrites.get(range);
  if (writeCtx == null) {
    return null;
  } else {
    if (xid != writeCtx.getXid()) {
      LOG.warn("Got a repeated request, same range, with a different xid:"
          + xid + " xid in old request:" + writeCtx.getXid());
      // TODO: better handling.
    }
    return writeCtx;
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 17, Source file: OpenFileCtx.java

Example 5: checkRepeatedWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private WriteCtx checkRepeatedWriteRequest(WRITE3Request request,
    Channel channel, int xid) {
  OffsetRange range = new OffsetRange(request.getOffset(),
      request.getOffset() + request.getCount());
  WriteCtx writeCtx = pendingWrites.get(range);
  if (writeCtx == null) {
    return null;
  } else {
    if (xid != writeCtx.getXid()) {
      LOG.warn(
          "Got a repeated request, same range, with a different xid:" + xid +
              " xid in old request:" + writeCtx.getXid());
      // TODO: better handling.
    }
    return writeCtx;
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 18, Source file: OpenFileCtx.java

Example 6: alterWriteRequest

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
@VisibleForTesting
public static void alterWriteRequest(WRITE3Request request,
    long cachedOffset) {
  long offset = request.getOffset();
  int count = request.getCount();
  long smallerCount = offset + count - cachedOffset;
  if (LOG.isDebugEnabled()) {
    LOG.debug(String.format(
        "Got overwrite with appended data (%d-%d)," + " current offset %d," +
            " drop the overlapped section (%d-%d)" +
            " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset,
        (offset + count - 1)));
  }
  
  ByteBuffer data = request.getData();
  Preconditions.checkState(data.position() == 0,
      "The write request data has non-zero position");
  data.position((int) (cachedOffset - offset));
  Preconditions.checkState(data.limit() - data.position() == smallerCount,
      "The write request buffer has wrong limit/position regarding count");
  
  request.setOffset(cachedOffset);
  request.setCount((int) smallerCount);
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 26, Source file: OpenFileCtx.java

Example 7: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write is added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      checkDump();
      
      // In test, noticed some Linux client sends a batch (e.g., 1MB)
      // of reordered writes and won't send more writes until it gets
      // responses of the previous batch. So here send response immediately
      // for unstable non-sequential write
      if (request.getStableHow() == WriteStableHow.UNSTABLE) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("UNSTABLE write request, send response for offset: "
              + writeCtx.getOffset());
        }
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils
            .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
                xid, new VerifierNone()), xid);
        writeCtx.setReplied(true);
      }
    }
  }
}
 
Developer ID: chendave, Project: hadoop-TCP, Lines of code: 40, Source file: OpenFileCtx.java

Example 8: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdMappingServiceProvider iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write is added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      waitForDump();
      
      // In test, noticed some Linux client sends a batch (e.g., 1MB)
      // of reordered writes and won't send more writes until it gets
      // responses of the previous batch. So here send response immediately
      // for unstable non-sequential write
      if (stableHow != WriteStableHow.UNSTABLE) {
        LOG.info("Have to change stable write to unstable write: "
            + request.getStableHow());
        stableHow = WriteStableHow.UNSTABLE;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("UNSTABLE write request, send response for offset: "
            + writeCtx.getOffset());
      }
      WccData fileWcc = new WccData(preOpAttr, latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
          fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
      RpcProgramNfs3.metrics.addWrite(Nfs3Utils
          .getElapsedTime(writeCtx.startTime));
      Nfs3Utils
          .writeChannel(channel, response.serialize(new XDR(),
              xid, new VerifierNone()), xid);
      writeCtx.setReplied(true);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 46, Source file: OpenFileCtx.java
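
As the example above shows, the value obtained from request.getCount() is echoed back to the client inside the WRITE3Response. Here is a minimal sketch of just that response construction, reusing the constructors exactly as the example calls them; the no-arg Nfs3FileAttributes constructor and placeholder count are assumptions for demonstration, since a real server would use the file's actual pre- and post-operation attributes.

import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
import org.apache.hadoop.nfs.nfs3.response.WccData;

public class Write3ResponseDemo {
  public static void main(String[] args) {
    int count = 4096; // normally request.getCount(), as in the example above
    // Placeholder attributes; the server tracks real pre/post-op attributes.
    Nfs3FileAttributes latestAttr = new Nfs3FileAttributes();
    WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
    // The response acknowledges exactly `count` bytes as UNSTABLE, so the
    // client must later COMMIT against the same write verifier.
    WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc,
        count, WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
    System.out.println("acknowledged bytes: " + response.getCount());
  }
}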

Example 9: addWritesToCache

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
/**
 * Creates and adds a WriteCtx into the pendingWrites map. This is a
 * synchronized method to handle concurrent writes.
 *
 * @return A non-null {@link WriteCtx} instance if the incoming write
 * request's offset >= nextOffset. Otherwise null.
 */
private synchronized WriteCtx addWritesToCache(WRITE3Request request,
    Channel channel, int xid) {
  long offset = request.getOffset();
  int count = request.getCount();
  long cachedOffset = nextOffset.get();
  int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
  
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "requesed offset=" + offset + " and current offset=" + cachedOffset);
  }

  // Handle a special case first
  if ((offset < cachedOffset) && (offset + count > cachedOffset)) {
    // One Linux client behavior: after a file is closed and reopened to
    // write, the client sometimes combines previously written data (which
    // could still be in the kernel buffer) with newly appended data in one
    // write. This is usually the first write after the file is reopened. In
    // this
    // case, we log the event and drop the overlapped section.
    LOG.warn(String.format(
        "Got overwrite with appended data (%d-%d)," + " current offset %d," +
            " drop the overlapped section (%d-%d)" +
            " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset,
        (offset + count - 1)));

    if (!pendingWrites.isEmpty()) {
      LOG.warn("There are other pending writes, fail this jumbo write");
      return null;
    }
    
    LOG.warn("Modify this write to write only the appended data");
    alterWriteRequest(request, cachedOffset);

    // Update local variable
    originalCount = count;
    offset = request.getOffset();
    count = request.getCount();
  }
  
  // Fail non-append call
  if (offset < cachedOffset) {
    LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + "," +
        nextOffset + ")");
    return null;
  } else {
    DataState dataState =
        offset == cachedOffset ? WriteCtx.DataState.NO_DUMP :
            WriteCtx.DataState.ALLOW_DUMP;
    WriteCtx writeCtx = new WriteCtx(request.getHandle(), request.getOffset(),
        request.getCount(), originalCount, request.getStableHow(),
        request.getData(), channel, xid, false, dataState);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Add new write to the list with nextOffset " + cachedOffset +
          " and requesed offset=" + offset);
    }
    if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
      // update the memory size
      updateNonSequentialWriteInMemory(count);
    }
    // check if there is a WriteCtx with the same range in pendingWrites
    WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
    if (oldWriteCtx == null) {
      addWrite(writeCtx);
    } else {
      LOG.warn("Got a repeated request, same range, with xid:" +
          writeCtx.getXid());
    }
    return writeCtx;
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 79, Source file: OpenFileCtx.java
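
The special case at the top of addWritesToCache fires only when the incoming request straddles cachedOffset, i.e. it starts before the already-written region ends but also extends past it. A tiny self-contained illustration of that condition on hypothetical values:

public class StraddleCheckDemo {
  public static void main(String[] args) {
    long cachedOffset = 4096; // next expected sequential write offset
    long offset = 0;          // incoming write rewinds to the file start...
    int count = 8192;         // ...but also extends past cachedOffset

    // The "jumbo write" condition from addWritesToCache: the request begins
    // before cachedOffset AND ends after it, so bytes [offset, cachedOffset)
    // duplicate cached data while [cachedOffset, offset + count) are new.
    boolean straddles = (offset < cachedOffset) && (offset + count > cachedOffset);
    System.out.println(straddles); // true: only bytes 4096-8191 are appended
  }
}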

Example 10: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write is added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      checkDump();
      
      // In test, noticed some Linux client sends a batch (e.g., 1MB)
      // of reordered writes and won't send more writes until it gets
      // responses of the previous batch. So here send response immediately
      // for unstable non-sequential write
      if (stableHow != WriteStableHow.UNSTABLE) {
        LOG.info("Have to change stable write to unstable write:" +
            request.getStableHow());
        stableHow = WriteStableHow.UNSTABLE;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("UNSTABLE write request, send response for offset: " +
            writeCtx.getOffset());
      }
      WccData fileWcc = new WccData(preOpAttr, latestAttr);
      WRITE3Response response =
          new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow,
              Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
          xid);
      writeCtx.setReplied(true);
    }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 45, Source file: OpenFileCtx.java

Example 11: addWritesToCache

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
/**
 * Creates and adds a WriteCtx into the pendingWrites map. This is a
 * synchronized method to handle concurrent writes.
 * 
 * @return A non-null {@link WriteCtx} instance if the incoming write
 *         request's offset >= nextOffset. Otherwise null.
 */
private synchronized WriteCtx addWritesToCache(WRITE3Request request,
    Channel channel, int xid) {
  long offset = request.getOffset();
  int count = request.getCount();
  long cachedOffset = nextOffset.get();
  int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("requesed offset=" + offset + " and current offset="
        + cachedOffset);
  }

  // Handle a special case first
  if ((offset < cachedOffset) && (offset + count > cachedOffset)) {
    // One Linux client behavior: after a file is closed and reopened to
    // write, the client sometimes combines previously written data (which
    // could still be in the kernel buffer) with newly appended data in one
    // write. This is usually the first write after the file is reopened. In
    // this
    // case, we log the event and drop the overlapped section.
    LOG.warn(String.format("Got overwrite with appended data (%d-%d),"
        + " current offset %d," + " drop the overlapped section (%d-%d)"
        + " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset
            + count - 1)));

    if (!pendingWrites.isEmpty()) {
      LOG.warn("There are other pending writes, fail this jumbo write");
      return null;
    }
    
    LOG.warn("Modify this write to write only the appended data");
    alterWriteRequest(request, cachedOffset);

    // Update local variable
    originalCount = count;
    offset = request.getOffset();
    count = request.getCount();
  }
  
  // Fail non-append call
  if (offset < cachedOffset) {
    LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + ","
        + nextOffset + ")");
    return null;
  } else {
    DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
        : WriteCtx.DataState.ALLOW_DUMP;
    WriteCtx writeCtx = new WriteCtx(request.getHandle(),
        request.getOffset(), request.getCount(), originalCount,
        request.getStableHow(), request.getData(), channel, xid, false,
        dataState);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Add new write to the list with nextOffset " + cachedOffset
          + " and requesed offset=" + offset);
    }
    if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
      // update the memory size
      updateNonSequentialWriteInMemory(count);
    }
    // check if there is a WriteCtx with the same range in pendingWrites
    WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
    if (oldWriteCtx == null) {
      addWrite(writeCtx);
    } else {
      LOG.warn("Got a repeated request, same range, with xid:"
          + writeCtx.getXid());
    }
    return writeCtx;
  }
}
 
Developer ID: chendave, Project: hadoop-TCP, Lines of code: 78, Source file: OpenFileCtx.java

Example 12: receivedNewWriteInternal

import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; // import the package/class this method depends on
private void receivedNewWriteInternal(DFSClient dfsClient,
    WRITE3Request request, Channel channel, int xid,
    AsyncDataService asyncDataService, IdUserGroup iug) {
  WriteStableHow stableHow = request.getStableHow();
  WccAttr preOpAttr = latestAttr.getWccAttr();
  int count = request.getCount();

  WriteCtx writeCtx = addWritesToCache(request, channel, xid);
  if (writeCtx == null) {
    // offset < nextOffset
    processOverWrite(dfsClient, request, channel, xid, iug);
  } else {
    // The write is added to pendingWrites.
    // Check and start writing back if necessary
    boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx);
    if (!startWriting) {
      // offset > nextOffset. check if we need to dump data
      checkDump();
      
      // In test, noticed some Linux client sends a batch (e.g., 1MB)
      // of reordered writes and won't send more writes until it gets
      // responses of the previous batch. So here send response immediately
      // for unstable non-sequential write
      if (stableHow != WriteStableHow.UNSTABLE) {
        LOG.info("Have to change stable write to unstable write:"
            + request.getStableHow());
        stableHow = WriteStableHow.UNSTABLE;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("UNSTABLE write request, send response for offset: "
            + writeCtx.getOffset());
      }
      WccData fileWcc = new WccData(preOpAttr, latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
          fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils
          .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
              xid, new VerifierNone()), xid);
      writeCtx.setReplied(true);
    }
  }
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 44, Source file: OpenFileCtx.java


Note: The org.apache.hadoop.nfs.nfs3.request.WRITE3Request.getCount method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, so consult each project's license before distributing or using the code. Do not reproduce without permission.