

Java DataState Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState. If you are wondering what the DataState class does, or how and where to use it, the selected code examples below should help.


The DataState class belongs to the org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx package. Two code examples using the DataState class are shown below, sorted by popularity by default.
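
Both examples below come from the NFS gateway's OpenFileCtx class, where DataState marks whether a cached write may later be dumped from memory to disk. The following self-contained sketch mirrors only the decision logic visible in those examples; HypotheticalDataState, chooseDataState, and the sample offsets are purely illustrative and are not the real Hadoop API.

public class DataStateDemo {
  // Hypothetical stand-in for WriteCtx.DataState; only the two values that
  // appear in the examples below are mirrored here.
  enum HypotheticalDataState { NO_DUMP, ALLOW_DUMP }

  // A sequential write (offset equals the next expected offset) stays in
  // memory (NO_DUMP); an out-of-order write is marked ALLOW_DUMP so it can
  // be dumped to disk if too much write data accumulates in memory.
  static HypotheticalDataState chooseDataState(long offset, long nextOffset) {
    return offset == nextOffset
        ? HypotheticalDataState.NO_DUMP
        : HypotheticalDataState.ALLOW_DUMP;
  }

  public static void main(String[] args) {
    System.out.println(chooseDataState(1024, 1024)); // NO_DUMP
    System.out.println(chooseDataState(4096, 1024)); // ALLOW_DUMP
  }
}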

Example 1: addWritesToCache

import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState; // import the required package/class
/**
 * Creates and adds a WriteCtx into the pendingWrites map. This is a
 * synchronized method to handle concurrent writes.
 *
 * @return A non-null {@link WriteCtx} instance if the incoming write
 * request's offset >= nextOffset. Otherwise null.
 */
private synchronized WriteCtx addWritesToCache(WRITE3Request request,
    Channel channel, int xid) {
  long offset = request.getOffset();
  int count = request.getCount();
  long cachedOffset = nextOffset.get();
  int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
  
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "requesed offset=" + offset + " and current offset=" + cachedOffset);
  }

  // Handle a special case first
  if ((offset < cachedOffset) && (offset + count > cachedOffset)) {
    // One Linux client behavior: after a file is closed and reopened for
    // writing, the client sometimes combines previously written data (which
    // could still be in the kernel buffer) with newly appended data in one
    // write. This is usually the first write after the file is reopened. In
    // this case, we log the event and drop the overlapped section.
    LOG.warn(String.format(
        "Got overwrite with appended data (%d-%d)," + " current offset %d," +
            " drop the overlapped section (%d-%d)" +
            " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset,
        (offset + count - 1)));

    if (!pendingWrites.isEmpty()) {
      LOG.warn("There are other pending writes, fail this jumbo write");
      return null;
    }
    
    LOG.warn("Modify this write to write only the appended data");
    alterWriteRequest(request, cachedOffset);

    // Update local variable
    originalCount = count;
    offset = request.getOffset();
    count = request.getCount();
  }
  
  // Fail non-append call
  if (offset < cachedOffset) {
    LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + "," +
        nextOffset + ")");
    return null;
  } else {
    DataState dataState =
        offset == cachedOffset ? WriteCtx.DataState.NO_DUMP :
            WriteCtx.DataState.ALLOW_DUMP;
    WriteCtx writeCtx = new WriteCtx(request.getHandle(), request.getOffset(),
        request.getCount(), originalCount, request.getStableHow(),
        request.getData(), channel, xid, false, dataState);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Add new write to the list with nextOffset " + cachedOffset +
          " and requesed offset=" + offset);
    }
    if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
      // update the memory size
      updateNonSequentialWriteInMemory(count);
    }
    // check if there is a WriteCtx with the same range in pendingWrites
    WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
    if (oldWriteCtx == null) {
      addWrite(writeCtx);
    } else {
      LOG.warn("Got a repeated request, same range, with xid:" +
          writeCtx.getXid());
    }
    return writeCtx;
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 79, Source file: OpenFileCtx.java
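
To make the overlap branch in Example 1 concrete, the self-contained sketch below reproduces the range arithmetic from the warning message with made-up numbers (offset = 100, count = 50, cachedOffset = 120 are purely illustrative):

public class OverlapRangeDemo {
  public static void main(String[] args) {
    long offset = 100;       // illustrative request offset
    int count = 50;          // illustrative request length
    long cachedOffset = 120; // illustrative nextOffset (bytes already written)

    // Same condition as the special case in addWritesToCache: the request
    // starts before the cached offset but extends past it.
    if (offset < cachedOffset && offset + count > cachedOffset) {
      System.out.printf(
          "Got overwrite with appended data (%d-%d), current offset %d, " +
              "drop the overlapped section (%d-%d) and append new data (%d-%d)%n",
          offset, offset + count - 1, cachedOffset,
          offset, cachedOffset - 1,
          cachedOffset, offset + count - 1);
      // With these numbers, bytes 100-119 are dropped because they were
      // already written; after alterWriteRequest the effective write would
      // start at 120 and cover the remaining 30 bytes (120-149).
    }
  }
}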

Example 2: addWritesToCache

import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState; // import the required package/class
/**
 * Creates and adds a WriteCtx into the pendingWrites map. This is a
 * synchronized method to handle concurrent writes.
 * 
 * @return A non-null {@link WriteCtx} instance if the incoming write
 *         request's offset >= nextOffset. Otherwise null.
 */
private synchronized WriteCtx addWritesToCache(WRITE3Request request,
    Channel channel, int xid) {
  long offset = request.getOffset();
  int count = request.getCount();
  long cachedOffset = nextOffset.get();
  int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("requesed offset=" + offset + " and current offset="
        + cachedOffset);
  }

  // Handle a special case first
  if ((offset < cachedOffset) && (offset + count > cachedOffset)) {
    // One Linux client behavior: after a file is closed and reopened for
    // writing, the client sometimes combines previously written data (which
    // could still be in the kernel buffer) with newly appended data in one
    // write. This is usually the first write after the file is reopened. In
    // this case, we log the event and drop the overlapped section.
    LOG.warn(String.format("Got overwrite with appended data (%d-%d),"
        + " current offset %d," + " drop the overlapped section (%d-%d)"
        + " and append new data (%d-%d).", offset, (offset + count - 1),
        cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset
            + count - 1)));

    if (!pendingWrites.isEmpty()) {
      LOG.warn("There are other pending writes, fail this jumbo write");
      return null;
    }
    
    LOG.warn("Modify this write to write only the appended data");
    alterWriteRequest(request, cachedOffset);

    // Update local variable
    originalCount = count;
    offset = request.getOffset();
    count = request.getCount();
  }
  
  // Fail non-append call
  if (offset < cachedOffset) {
    LOG.warn("(offset,count,nextOffset):" + "(" + offset + "," + count + ","
        + nextOffset + ")");
    return null;
  } else {
    DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
        : WriteCtx.DataState.ALLOW_DUMP;
    WriteCtx writeCtx = new WriteCtx(request.getHandle(),
        request.getOffset(), request.getCount(), originalCount,
        request.getStableHow(), request.getData(), channel, xid, false,
        dataState);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Add new write to the list with nextOffset " + cachedOffset
          + " and requesed offset=" + offset);
    }
    if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
      // update the memory size
      updateNonSequentialWriteInMemory(count);
    }
    // check if there is a WriteCtx with the same range in pendingWrites
    WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
    if (oldWriteCtx == null) {
      addWrite(writeCtx);
    } else {
      LOG.warn("Got a repeated request, same range, with xid:"
          + writeCtx.getXid());
    }
    return writeCtx;
  }
}
 
Developer ID: chendave, Project: hadoop-TCP, Lines of code: 78, Source file: OpenFileCtx.java


Note: The org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.