

Java ByteBufferPool Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.io.ByteBufferPool. If you are wondering what the ByteBufferPool class is for, how to use it, or what real code that uses it looks like, the curated class examples below may help.


The ByteBufferPool class belongs to the org.apache.hadoop.io package. Twelve code examples of the class are shown below, sorted by popularity by default.
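Before diving into the examples, a minimal caller-side sketch may help put the class in context: ByteBufferPool is the pool a client hands to an enhanced (zero-copy capable) read, and the stream either serves the read via mmap or falls back to a buffer drawn from the pool. The sketch below is an assumption-laden illustration, not taken from any of the projects cited here: it uses Hadoop's stock ElasticByteBufferPool, a hypothetical input path, and an arbitrary read size, and it mirrors the read/releaseBuffer pairing shown in Examples 1 and 3.

import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ByteBufferPoolReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical path; substitute a file that exists on your file system.
    Path path = new Path("/tmp/example.dat");
    // ElasticByteBufferPool is the reusable pool implementation shipped with Hadoop.
    ByteBufferPool pool = new ElasticByteBufferPool();

    try (FileSystem fs = FileSystem.get(conf);
         FSDataInputStream in = fs.open(path)) {
      // The pool supplies the fallback buffer when a zero-copy (mmap) read
      // is not possible; the call returns null at end of file.
      ByteBuffer buffer = in.read(pool, 4096, EnumSet.noneOf(ReadOption.class));
      if (buffer != null) {
        System.out.println("Read " + buffer.remaining() + " bytes");
        // Buffers obtained from this read must go back through releaseBuffer(),
        // which returns pooled buffers to the pool (see Example 3 below).
        in.releaseBuffer(buffer);
      }
    }
  }
}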

Example 1: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) 
        throws IOException, UnsupportedOperationException {
  try {
    return ((HasEnhancedByteBufferAccess)in).read(bufferPool,
        maxLength, opts);
  }
  catch (ClassCastException e) {
    ByteBuffer buffer = ByteBufferUtil.
        fallbackRead(this, bufferPool, maxLength);
    if (buffer != null) {
      extendedReadBuffers.put(buffer, bufferPool);
    }
    return buffer;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 18, Source: FSDataInputStream.java

Example 2: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException,
    UnsupportedOperationException {
  if (bufferPool == null) {
    throw new IOException("Please specify buffer pool.");
  }
  ByteBuffer buffer = bufferPool.getBuffer(true, maxLength);
  int pos = buffer.position();
  int n = read(buffer);
  if (n >= 0) {
    buffer.position(pos);
    return buffer;
  }
  
  return null;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 18, Source: TestCryptoStreams.java

Example 3: releaseBuffer

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public void releaseBuffer(ByteBuffer buffer) {
  try {
    ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
  }
  catch (ClassCastException e) {
    ByteBufferPool bufferPool = extendedReadBuffers.remove(buffer);
    if (bufferPool == null) {
      throw new IllegalArgumentException("tried to release a buffer " +
          "that was not created by this stream.");
    }
    bufferPool.putBuffer(buffer);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 15, Source: FSDataInputStream.java

Example 4: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException,
    UnsupportedOperationException {
  checkStream();
  try {
    if (outBuffer.remaining() > 0) {
      // Have some decrypted data unread, need to reset.
      ((Seekable) in).seek(getPos());
      resetStreamOffset(getPos());
    }
    final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).
        read(bufferPool, maxLength, opts);
    if (buffer != null) {
      final int n = buffer.remaining();
      if (n > 0) {
        streamOffset += buffer.remaining(); // Read n bytes
        final int pos = buffer.position();
        decrypt(buffer, n, pos);
      }
    }
    return buffer;
  } catch (ClassCastException e) {
    throw new UnsupportedOperationException("This stream does not support " + 
        "enhanced byte buffer access.");
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 28, Source: CryptoInputStream.java

Example 5: getBufferPool

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
private ByteBufferPool getBufferPool() {
  return new ByteBufferPool() {
    @Override
    public ByteBuffer getBuffer(boolean direct, int length) {
      return ByteBuffer.allocateDirect(length);
    }
    
    @Override
    public void putBuffer(ByteBuffer buffer) {
    }
  };
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 13, Source: CryptoStreamsTestBase.java

Example 6: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, EnumSet<ReadOption> opts) throws IOException, UnsupportedOperationException {
  operatorStats.startWait();
  try {
    return underlyingIs.read(bufferPool, maxLength, opts);
  } finally {
    operatorStats.stopWait();
  }
}
 
Developer: skhalifa, Project: QDrill, Lines: 10, Source: DrillFSDataInputStream.java

Example 7: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public synchronized ByteBuffer read(ByteBufferPool bufferPool,
    int maxLength, EnumSet<ReadOption> opts) 
        throws IOException, UnsupportedOperationException {
  if (maxLength == 0) {
    return EMPTY_BUFFER;
  } else if (maxLength < 0) {
    throw new IllegalArgumentException("can't read a negative " +
        "number of bytes.");
  }
  if ((blockReader == null) || (blockEnd == -1)) {
    if (pos >= getFileLength()) {
      return null;
    }
    /*
     * If we don't have a blockReader, or the one we have has no more bytes
     * left to read, we call seekToBlockSource to get a new blockReader and
     * recalculate blockEnd.  Note that we assume we're not at EOF here
     * (we check this above).
     */
    if ((!seekToBlockSource(pos)) || (blockReader == null)) {
      throw new IOException("failed to allocate new BlockReader " +
          "at position " + pos);
    }
  }
  ByteBuffer buffer = null;
  if (dfsClient.getConf().shortCircuitMmapEnabled) {
    buffer = tryReadZeroCopy(maxLength, opts);
  }
  if (buffer != null) {
    return buffer;
  }
  buffer = ByteBufferUtil.fallbackRead(this, bufferPool, maxLength);
  if (buffer != null) {
    getExtendedReadBuffers().put(buffer, bufferPool);
  }
  return buffer;
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: DFSInputStream.java

Example 8: releaseBuffer

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public synchronized void releaseBuffer(ByteBuffer buffer) {
  if (buffer == EMPTY_BUFFER) return;
  Object val = getExtendedReadBuffers().remove(buffer);
  if (val == null) {
    throw new IllegalArgumentException("tried to release a buffer " +
        "that was not created by this stream, " + buffer);
  }
  if (val instanceof ClientMmap) {
    IOUtils.closeQuietly((ClientMmap)val);
  } else if (val instanceof ByteBufferPool) {
    ((ByteBufferPool)val).putBuffer(buffer);
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: DFSInputStream.java

Example 9: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, EnumSet<ReadOption> opts) throws IOException, UnsupportedOperationException {
  try {
    return underlyingIs.read(bufferPool, maxLength, opts);
  } catch(FSError e) {
    throw FileSystemWrapper.propagateFSError(e);
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 9, Source: FSDataInputStreamWrapper.java

Example 10: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength, EnumSet<ReadOption> opts) throws IOException, UnsupportedOperationException {
  operatorStats.startWait();
  try {
    return super.read(bufferPool, maxLength, opts);
  } finally {
    operatorStats.stopWait();
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 10, Source: FSDataInputStreamWithStatsWrapper.java

Example 11: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
@Override
public synchronized ByteBuffer read(ByteBufferPool bufferPool,
    int maxLength, EnumSet<ReadOption> opts)
        throws IOException, UnsupportedOperationException {
  if (maxLength == 0) {
    return EMPTY_BUFFER;
  } else if (maxLength < 0) {
    throw new IllegalArgumentException("can't read a negative " +
        "number of bytes.");
  }
  if ((blockReader == null) || (blockEnd == -1)) {
    if (pos >= getFileLength()) {
      return null;
    }
    /*
     * If we don't have a blockReader, or the one we have has no more bytes
     * left to read, we call seekToBlockSource to get a new blockReader and
     * recalculate blockEnd.  Note that we assume we're not at EOF here
     * (we check this above).
     */
    if ((!seekToBlockSource(pos)) || (blockReader == null)) {
      throw new IOException("failed to allocate new BlockReader " +
          "at position " + pos);
    }
  }
  ByteBuffer buffer = null;
  if (dfsClient.getConf().getShortCircuitConf().isShortCircuitMmapEnabled()) {
    buffer = tryReadZeroCopy(maxLength, opts);
  }
  if (buffer != null) {
    return buffer;
  }
  buffer = ByteBufferUtil.fallbackRead(this, bufferPool, maxLength);
  if (buffer != null) {
    getExtendedReadBuffers().put(buffer, bufferPool);
  }
  return buffer;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 39, Source: DFSInputStream.java

Example 12: read

import org.apache.hadoop.io.ByteBufferPool; // import the required package/class
/**
 * May need online read recovery, zero-copy read doesn't make
 * sense, so don't support it.
 */
@Override
public synchronized ByteBuffer read(ByteBufferPool bufferPool,
    int maxLength, EnumSet<ReadOption> opts)
        throws IOException, UnsupportedOperationException {
  throw new UnsupportedOperationException(
      "Not support enhanced byte buffer access.");
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 12, Source: DFSStripedInputStream.java


Note: The org.apache.hadoop.io.ByteBufferPool class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.