This article collects typical usage examples of the Java class org.apache.hadoop.io.ByteBufferPool. If you are unsure what ByteBufferPool is for or how to use it, the curated examples below may help.
ByteBufferPool belongs to the org.apache.hadoop.io package. Twelve code examples of the class are shown below, ordered by popularity.
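Before the examples, here is a minimal sketch of the ByteBufferPool contract, exercised through Hadoop's stock ElasticByteBufferPool implementation; the buffer size and the surrounding class are illustrative assumptions, not code from the examples below.

import java.nio.ByteBuffer;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class PoolContractSketch {
  public static void main(String[] args) {
    // ElasticByteBufferPool caches returned buffers and may hand back a
    // buffer larger than requested on a later getBuffer() call.
    ByteBufferPool pool = new ElasticByteBufferPool();
    ByteBuffer buf = pool.getBuffer(true, 4096); // direct buffer, capacity >= 4096
    try {
      // ... fill and drain the buffer ...
    } finally {
      pool.putBuffer(buf); // always hand the buffer back for reuse
    }
  }
}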
Example 1: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts)
    throws IOException, UnsupportedOperationException {
  try {
    // Zero-copy path: delegate if the wrapped stream supports enhanced access.
    return ((HasEnhancedByteBufferAccess)in).read(bufferPool,
        maxLength, opts);
  } catch (ClassCastException e) {
    // Fallback path: plain read into a pooled buffer; remember which pool
    // produced it so releaseBuffer() can return it there later.
    ByteBuffer buffer = ByteBufferUtil.
        fallbackRead(this, bufferPool, maxLength);
    if (buffer != null) {
      extendedReadBuffers.put(buffer, bufferPool);
    }
    return buffer;
  }
}
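From the caller's side, the pattern in Example 1 is driven like the following sketch. FSDataInputStream.read, releaseBuffer, ReadOption, and ElasticByteBufferPool are the real Hadoop API; the file path, read size, and options are assumptions for illustration.

import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ZeroCopyReadSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    ElasticByteBufferPool pool = new ElasticByteBufferPool();
    try (FSDataInputStream in = fs.open(new Path("/tmp/data.bin"))) {
      // Returns a (possibly mmap-backed) buffer, or null at EOF.
      ByteBuffer buf = in.read(pool, 1 << 20,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf != null) {
        try {
          // ... consume buf.remaining() bytes ...
        } finally {
          in.releaseBuffer(buf); // must be released through the same stream
        }
      }
    }
  }
}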
Example 2: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException,
    UnsupportedOperationException {
  if (bufferPool == null) {
    throw new IOException("Please specify buffer pool.");
  }
  ByteBuffer buffer = bufferPool.getBuffer(true, maxLength);
  int pos = buffer.position();
  int n = read(buffer);
  if (n >= 0) {
    // Expose exactly the n bytes that were read: the pool may hand out a
    // buffer larger than maxLength, so clamp the limit before rewinding
    // the position.
    buffer.limit(pos + n);
    buffer.position(pos);
    return buffer;
  }
  // EOF: hand the unused buffer back to the pool instead of leaking it.
  bufferPool.putBuffer(buffer);
  return null;
}
Example 3: releaseBuffer
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public void releaseBuffer(ByteBuffer buffer) {
  try {
    ((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
  } catch (ClassCastException e) {
    ByteBufferPool bufferPool = extendedReadBuffers.remove(buffer);
    if (bufferPool == null) {
      throw new IllegalArgumentException("tried to release a buffer " +
          "that was not created by this stream.");
    }
    bufferPool.putBuffer(buffer);
  }
}
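Examples 1 and 3 pair up through extendedReadBuffers: every pooled buffer handed out by read() is recorded so that releaseBuffer() can route it back to the pool that produced it. Below is a minimal sketch of that bookkeeping, using a plain IdentityHashMap in place of Hadoop's internal store; the class and method names are illustrative assumptions.

import java.nio.ByteBuffer;
import java.util.IdentityHashMap;
import java.util.Map;
import org.apache.hadoop.io.ByteBufferPool;

// Tracks which pool produced each buffer, keyed by buffer identity
// (two distinct buffers may compare equal, so identity semantics matter).
class BufferLedger {
  private final Map<ByteBuffer, ByteBufferPool> owners = new IdentityHashMap<>();

  void record(ByteBuffer buffer, ByteBufferPool pool) {
    owners.put(buffer, pool);
  }

  void release(ByteBuffer buffer) {
    ByteBufferPool pool = owners.remove(buffer);
    if (pool == null) {
      throw new IllegalArgumentException("buffer was not created by this stream");
    }
    pool.putBuffer(buffer);
  }
}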
Example 4: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException,
    UnsupportedOperationException {
  checkStream();
  try {
    if (outBuffer.remaining() > 0) {
      // Have some decrypted data unread, need to reset.
      ((Seekable) in).seek(getPos());
      resetStreamOffset(getPos());
    }
    final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).
        read(bufferPool, maxLength, opts);
    if (buffer != null) {
      final int n = buffer.remaining();
      if (n > 0) {
        streamOffset += buffer.remaining(); // Read n bytes
        final int pos = buffer.position();
        decrypt(buffer, n, pos);
      }
    }
    return buffer;
  } catch (ClassCastException e) {
    throw new UnsupportedOperationException("This stream does not support " +
        "enhanced byte buffer access.");
  }
}
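The decrypt(buffer, n, pos) call above decrypts the n bytes in place, leaving the buffer's position and limit where the caller expects them. A hedged sketch of what such an in-place region decrypt can look like with a javax.crypto.Cipher in a streaming mode such as CTR; the cipher setup is assumed, and this illustrates the pattern rather than CryptoInputStream's actual implementation.

import java.nio.ByteBuffer;
import javax.crypto.Cipher;
import javax.crypto.ShortBufferException;

final class DecryptSketch {
  // Decrypts the region [pos, pos + n) of buffer in place, leaving the
  // buffer's own position and limit untouched for the caller.
  static void decryptRegion(Cipher cipher, ByteBuffer buffer, int n, int pos)
      throws ShortBufferException {
    ByteBuffer in = buffer.duplicate();
    in.position(pos);
    in.limit(pos + n);
    // A second duplicate serves as the output view: Cipher.update rejects
    // input == output as the same object, but distinct duplicates sharing
    // memory are allowed because the method is documented as copy-safe.
    ByteBuffer out = buffer.duplicate();
    out.position(pos);
    out.limit(pos + n);
    cipher.update(in, out); // CTR-style cipher: output length == input length
  }
}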
Example 5: getBufferPool
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
private ByteBufferPool getBufferPool() {
  return new ByteBufferPool() {
    @Override
    public ByteBuffer getBuffer(boolean direct, int length) {
      return ByteBuffer.allocateDirect(length);
    }

    @Override
    public void putBuffer(ByteBuffer buffer) {
    }
  };
}
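Note that this anonymous pool allocates a fresh direct buffer on every request and silently discards returned buffers, which is fine for a test but defeats the point of pooling. A minimal recycling variant is sketched below as an illustration, not code from any of the article's source projects.

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.io.ByteBufferPool;

// Reuses returned buffers that are large enough for the next request.
class RecyclingDirectPool implements ByteBufferPool {
  private final ConcurrentLinkedQueue<ByteBuffer> free = new ConcurrentLinkedQueue<>();

  @Override
  public ByteBuffer getBuffer(boolean direct, int length) {
    ByteBuffer b = free.poll();
    if (b == null || b.capacity() < length) {
      // Simplification: undersized cached buffers are dropped, and the cache
      // is not segregated by direct/heap; Hadoop's ElasticByteBufferPool
      // keeps separate size-keyed trees for each kind instead.
      return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
    }
    b.clear();
    return b;
  }

  @Override
  public void putBuffer(ByteBuffer buffer) {
    free.offer(buffer);
  }
}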
Example 6: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException, UnsupportedOperationException {
  operatorStats.startWait();
  try {
    return underlyingIs.read(bufferPool, maxLength, opts);
  } finally {
    operatorStats.stopWait();
  }
}
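The operatorStats object here is Drill/Dremio-style operator instrumentation: the wait clock runs only while the call is blocked in the underlying stream, thanks to the try/finally. A minimal sketch of that timing pattern follows; WaitStats is a hypothetical stand-in for the real OperatorStats class.

import java.util.concurrent.atomic.AtomicLong;

// Accumulates time spent blocked in I/O, in nanoseconds. Single-threaded
// use per stream is assumed; startedAt is not protected against races.
class WaitStats {
  private final AtomicLong waitNanos = new AtomicLong();
  private long startedAt;

  void startWait() { startedAt = System.nanoTime(); }
  void stopWait()  { waitNanos.addAndGet(System.nanoTime() - startedAt); }
  long totalWaitNanos() { return waitNanos.get(); }
}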
Example 7: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public synchronized ByteBuffer read(ByteBufferPool bufferPool,
    int maxLength, EnumSet<ReadOption> opts)
    throws IOException, UnsupportedOperationException {
  if (maxLength == 0) {
    return EMPTY_BUFFER;
  } else if (maxLength < 0) {
    throw new IllegalArgumentException("can't read a negative " +
        "number of bytes.");
  }
  if ((blockReader == null) || (blockEnd == -1)) {
    if (pos >= getFileLength()) {
      return null;
    }
    /*
     * If we don't have a blockReader, or the one we have has no more bytes
     * left to read, we call seekToBlockSource to get a new blockReader and
     * recalculate blockEnd. Note that we assume we're not at EOF here
     * (we check this above).
     */
    if ((!seekToBlockSource(pos)) || (blockReader == null)) {
      throw new IOException("failed to allocate new BlockReader " +
          "at position " + pos);
    }
  }
  ByteBuffer buffer = null;
  if (dfsClient.getConf().shortCircuitMmapEnabled) {
    buffer = tryReadZeroCopy(maxLength, opts);
  }
  if (buffer != null) {
    return buffer;
  }
  buffer = ByteBufferUtil.fallbackRead(this, bufferPool, maxLength);
  if (buffer != null) {
    getExtendedReadBuffers().put(buffer, bufferPool);
  }
  return buffer;
}
Example 8: releaseBuffer
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public synchronized void releaseBuffer(ByteBuffer buffer) {
  if (buffer == EMPTY_BUFFER) return;
  Object val = getExtendedReadBuffers().remove(buffer);
  if (val == null) {
    throw new IllegalArgumentException("tried to release a buffer " +
        "that was not created by this stream, " + buffer);
  }
  // A zero-copy buffer is backed by a memory mapping; a fallback buffer
  // came from a pool. Release through whichever produced it.
  if (val instanceof ClientMmap) {
    IOUtils.closeQuietly((ClientMmap)val);
  } else if (val instanceof ByteBufferPool) {
    ((ByteBufferPool)val).putBuffer(buffer);
  }
}
Example 9: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException, UnsupportedOperationException {
  try {
    return underlyingIs.read(bufferPool, maxLength, opts);
  } catch (FSError e) {
    throw FileSystemWrapper.propagateFSError(e);
  }
}
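FSError extends java.lang.Error, so without this catch it would fly past ordinary IOException handling in the query engine. The helper's exact behavior is project-specific; a plausible sketch of such a conversion (not the actual FileSystemWrapper code) looks like this:

import java.io.IOException;
import org.apache.hadoop.fs.FSError;

// Unwraps an FSError into a checked IOException callers can handle.
// Declared to return IOException so call sites can write
// "throw propagateFSError(e)" and keep the compiler happy.
static IOException propagateFSError(FSError e) throws IOException {
  if (e.getCause() instanceof IOException) {
    throw (IOException) e.getCause();
  }
  throw new IOException("Unexpected FSError", e);
}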
Example 10: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
    EnumSet<ReadOption> opts) throws IOException, UnsupportedOperationException {
  operatorStats.startWait();
  try {
    return super.read(bufferPool, maxLength, opts);
  } finally {
    operatorStats.stopWait();
  }
}
Example 11: read (a newer variant of Example 7; only the accessor for the short-circuit mmap setting differs)
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
@Override
public synchronized ByteBuffer read(ByteBufferPool bufferPool,
    int maxLength, EnumSet<ReadOption> opts)
    throws IOException, UnsupportedOperationException {
  if (maxLength == 0) {
    return EMPTY_BUFFER;
  } else if (maxLength < 0) {
    throw new IllegalArgumentException("can't read a negative " +
        "number of bytes.");
  }
  if ((blockReader == null) || (blockEnd == -1)) {
    if (pos >= getFileLength()) {
      return null;
    }
    /*
     * If we don't have a blockReader, or the one we have has no more bytes
     * left to read, we call seekToBlockSource to get a new blockReader and
     * recalculate blockEnd. Note that we assume we're not at EOF here
     * (we check this above).
     */
    if ((!seekToBlockSource(pos)) || (blockReader == null)) {
      throw new IOException("failed to allocate new BlockReader " +
          "at position " + pos);
    }
  }
  ByteBuffer buffer = null;
  if (dfsClient.getConf().getShortCircuitConf().isShortCircuitMmapEnabled()) {
    buffer = tryReadZeroCopy(maxLength, opts);
  }
  if (buffer != null) {
    return buffer;
  }
  buffer = ByteBufferUtil.fallbackRead(this, bufferPool, maxLength);
  if (buffer != null) {
    getExtendedReadBuffers().put(buffer, bufferPool);
  }
  return buffer;
}
Example 12: read
import org.apache.hadoop.io.ByteBufferPool; // import of the required package/class
/**
 * Reads may need online read recovery, so a zero-copy read does not
 * make sense here and is not supported.
 */
@Override
public synchronized ByteBuffer read(ByteBufferPool bufferPool,
    int maxLength, EnumSet<ReadOption> opts)
    throws IOException, UnsupportedOperationException {
  throw new UnsupportedOperationException(
      "This stream does not support enhanced byte buffer access.");
}