本文整理汇总了Java中org.apache.hadoop.io.ReadaheadPool类的典型用法代码示例。如果您正苦于以下问题:Java ReadaheadPool类的具体用法?Java ReadaheadPool怎么用?Java ReadaheadPool使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
ReadaheadPool类属于org.apache.hadoop.io包,在下文中一共展示了ReadaheadPool类的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: FadvisedFileRegion
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Creates a file region that can additionally issue OS cache hints
 * (readahead / fadvise) while the region is transferred.
 *
 * @param file the file whose channel backs this region
 * @param position starting byte offset within the file
 * @param count number of bytes in the region
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this region (used by the rest of the class)
 * @param shuffleBufferSize buffer size used during shuffle transfers
 * @param shuffleTransferToAllowed whether transferTo-style transfers are permitted
 * @throws IOException if the file descriptor cannot be obtained
 */
public FadvisedFileRegion(RandomAccessFile file, long position, long count,
    boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
    String identifier, int shuffleBufferSize,
    boolean shuffleTransferToAllowed) throws IOException {
  super(file.getChannel(), position, count);
  // Keep our own handles to the channel and raw descriptor; the
  // descriptor is presumably what the fadvise/readahead calls use
  // elsewhere in the class (not visible here).
  this.fileChannel = file.getChannel();
  this.fd = file.getFD();
  this.position = position;
  this.count = count;
  this.identifier = identifier;
  this.manageOsCache = manageOsCache;
  this.readaheadLength = readaheadLength;
  this.readaheadPool = readaheadPool;
  this.shuffleBufferSize = shuffleBufferSize;
  this.shuffleTransferToAllowed = shuffleTransferToAllowed;
}
示例2: FadvisedFileRegion
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Builds a shuffle file region whose transfer may be accompanied by
 * OS-level readahead and cache-management hints.
 *
 * @param file the file whose channel backs this region
 * @param position starting byte offset within the file
 * @param count number of bytes in the region
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this region (used by the rest of the class)
 * @param shuffleBufferSize buffer size used during shuffle transfers
 * @param shuffleTransferToAllowed whether transferTo-style transfers are permitted
 * @throws IOException if the file descriptor cannot be obtained
 */
public FadvisedFileRegion(RandomAccessFile file, long position, long count,
    boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
    String identifier, int shuffleBufferSize,
    boolean shuffleTransferToAllowed) throws IOException {
  super(file.getChannel(), position, count);
  // Record the raw descriptor and channel for later fadvise/readahead
  // use by the enclosing class (implementation not visible here).
  this.fd = file.getFD();
  this.fileChannel = file.getChannel();
  this.identifier = identifier;
  this.position = position;
  this.count = count;
  this.readaheadPool = readaheadPool;
  this.readaheadLength = readaheadLength;
  this.manageOsCache = manageOsCache;
  this.shuffleTransferToAllowed = shuffleTransferToAllowed;
  this.shuffleBufferSize = shuffleBufferSize;
}
示例3: FadvisedChunkedFile
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Creates a chunked file whose reads may be accompanied by readahead
 * requests submitted to the given pool.
 *
 * @param file the file to serve in chunks
 * @param position starting byte offset within the file
 * @param count number of bytes to serve
 * @param chunkSize size of each chunk
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this file (used by the rest of the class)
 * @throws IOException if the file descriptor cannot be obtained
 */
public FadvisedChunkedFile(RandomAccessFile file, long position, long count,
    int chunkSize, boolean manageOsCache, int readaheadLength,
    ReadaheadPool readaheadPool, String identifier) throws IOException {
  super(file, position, count, chunkSize);
  // Capture the raw descriptor up front; the enclosing class
  // presumably needs it for fadvise calls (not visible here).
  this.fd = file.getFD();
  this.identifier = identifier;
  this.readaheadPool = readaheadPool;
  this.readaheadLength = readaheadLength;
  this.manageOsCache = manageOsCache;
}
示例4: FadvisedFileRegion
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Creates a file region that can issue OS cache hints (readahead /
 * fadvise) while being transferred.
 *
 * @param file the file whose channel backs this region
 * @param position starting byte offset within the file
 * @param count number of bytes in the region
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this region (used by the rest of the class)
 * @throws IOException if the file descriptor cannot be obtained
 */
public FadvisedFileRegion(RandomAccessFile file, long position, long count,
    boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
    String identifier) throws IOException {
  super(file.getChannel(), position, count);
  // Hold the raw descriptor for the fadvise/readahead machinery that
  // the rest of the class implements (not visible in this snippet).
  this.fd = file.getFD();
  this.identifier = identifier;
  this.readaheadPool = readaheadPool;
  this.readaheadLength = readaheadLength;
  this.manageOsCache = manageOsCache;
}
示例5: startDataNode
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Starts the data node with the specified configuration.
 *
 * Initialization order matters here: storage, MXBean registration,
 * data-xceiver, info server, and the secret manager must all exist
 * before the IPC server is created, and the block pool manager is
 * refreshed only after metrics are available.
 *
 * @param conf - the configuration;
 * if conf's CONFIG_PROPERTY_SIMULATED property is set
 * then a simulated storage based data node is created.
 *
 * @param dataDirs - only for a non-simulated storage data node
 * @param resources - secure resources needed when security is enabled;
 * must be non-null in a secure cluster unless explicitly overridden
 * for testing
 * @throws IOException if any of the component services fail to start
 */
void startDataNode(Configuration conf,
AbstractList<File> dataDirs,
// DatanodeProtocol namenode,
SecureResources resources
) throws IOException {
// In a secure cluster, refuse to start without privileged resources
// unless the test-only escape hatch is set.
if(UserGroupInformation.isSecurityEnabled() && resources == null) {
if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
throw new RuntimeException("Cannot start secure cluster without "
+ "privileged resources.");
}
}
// settings global for all BPs in the Data Node
this.secureResources = resources;
this.dataDirs = dataDirs;
this.conf = conf;
this.dnConf = new DNConf(conf);
storage = new DataStorage();
// global DN settings
registerMXBean();
initDataXceiver(conf);
startInfoServer(conf);
// BlockPoolTokenSecretManager is required to create ipc server.
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
initIpcServer(conf);
metrics = DataNodeMetrics.create(conf, getDisplayName());
blockPoolManager = new BlockPoolManager(this);
blockPoolManager.refreshNamenodes(conf);
// Create the ReadaheadPool from the DataNode context so we can
// exit without having to explicitly shutdown its thread pool.
readaheadPool = ReadaheadPool.getInstance();
}
示例6: startDataNode
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Starts the data node with the specified configuration.
 *
 * Initialization order matters here: storage, MXBean registration,
 * data-xceiver, info server, and the secret manager must all exist
 * before the IPC server is created, and the block pool manager is
 * refreshed only after metrics are available.
 *
 * @param conf
 * - the configuration;
 * if conf's CONFIG_PROPERTY_SIMULATED property is set
 * then a simulated storage based data node is created.
 * @param dataDirs
 * - only for a non-simulated storage data node
 * @param resources
 * - secure resources needed when security is enabled; must be
 * non-null in a secure cluster unless explicitly overridden for testing
 * @throws IOException if any of the component services fail to start
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs,
// DatanodeProtocol namenode,
SecureResources resources) throws IOException {
// In a secure cluster, refuse to start without privileged resources
// unless the test-only escape hatch is set.
if (UserGroupInformation.isSecurityEnabled() && resources == null) {
if (!conf.getBoolean("ignore.secure.ports.for.testing", false)) {
throw new RuntimeException(
"Cannot start secure cluster without " + "privileged resources.");
}
}
// settings global for all BPs in the Data Node
this.secureResources = resources;
this.dataDirs = dataDirs;
this.conf = conf;
this.dnConf = new DNConf(conf);
storage = new DataStorage();
// global DN settings
registerMXBean();
initDataXceiver(conf);
startInfoServer(conf);
// BlockPoolTokenSecretManager is required to create ipc server.
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
initIpcServer(conf);
metrics = DataNodeMetrics.create(conf, getDisplayName());
blockPoolManager = new BlockPoolManager(this);
blockPoolManager.refreshNamenodes(conf);
// Create the ReadaheadPool from the DataNode context so we can
// exit without having to explicitly shutdown its thread pool.
readaheadPool = ReadaheadPool.getInstance();
}
示例7: FadvisedChunkedFile
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Builds a chunked file serving {@code count} bytes starting at
 * {@code position}, with optional readahead support.
 *
 * @param file the file to serve in chunks
 * @param position starting byte offset within the file
 * @param count number of bytes to serve
 * @param chunkSize size of each chunk
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this file (used by the rest of the class)
 * @throws IOException if the file descriptor cannot be obtained
 */
public FadvisedChunkedFile(RandomAccessFile file, long position, long count,
    int chunkSize, boolean manageOsCache, int readaheadLength,
    ReadaheadPool readaheadPool, String identifier) throws IOException {
  super(file, position, count, chunkSize);
  // Keep the raw descriptor; presumably needed for fadvise calls
  // elsewhere in the class (not visible in this snippet).
  this.fd = file.getFD();
  this.identifier = identifier;
  this.manageOsCache = manageOsCache;
  this.readaheadLength = readaheadLength;
  this.readaheadPool = readaheadPool;
}
示例8: FadvisedFileRegionWrapper
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Convenience constructor: delegates to the superclass using the
 * default shuffle buffer size and with transferTo enabled.
 *
 * @param file the file whose channel backs this region
 * @param position starting byte offset within the file
 * @param count number of bytes in the region
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this region
 * @throws IOException if the superclass fails to obtain the descriptor
 */
public FadvisedFileRegionWrapper(RandomAccessFile file, long position, long count,
boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
String identifier) throws IOException {
super(file, position, count, manageOsCache, readaheadLength, readaheadPool,
identifier, DEFAULT_SHUFFLE_BUFFER_SIZE, true);
}
示例9: FadvisedFileRegionWrapper
import org.apache.hadoop.io.ReadaheadPool; //导入依赖的package包/类
/**
 * Pass-through constructor: forwards all arguments unchanged to the
 * superclass constructor.
 *
 * @param file the file whose channel backs this region
 * @param position starting byte offset within the file
 * @param count number of bytes in the region
 * @param manageOsCache whether OS cache management is enabled
 * @param readaheadLength how far ahead to request readahead
 * @param readaheadPool pool used to submit asynchronous readahead requests
 * @param identifier label for this region
 * @throws IOException if the superclass fails to obtain the descriptor
 */
public FadvisedFileRegionWrapper(RandomAccessFile file, long position, long count,
boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
String identifier) throws IOException {
super(file, position, count, manageOsCache, readaheadLength, readaheadPool, identifier);
}