本文整理汇总了Java中org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.getReplicas方法的典型用法代码示例。如果您正苦于以下问题:Java FsDatasetTestUtil.getReplicas方法的具体用法?Java FsDatasetTestUtil.getReplicas怎么用?Java FsDatasetTestUtil.getReplicas使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil
的用法示例。
在下文中一共展示了FsDatasetTestUtil.getReplicas方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: truncateBlockFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/** Truncate a block file */
/**
 * Truncate the first block file that also has a metadata file.
 *
 * @return the block id of the truncated replica, or 0 if none qualified
 * @throws IOException if truncation fails
 */
private long truncateBlockFile() throws IOException {
synchronized (fds) {
for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
final File blockFile = replica.getBlockFile();
final File metaFile = replica.getMetaFile();
// Skip replicas without a non-empty block file and a metadata file.
if (!blockFile.exists() || blockFile.length() == 0 || !metaFile.exists()) {
continue;
}
FileOutputStream out = null;
FileChannel ch = null;
try {
out = new FileOutputStream(blockFile);
ch = out.getChannel();
ch.truncate(0);
LOG.info("Truncated block file " + blockFile.getAbsolutePath());
return replica.getBlockId();
} finally {
// cleanup logs-and-swallows close failures, matching best-effort semantics.
IOUtils.cleanup(LOG, ch, out);
}
}
}
return 0;
}
示例2: truncateBlockFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/** Truncate a block file */
/**
 * Truncate the first block file that also has a corresponding metadata file.
 *
 * @return the block id of the truncated replica, or 0 if no replica qualified
 * @throws IOException if opening or truncating the block file fails
 */
private long truncateBlockFile() throws IOException {
synchronized (fds) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
// Truncate a block file that has a corresponding metadata file
if (f.exists() && f.length() != 0 && mf.exists()) {
// try-with-resources closes the channel and stream even when
// truncate() throws; the previous version leaked both.
try (FileOutputStream s = new FileOutputStream(f);
FileChannel channel = s.getChannel()) {
channel.truncate(0);
LOG.info("Truncated block file " + f.getAbsolutePath());
return b.getBlockId();
}
}
}
}
return 0;
}
示例3: truncateBlockFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/**
* Truncate a block file
*/
/**
 * Truncate the first block file that also has a corresponding metadata file.
 *
 * @return the block id of the truncated replica, or 0 if no replica qualified
 * @throws IOException if opening or truncating the block file fails
 */
private long truncateBlockFile() throws IOException {
synchronized (fds) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
// Truncate a block file that has a corresponding metadata file
if (f.exists() && f.length() != 0 && mf.exists()) {
// try-with-resources closes the channel and stream even when
// truncate() throws; the previous version leaked both.
try (FileOutputStream s = new FileOutputStream(f);
FileChannel channel = s.getChannel()) {
channel.truncate(0);
LOG.info("Truncated block file " + f.getAbsolutePath());
return b.getBlockId();
}
}
}
}
return 0;
}
示例4: getReplica
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/**
 * Fetch the single replica of the given block pool from the datanode,
 * polling for up to ~5 seconds until it appears, and assert its state.
 *
 * @param datanode the datanode whose dataset is inspected
 * @param bpid block pool id to look up
 * @param expectedState the replica state asserted on the found replica
 * @return the replica, cast to ReplicaInPipeline
 * @throws InterruptedException if interrupted while waiting
 */
private static ReplicaInPipeline getReplica(final DataNode datanode,
final String bpid, final ReplicaState expectedState) throws InterruptedException {
final Collection<ReplicaInfo> replicas = FsDatasetTestUtil.getReplicas(
datanode.getFSDataset(), bpid);
// Wait in one-second steps, at most five times, for a replica to show up.
int attempt = 0;
while (replicas.isEmpty() && attempt < 5) {
LOG.info("wait since replicas.size() == 0; i=" + attempt);
attempt++;
Thread.sleep(1000);
}
Assert.assertEquals(1, replicas.size());
final ReplicaInfo replica = replicas.iterator().next();
Assert.assertEquals(expectedState, replica.getState());
return (ReplicaInPipeline) replica;
}
示例5: deleteBlockFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/** Delete a block file */
/**
 * Delete the first block file whose metadata file also exists.
 *
 * @return the block id of the deleted replica, or 0 if nothing was deleted
 */
private long deleteBlockFile() {
synchronized (fds) {
for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
final File blockFile = replica.getBlockFile();
final File metaFile = replica.getMetaFile();
// Only report replicas whose block file was actually removed.
final boolean removed =
blockFile.exists() && metaFile.exists() && blockFile.delete();
if (removed) {
LOG.info("Deleting block file " + blockFile.getAbsolutePath());
return replica.getBlockId();
}
}
}
return 0;
}
示例6: deleteMetaFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/** Delete block meta file */
/**
 * Delete the first replica metadata file that exists on disk.
 *
 * @return the block id of the affected replica, or 0 if nothing was deleted
 */
private long deleteMetaFile() {
synchronized (fds) {
for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
final File metaFile = replica.getMetaFile();
if (!metaFile.exists()) {
continue;
}
// delete() may fail (e.g. permissions); only report actual removals.
if (metaFile.delete()) {
LOG.info("Deleting metadata file " + metaFile.getAbsolutePath());
return replica.getBlockId();
}
}
}
return 0;
}
示例7: getReplica
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/**
 * Look up the lone replica of a block pool on the datanode, waiting up to
 * roughly five seconds for it to appear, and verify it is in the expected state.
 *
 * @param datanode datanode whose FS dataset is queried
 * @param bpid block pool id
 * @param expectedState state the replica must be in
 * @return the replica as a ReplicaInPipeline
 * @throws InterruptedException if the waiting sleep is interrupted
 */
private static ReplicaInPipeline getReplica(final DataNode datanode,
final String bpid, final ReplicaState expectedState)
throws InterruptedException {
final Collection<ReplicaInfo> replicas =
FsDatasetTestUtil.getReplicas(datanode.getFSDataset(), bpid);
// Poll at most five times, sleeping one second between checks.
for (int i = 0; replicas.isEmpty() && i < 5; i++) {
LOG.info("wait since replicas.size() == 0; i=" + i);
Thread.sleep(1000);
}
Assert.assertEquals(1, replicas.size());
final ReplicaInfo found = replicas.iterator().next();
Assert.assertEquals(expectedState, found.getState());
return (ReplicaInPipeline) found;
}
示例8: deleteBlockFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/**
* Delete a block file
*/
/**
 * Remove the first block file that is paired with an existing metadata file.
 *
 * @return the block id of the replica whose block file was removed, else 0
 */
private long deleteBlockFile() {
synchronized (fds) {
for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
final File block = replica.getBlockFile();
final File meta = replica.getMetaFile();
// Require both files present; delete() short-circuits if either is missing.
if (!block.exists() || !meta.exists()) {
continue;
}
if (block.delete()) {
LOG.info("Deleting block file " + block.getAbsolutePath());
return replica.getBlockId();
}
}
}
return 0;
}
示例9: deleteMetaFile
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; //导入方法依赖的package包/类
/**
* Delete block meta file
*/
/**
 * Remove the first replica metadata file found on disk.
 *
 * @return the block id of the replica whose metadata file was removed, else 0
 */
private long deleteMetaFile() {
synchronized (fds) {
for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
final File meta = replica.getMetaFile();
// Only count replicas whose metadata file existed and was deleted.
final boolean removed = meta.exists() && meta.delete();
if (removed) {
LOG.info("Deleting metadata file " + meta.getAbsolutePath());
return replica.getBlockId();
}
}
}
return 0;
}