

Java FsDatasetTestUtil.getReplicas Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.getReplicas. If you are unsure what FsDatasetTestUtil.getReplicas does, how to call it, or want to see it used in real code, the selected examples below should help. You can also explore other usage examples of the enclosing class org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.


The following sections present 9 code examples of the FsDatasetTestUtil.getReplicas method, ordered by popularity by default.
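Before the collected examples, here is a minimal sketch of a typical call site. This snippet is not taken from any of the projects below; the fds dataset (a DataNode's FsDatasetSpi, e.g. obtained via datanode.getFSDataset()), the block pool id bpid, and the LOG logger are assumed to be provided by the surrounding test class.

import java.util.Collection;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;

/** Minimal sketch: log every replica the dataset currently holds for one block pool. */
private void logReplicas(FsDatasetSpi<?> fds, String bpid) {
  // getReplicas exposes the dataset's ReplicaInfo collection for the given block pool
  Collection<ReplicaInfo> replicas = FsDatasetTestUtil.getReplicas(fds, bpid);
  for (ReplicaInfo r : replicas) {
    // LOG is assumed to be the enclosing test's logger
    LOG.info("Replica " + r.getBlockId() + " state=" + r.getState()
        + " file=" + r.getBlockFile());
  }
}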

Example 1: truncateBlockFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
  synchronized (fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File f = b.getBlockFile();
      File mf = b.getMetaFile();
      // Truncate a block file that has a corresponding metadata file
      if (f.exists() && f.length() != 0 && mf.exists()) {
        FileOutputStream s = null;
        FileChannel channel = null;
        try {
          s = new FileOutputStream(f);
          channel = s.getChannel();
          channel.truncate(0);
          LOG.info("Truncated block file " + f.getAbsolutePath());
          return b.getBlockId();
        } finally {
          IOUtils.cleanup(LOG, channel, s);
        }
      }
    }
  }
  return 0;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 25, Source: TestDirectoryScanner.java

Example 2: truncateBlockFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
  synchronized (fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File f = b.getBlockFile();
      File mf = b.getMetaFile();
      // Truncate a block file that has a corresponding metadata file
      if (f.exists() && f.length() != 0 && mf.exists()) {
        // Close the stream and channel even if truncate fails
        try (FileOutputStream s = new FileOutputStream(f);
            FileChannel channel = s.getChannel()) {
          channel.truncate(0);
          LOG.info("Truncated block file " + f.getAbsolutePath());
          return b.getBlockId();
        }
      }
    }
  }
  return 0;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 19, Source: TestDirectoryScanner.java

Example 3: truncateBlockFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/**
 * Truncate a block file
 */
private long truncateBlockFile() throws IOException {
  synchronized (fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File f = b.getBlockFile();
      File mf = b.getMetaFile();
      // Truncate a block file that has a corresponding metadata file
      if (f.exists() && f.length() != 0 && mf.exists()) {
        // Close the stream and channel even if truncate fails
        try (FileOutputStream s = new FileOutputStream(f);
            FileChannel channel = s.getChannel()) {
          channel.truncate(0);
          LOG.info("Truncated block file " + f.getAbsolutePath());
          return b.getBlockId();
        }
      }
    }
  }
  return 0;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 21, Source: TestDirectoryScanner.java

Example 4: getReplica

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
private static ReplicaInPipeline getReplica(final DataNode datanode,
    final String bpid, final ReplicaState expectedState) throws InterruptedException {
  final Collection<ReplicaInfo> replicas = FsDatasetTestUtil.getReplicas(
      datanode.getFSDataset(), bpid);
  for(int i = 0; i < 5 && replicas.size() == 0; i++) {
    LOG.info("wait since replicas.size() == 0; i=" + i);
    Thread.sleep(1000);
  }
  Assert.assertEquals(1, replicas.size());
  final ReplicaInfo r = replicas.iterator().next();
  Assert.assertEquals(expectedState, r.getState());
  return (ReplicaInPipeline)r;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source: TestTransferRbw.java

Example 5: deleteBlockFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/** Delete a block file */
private long deleteBlockFile() {
  synchronized(fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File f = b.getBlockFile();
      File mf = b.getMetaFile();
      // Delete a block file that has a corresponding metadata file
      if (f.exists() && mf.exists() && f.delete()) {
        LOG.info("Deleting block file " + f.getAbsolutePath());
        return b.getBlockId();
      }
    }
  }
  return 0;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source: TestDirectoryScanner.java

Example 6: deleteMetaFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/** Delete block meta file */
private long deleteMetaFile() {
  synchronized(fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File file = b.getMetaFile();
      // Delete a metadata file
      if (file.exists() && file.delete()) {
        LOG.info("Deleting metadata file " + file.getAbsolutePath());
        return b.getBlockId();
      }
    }
  }
  return 0;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source: TestDirectoryScanner.java

Example 7: getReplica

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
private static ReplicaInPipeline getReplica(final DataNode datanode,
    final String bpid, final ReplicaState expectedState)
    throws InterruptedException {
  final Collection<ReplicaInfo> replicas =
      FsDatasetTestUtil.getReplicas(datanode.getFSDataset(), bpid);
  for (int i = 0; i < 5 && replicas.size() == 0; i++) {
    LOG.info("wait since replicas.size() == 0; i=" + i);
    Thread.sleep(1000);
  }
  Assert.assertEquals(1, replicas.size());
  final ReplicaInfo r = replicas.iterator().next();
  Assert.assertEquals(expectedState, r.getState());
  return (ReplicaInPipeline) r;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 15, Source: TestTransferRbw.java

Example 8: deleteBlockFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/**
 * Delete a block file
 */
private long deleteBlockFile() {
  synchronized (fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File f = b.getBlockFile();
      File mf = b.getMetaFile();
      // Delete a block file that has a corresponding metadata file
      if (f.exists() && mf.exists() && f.delete()) {
        LOG.info("Deleting block file " + f.getAbsolutePath());
        return b.getBlockId();
      }
    }
  }
  return 0;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 18, Source: TestDirectoryScanner.java

Example 9: deleteMetaFile

import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil; // import the package/class the method depends on
/**
 * Delete block meta file
 */
private long deleteMetaFile() {
  synchronized (fds) {
    for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
      File file = b.getMetaFile();
      // Delete a metadata file
      if (file.exists() && file.delete()) {
        LOG.info("Deleting metadata file " + file.getAbsolutePath());
        return b.getBlockId();
      }
    }
  }
  return 0;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 17, Source: TestDirectoryScanner.java


Note: The org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.getReplicas method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use of the code should follow the license of the corresponding project. Do not reproduce this article without permission.