

Java DistributedFileSystem.setStoragePolicy Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.setStoragePolicy. If you are wondering what DistributedFileSystem.setStoragePolicy does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DistributedFileSystem.


The following shows 6 code examples of the DistributedFileSystem.setStoragePolicy method, ordered by popularity by default.
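Before the examples, here is a minimal, self-contained sketch of a typical call; it is not taken from the examples below. It assumes that fs.defaultFS in the Configuration points at a running HDFS cluster, and the class name SetStoragePolicyExample, the directory /data/archive, and the policy name COLD are placeholders chosen for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetStoragePolicyExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster, so FileSystem.get
    // returns a DistributedFileSystem instance.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("setStoragePolicy requires HDFS, got " + fs.getClass());
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // "/data/archive" is a hypothetical existing directory; "COLD" is one of
    // the built-in HDFS storage policies (e.g. HOT, WARM, COLD, ONE_SSD, ALL_SSD).
    dfs.setStoragePolicy(new Path("/data/archive"), "COLD");
  }
}

Note that setStoragePolicy only records the policy on the path; existing block replicas are not relocated until the Mover tool runs, as Example 2 below illustrates.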

Example 1: run

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class that the method depends on
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  final String path = StringUtils.popOptionWithArgument("-path", args);
  if (path == null) {
    System.err.println("Please specify the path for setting the storage " +
        "policy.\nUsage: " + getLongUsage());
    return 1;
  }

  final String policyName = StringUtils.popOptionWithArgument("-policy",
      args);
  if (policyName == null) {
    System.err.println("Please specify the policy name.\nUsage: " +
        getLongUsage());
    return 1;
  }

  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  try {
    dfs.setStoragePolicy(new Path(path), policyName);
    System.out.println("Set storage policy " + policyName + " on " + path);
  } catch (Exception e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  return 0;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source: StoragePolicyAdmin.java

Example 2: testMoverFailedRetry

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class that the method depends on
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();

    // Delete the block file so the block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 34, Source: TestMover.java

Example 3: writeFile

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class that the method depends on
private void writeFile(final DistributedFileSystem dfs,
    String dirName, String fileName, String storagePolicy) throws IOException {
  Path dirPath = new Path(dirName);
  dfs.mkdirs(dirPath);
  dfs.setStoragePolicy(dirPath, storagePolicy);
  writeFile(dfs, dirPath, fileName);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: TestFsck.java

Example 4: testStoragePoliciesCK

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class that the method depends on
/**
 * Test storage policy display
 */
@Test
public void testStoragePoliciesCK() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    writeFile(dfs, "/testhot", "file", "HOT");
    writeFile(dfs, "/testwarm", "file", "WARM");
    writeFile(dfs, "/testcold", "file", "COLD");
    String outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertTrue(outStr.contains("All blocks satisfy specified storage policy."));
    dfs.setStoragePolicy(new Path("/testhot"), "COLD");
    dfs.setStoragePolicy(new Path("/testwarm"), "COLD");
    outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
    assertTrue(outStr.contains("DISK:3(HOT)"));
    assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
    assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
    assertFalse(outStr.contains("All blocks satisfy specified storage policy."));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source: TestFsck.java

Example 5: setStoragePolicy

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class that the method depends on
/**
 * Set storage policies according to the corresponding scheme.
 */
void setStoragePolicy(DistributedFileSystem dfs) throws Exception {
  for (Map.Entry<Path, BlockStoragePolicy> entry : policyMap.entrySet()) {
    dfs.setStoragePolicy(entry.getKey(), entry.getValue().getName());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source: TestStorageMover.java

Example 6: testTwoReplicaShouldNotInSameDN

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class that the method depends on
/**
 * Test a special case: two replicas of the same block should not end up on the same node.
 * We have 2 nodes.
 * We have a block with replicas in (DN0,SSD) and (DN1,DISK).
 * The replica in (DN0,SSD) should not be moved to (DN1,SSD);
 * otherwise DN1 would hold 2 replicas.
 */
@Test(timeout=100000)
public void testTwoReplicaShouldNotInSameDN() throws Exception {
  final Configuration conf = new HdfsConfiguration();

  int blockSize = 5 * 1024 * 1024;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

  int numOfDatanodes = 2;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .racks(new String[]{"/default/rack0", "/default/rack0"})
      .storagesPerDatanode(2)
      .storageTypes(new StorageType[][]{
          {StorageType.SSD, StorageType.DISK},
          {StorageType.SSD, StorageType.DISK}})
      .storageCapacities(new long[][]{
          {100 * blockSize, 20 * blockSize},
          {20 * blockSize, 100 * blockSize}})
      .build();

  try {
    cluster.waitActive();

    //set "/bar" directory with ONE_SSD storage policy.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path barDir = new Path("/bar");
    fs.mkdir(barDir,new FsPermission((short)777));
    fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

    // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
    // and (DN0,DISK) and (DN1,SSD) are about 15% full.
    long fileLen = 30 * blockSize;
    // fooFile has the ONE_SSD policy, so
    // (DN0,SSD) and (DN1,DISK) hold 2 replicas of the same block, and
    // (DN0,DISK) and (DN1,SSD) hold 2 replicas of the same block.
    Path fooFile = new Path(barDir, "foo");
    createFile(cluster, fooFile, fileLen, (short) numOfDatanodes, 0);
    // update space info
    cluster.triggerHeartbeats();

    Balancer.Parameters p = Balancer.Parameters.DEFAULT;
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);

    // The replica in (DN0,SSD) was not moved to (DN1,SSD), because (DN1,DISK)
    // already has one; otherwise DN1 would have 2 replicas.
    // For the same reason, no replicas were moved at all.
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);

  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 63, Source: TestBalancer.java


Note: The org.apache.hadoop.hdfs.DistributedFileSystem.setStoragePolicy examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are excerpted from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; when redistributing or using the code, please follow the license of the corresponding project. Do not reproduce this article without permission.