当前位置: 首页>>代码示例>>Java>>正文


Java MiniDFSCluster.Builder方法代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.MiniDFSCluster.Builder方法的典型用法代码示例。如果您正苦于以下问题：Java MiniDFSCluster.Builder方法的具体用法？Java MiniDFSCluster.Builder怎么用？Java MiniDFSCluster.Builder使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hdfs.MiniDFSCluster的用法示例。


在下文中一共展示了MiniDFSCluster.Builder方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createCluster

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Creates and starts an in-process {@link MiniDFSCluster} rooted at the
 * working directory, deleting any state left over from a previous run.
 *
 * @return the started mini DFS cluster
 * @throws HDFSQuasiServiceException if building the cluster fails
 */
private MiniDFSCluster createCluster() throws HDFSQuasiServiceException {
  MiniDFSCluster hdfsCluster = null;

  // Start from a clean base directory so stale metadata from an earlier
  // cluster cannot interfere with formatting the new one.
  File baseDir = new File(getWorkingDir()).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  this.conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

  LOG.info("Using base dir " + baseDir.getAbsolutePath());

  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(this.conf);
  builder.numDataNodes(getNumberOfDataNodes());
  try {
    hdfsCluster = builder.build();
  } catch (IOException e) {
    LOG.error("Error in creating mini DFS cluster ", e);
    throw new HDFSQuasiServiceException("Error in creating mini DFS cluster ", e);
  }
  LOG.info("NameNode: " + hdfsCluster.getNameNode().getNameNodeAddressHostPortString());
  // Enhanced for-loop instead of an explicit ListIterator: identical
  // traversal order with less boilerplate.
  for (DataNode dn : hdfsCluster.getDataNodes()) {
    LOG.info("DataNode: " + dn.getDisplayName());
  }
  return hdfsCluster;
}
 
开发者ID:ampool,项目名称:monarch,代码行数:26,代码来源:HDFSQuasiService.java

示例2: setUp

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Brings up the shared HDFS and YARN mini-clusters for this test class,
 * starts the delegation-token secret manager, and qualifies the test
 * input path against the cluster file system.
 */
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();

  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");

  // Fluent builder chain instead of one mutating call per line.
  dfsCluster = new MiniDFSCluster.Builder(conf)
      .checkExitOnShutdown(true)
      .numDataNodes(numSlaves)
      .format(true)
      .racks(null)
      .build();

  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  // The DT secret manager must be running before tokens can be issued.
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();

  FileSystem fs = dfsCluster.getFileSystem();
  p1 = fs.makeQualified(new Path("file1"));
}
 
开发者ID:naver,项目名称:hadoop,代码行数:25,代码来源:TestBinaryTokenFile.java

示例3: createDFSCluster

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Builds and starts a three-datanode mini cluster (all datanode hosts
 * "localhost", NameNode on port 9001), waiting until it is fully active.
 */
private MiniDFSCluster createDFSCluster(Configuration conf) throws IOException {
  final String[] dataNodeHosts = {"localhost", "localhost", "localhost"};
  MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf)
      .hosts(dataNodeHosts)
      .nameNodePort(9001)
      .numDataNodes(3)
      .build();
  dfs.waitActive();
  return dfs;
}
 
开发者ID:jiangxiluning,项目名称:kafka-connect-hdfs,代码行数:10,代码来源:TestWithMiniDFSCluster.java

示例4: startMiniDFS

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Starts a hand-rolled MiniDFSCluster rather than going through
 * TestHdfsHelper, because this test needs ACL support switched off.
 *
 * @throws Exception
 */
private void startMiniDFS() throws Exception {

  File testRoot = TestDirHelper.getTestDir();

  // Default Hadoop's log and data locations into the test root unless the
  // caller has already configured them.
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir",
            new File(testRoot, "hadoop-log").getAbsolutePath());
  }
  if (System.getProperty("test.build.data") == null) {
    System.setProperty("test.build.data",
            new File(testRoot, "hadoop-data").getAbsolutePath());
  }

  Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
  HadoopUsersConfTestHelper.addUserConf(conf);
  conf.set("fs.hdfs.impl.disable.cache", "true");
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  // Explicitly turn off ACL support
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);

  miniDfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  nnConf = miniDfs.getConfiguration(0);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:36,代码来源:TestHttpFSServerNoACLs.java

示例5: startMiniDFS

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Starts a hand-rolled MiniDFSCluster rather than going through
 * TestHdfsHelper, because this test needs XAttr support switched off.
 *
 * @throws Exception
 */
private void startMiniDFS() throws Exception {

  File testRoot = TestDirHelper.getTestDir();

  // Default Hadoop's log and data locations into the test root unless the
  // caller has already configured them.
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir",
            new File(testRoot, "hadoop-log").getAbsolutePath());
  }
  if (System.getProperty("test.build.data") == null) {
    System.setProperty("test.build.data",
            new File(testRoot, "hadoop-data").getAbsolutePath());
  }

  Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
  HadoopUsersConfTestHelper.addUserConf(conf);
  conf.set("fs.hdfs.impl.disable.cache", "true");
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");

  // Explicitly turn off XAttr support
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, false);

  miniDfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  nnConf = miniDfs.getConfiguration(0);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:36,代码来源:TestHttpFSServerNoXAttrs.java

示例6: testBalancerEndInNoMoveProgress

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Builds a 4-node cluster: n0 and n1 in RACK0/NODEGROUP0, n2 in
 * RACK1/NODEGROUP1, n3 in RACK1/NODEGROUP2. With the cluster 60% full at
 * replication 3, the node-group placement policy puts a replica of every
 * block on n2 and n3, filling each to 80% (60% x 4 / 3); the node-group
 * balancer policy then forbids moving blocks from n2/n3 to n0/n1. The
 * balancer is therefore expected to terminate within 5 iterations without
 * moving any block.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  final long[] nodeCapacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  final String[] nodeRacks = {RACK0, RACK0, RACK1, RACK1};
  final String[] groups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // The three per-node arrays must describe the same set of datanodes.
  int datanodeCount = nodeCapacities.length;
  assertEquals(datanodeCount, nodeRacks.length);
  assertEquals(datanodeCount, groups.length);
  MiniDFSClusterWithNodeGroup.setNodeGroups(groups);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(nodeCapacities.length)
      .racks(nodeRacks)
      .simulatedCapacities(nodeCapacities);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, 
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(nodeCapacities);
    // Fill the cluster to 60% of total capacity (see class comment above).
    long totalUsedSpace = totalCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / 3, 
        (short) (3), 0);

    // The balancer should finish within 5 iterations without moving blocks.
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

  } finally {
    cluster.shutdown();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:45,代码来源:TestBalancerWithNodeGroup.java

示例7: TestContext

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Sets up a one-datanode, one-storage mini cluster — federated across
 * {@code numNameServices} name services when that count is greater than
 * one — and caches the per-nameservice file systems, block-pool ids,
 * datanode, block scanner, dataset, and volumes for the test to use.
 */
TestContext(Configuration conf, int numNameServices) throws Exception {
  this.numNameServices = numNameServices;
  MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .storagesPerDatanode(1);
  if (numNameServices > 1) {
    // Federated topology: one independent namespace per name service.
    clusterBuilder.nnTopology(MiniDFSNNTopology.
          simpleFederatedTopology(numNameServices));
  }
  cluster = clusterBuilder.build();
  cluster.waitActive();
  dfs = new DistributedFileSystem[numNameServices];
  for (int ns = 0; ns < numNameServices; ns++) {
    dfs[ns] = cluster.getFileSystem(ns);
  }
  bpids = new String[numNameServices];
  for (int ns = 0; ns < numNameServices; ns++) {
    bpids[ns] = cluster.getNamesystem(ns).getBlockPoolId();
  }
  datanode = cluster.getDataNodes().get(0);
  blockScanner = datanode.getBlockScanner();
  for (int ns = 0; ns < numNameServices; ns++) {
    dfs[ns].mkdirs(new Path("/test"));
  }
  data = datanode.getFSDataset();
  volumes = data.getVolumes();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:28,代码来源:TestBlockScanner.java

示例8: newMiniClusterBuilder

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Creates a {@code MiniDFSCluster.Builder} whose cluster data lives under
 * the Maven build directory ({@code project.build.directory}, defaulting
 * to "target").
 */
public static MiniDFSCluster.Builder newMiniClusterBuilder(Configuration config) throws IOException {
  String clusterDataDir =
      System.getProperty("project.build.directory", "target") + "/minicluster/test/data";
  System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, clusterDataDir);
  return new MiniDFSCluster.Builder(config);
}
 
开发者ID:HotelsDotCom,项目名称:circus-train,代码行数:7,代码来源:S3MapReduceCpTestUtils.java

示例9: testBalancerWithNodeGroup

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Starts a cluster with data spread evenly, then adds a fresh empty node
 * and checks that the balancer honours node-group locality when it
 * redistributes blocks onto it.
 */
@Test(timeout=60000)
public void testBalancerWithNodeGroup() throws Exception {
  Configuration conf = createConf();
  final long[] nodeCapacities = {CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  final String[] nodeRacks = {RACK0, RACK0, RACK1, RACK1};
  final String[] groups = {NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};

  // The three per-node arrays must describe the same set of datanodes.
  int datanodeCount = nodeCapacities.length;
  assertEquals(datanodeCount, nodeRacks.length);
  assertEquals(datanodeCount, groups.length);
  MiniDFSClusterWithNodeGroup.setNodeGroups(groups);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(nodeCapacities.length)
      .racks(nodeRacks)
      .simulatedCapacities(nodeCapacities);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, 
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(nodeCapacities);
    // Fill the cluster to 20% of total capacity.
    long totalUsedSpace = totalCapacity * 2 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / (datanodeCount/2),
        (short) (datanodeCount/2), 0);
    
    // Bring up one empty datanode of the same capacity on NODEGROUP2.
    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[] {newCapacity}, new String[]{newNodeGroup});

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(conf, totalUsedSpace, totalCapacity);

  } finally {
    cluster.shutdown();
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:49,代码来源:TestBalancerWithNodeGroup.java

示例10: getDfsBuilder

import org.apache.hadoop.hdfs.MiniDFSCluster; //导入方法依赖的package包/类
/**
 * Returns the {@code MiniDFSCluster.Builder} held by this object, allowing
 * callers to adjust DFS settings before the cluster is built.
 */
public MiniDFSCluster.Builder getDfsBuilder() {
  return dfsBuilder;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:4,代码来源:MiniQJMHACluster.java


注:本文中的org.apache.hadoop.hdfs.MiniDFSCluster.Builder方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。