

Java Builder Class Code Examples

This article collects typical usage examples of org.apache.hadoop.hdfs.MiniDFSCluster.Builder in Java. If you are wondering what the Builder class does, how it is used, or what real usage looks like, the selected code examples below may help.


The Builder class is a nested class of org.apache.hadoop.hdfs.MiniDFSCluster, in the org.apache.hadoop.hdfs package. The sections below present 15 code examples of the Builder class, ordered by popularity by default.
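Before the project examples, here is a minimal, self-contained sketch of typical Builder usage. It is not taken from any of the projects below; it assumes the hadoop-hdfs test artifact (which provides MiniDFSCluster) and hadoop-common are on the classpath, and the class name MiniDfsClusterSketch is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // Build a single-NameNode, single-DataNode in-process HDFS cluster.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();

      // The mini cluster exposes an ordinary HDFS FileSystem for test code.
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo"));
      System.out.println("/demo exists: " + fs.exists(new Path("/demo")));
    } finally {
      // Always shut the cluster down so its threads and ports are released.
      if (cluster != null) { cluster.shutdown(); }
    }
  }
}

Most of the examples that follow use the same pattern: configure, build, work through cluster.getFileSystem(), and call shutdown() in a finally block.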

Example 1: testBasedir

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** tests basedir option copying files from dfs file system to dfs file system */
public void testBasedir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-basedir",
                                       "/basedir",
                                       namenode+"/basedir/middle/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat/middle/srcdat", files));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/basedir");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: TestCopyFiles.java

Example 2: initMiniHACluster

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration confForMiniDFS = new Configuration();
  
  Builder builder = new MiniDFSCluster.Builder(confForMiniDFS)
      .nnTopology(new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port))))
      .numDataNodes(1);
  
  MiniDFSCluster cluster = builder.build();
  cluster.waitActive();

  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());

  cluster.transitionToActive(0);
  assertFalse(nnode1.isStandbyState());
  return cluster;
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines of code: 25, Source file: HdfsSortedOplogOrganizerJUnitTest.java

Example 3: startCluster

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
private void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt("dfs.blocksize", 1024*1024);
  cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode(0);
  assertNotNull(nn);
  dn0 = cluster.getDataNodes().get(0);
  assertNotNull(dn0);
  blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: TestDataNodeRollingUpgrade.java

Example 4: testCopyFromDfsToDfs

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy files from dfs file system to dfs file system */
public void testCopyFromDfsToDfs() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source file: TestCopyFiles.java

Example 5: testEmptyDir

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy empty directory on dfs file system */
public void testEmptyDir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      
      FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
      fs.mkdirs(new Path("/empty"));

      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/empty",
                                       namenode+"/dest"});
      fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
      assertTrue("Destination directory does not exist.",
                 fs.exists(new Path(namenode+"/dest")));
      deldir(hdfs, "/dest");
      deldir(hdfs, "/empty");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 31, Source file: TestCopyFiles.java

Example 6: testCopyFromLocalToDfs

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy files from local file system to dfs file system */
public void testCopyFromLocalToDfs() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       "file:///"+TEST_ROOT_DIR+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(cluster.getFileSystem(), "/destdat", files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/logs");
      deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: TestCopyFiles.java

Example 7: testCopyFromDfsToLocal

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy files from dfs file system to local file system */
public void testCopyFromDfsToLocal() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       "/logs",
                                       namenode+"/srcdat",
                                       "file:///"+TEST_ROOT_DIR+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path("/logs")));
      deldir(localfs, TEST_ROOT_DIR+"/destdat");
      deldir(hdfs, "/logs");
      deldir(hdfs, "/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 29, Source file: TestCopyFiles.java

Example 8: testDeleteLocal

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/**
 * verify that -delete option works for other {@link FileSystem}
 * implementations. See MAPREDUCE-1285 */
public void testDeleteLocal() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      String destdir = TEST_ROOT_DIR + "/destdat";
      MyFile[] localFiles = createFiles(localfs, destdir);
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-delete",
                                       "-update",
                                       "-log",
                                       "/logs",
                                       namenode+"/srcdat",
                                       "file:///"+TEST_ROOT_DIR+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(localfs, destdir, files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path("/logs")));
      deldir(localfs, destdir);
      deldir(hdfs, "/logs");
      deldir(hdfs, "/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: TestCopyFiles.java

Example 9: testGlobbing

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** test globbing  */
public void testGlobbing() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat/*",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source file: TestCopyFiles.java

Example 10: initMiniCluster

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines of code: 14, Source file: CreateHDFSStoreTest.java

Example 11: initMiniCluster

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
  
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines of code: 16, Source file: BaseHoplogTestCase.java

Example 12: initMiniCluster

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
private void initMiniCluster(Configuration hconf, int numDataNodes)
    throws IOException {
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDataNodes);
  builder.nameNodePort(CLUSTER_PORT);
  cluster = builder.build();
}
 
Developer ID: gemxd, Project: gemfirexd-oss, Lines of code: 8, Source file: HdfsErrorHandlingJunitTest.java

Example 13: testCopyFromDfsToDfs

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy files from dfs file system to dfs file system */
@SuppressWarnings("deprecation")
public void testCopyFromDfsToDfs() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 31, Source file: TestCopyFiles.java

Example 14: testEmptyDir

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy empty directory on dfs file system */
@SuppressWarnings("deprecation")
public void testEmptyDir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      
      FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
      fs.mkdirs(new Path("/empty"));

      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/empty",
                                       namenode+"/dest"});
      fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
      assertTrue("Destination directory does not exist.",
                 fs.exists(new Path(namenode+"/dest")));
      deldir(hdfs, "/dest");
      deldir(hdfs, "/empty");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 32, Source file: TestCopyFiles.java

Example 15: testCopyFromLocalToDfs

import org.apache.hadoop.hdfs.MiniDFSCluster.Builder; // import the required package/class
/** copy files from local file system to dfs file system */
@SuppressWarnings("deprecation")
public void testCopyFromLocalToDfs() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       "file:///"+TEST_ROOT_DIR+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(cluster.getFileSystem(), "/destdat", files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/logs");
      deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 29, Source file: TestCopyFiles.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.Builder examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their original authors; the source code copyright belongs to those authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.