

Java DistributedFileSystem.mkdirs Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs. If you are unsure what DistributedFileSystem.mkdirs does or how to use it, the curated examples below should help. You can also browse other usage examples of the enclosing class, org.apache.hadoop.hdfs.DistributedFileSystem.


The following 15 code examples of the DistributedFileSystem.mkdirs method are shown below, sorted by popularity by default.
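Before the collected examples, here is a minimal, self-contained sketch of the method's basic contract. This is an illustrative sketch only, not drawn from any of the projects below; the NameNode URI and paths are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class MkdirsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode address; replace with your cluster's fs.defaultFS.
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // Creates /data/example and any missing parents, like `mkdir -p`;
            // returns true on success (including when the directory already exists).
            boolean created = dfs.mkdirs(new Path("/data/example"));
            System.out.println("created: " + created);
            // An overload additionally takes an FsPermission for the new directory.
            dfs.mkdirs(new Path("/data/example2"), new FsPermission((short) 0755));
        }
    }
}

The same call recurs throughout the examples below: creating input directories for MapReduce jobs, preparing snapshot roots, and building temporary directories for DistCp.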

Example 1: mapreduce

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Test
public void mapreduce() {
    MapReduceConfiguration mapReduceConfiguration = new MapReduceConfiguration();
    DistributedFileSystem distributedFileSystem = mapReduceConfiguration.distributedFileSystem();
    try {
        String inputPath = mapReduceConfiguration.url() + "/mapreduce/temperature/input";
        Path inputDir = new Path(inputPath);
        // If the input directory does not exist, create it and copy the local input file into HDFS.
        if (!distributedFileSystem.exists(inputDir)) {
            distributedFileSystem.mkdirs(inputDir);
            distributedFileSystem.copyFromLocalFile(true,
                    new Path(this.getClass().getClassLoader().getResource("mapred/temperature/input").getPath()),
                    new Path(inputPath + "/input"));
        }
        // Timestamp the output path so repeated runs do not collide.
        String outputPath = mapReduceConfiguration.url() + "/mapreduce/temperature/mapred/output" + DateFormatUtils.format(new Date(), "yyyyMMddHHmmss");

        ToolRunner.run(new MaxTemperatureMapRed(), new String[]{inputPath, outputPath});
        mapReduceConfiguration.print(outputPath);
    } catch (Exception e) {
        log.error(e);
        e.printStackTrace();
    } finally {
        mapReduceConfiguration.close(distributedFileSystem);
    }
}
 
Developer: mumuhadoop, Project: mumu-mapreduce, Lines: 24, Source: MaxTemperatureMapRedTest.java

Example 2: testEditsLogOldRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that the parent of dst1 cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSFileContextMainOperations.java

Example 3: testEditsLogRename

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Perform operations such as setting a quota, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that the parent of dst1 cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSFileContextMainOperations.java

Example 4: setup

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  Configuration conf = new Configuration();
  // Turn off IPC client caching, so that the suite can handle
  // the restart of the daemons between test cases.
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      0);

  MiniQJMHACluster miniQjmHaCluster = new MiniQJMHACluster.Builder(conf).build();
  cluster = miniQjmHaCluster.getDfsCluster();
  jCluster = miniQjmHaCluster.getJournalCluster();
  
  // make nn0 active
  cluster.transitionToActive(0);
  // do something to generate in-progress edit log data
  DistributedFileSystem dfs = (DistributedFileSystem) 
      HATestUtil.configureFailoverFs(cluster, conf);
  dfs.mkdirs(new Path("/test2"));
  dfs.close();
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestBootstrapStandbyWithQJM.java

Example 5: testSaveNamespaceWithRenamedLease

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test that saving the namespace succeeds when a parent directory is renamed
 * while an open lease is held and the destination directory exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestSaveNamespace.java

Example 6: testWebHdfsCreateSnapshot

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test snapshot creation through WebHdfs
 */
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);

    try {
      webHdfs.createSnapshot(foo);
      fail("Cannot create snapshot on a non-snapshottable directory");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains(
          "Directory is not a snapshottable directory", e);
    }

    // allow snapshots on /foo
    dfs.allowSnapshot(foo);
    // create snapshots on foo using WebHdfs
    webHdfs.createSnapshot(foo, "s1");
    // create snapshot without specifying name
    final Path spath = webHdfs.createSnapshot(foo, null);

    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestWebHDFS.java

Example 7: testWebHdfsDeleteSnapshot

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test snapshot deletion through WebHdfs
 */
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path spath = webHdfs.createSnapshot(foo, null);
    Assert.assertTrue(webHdfs.exists(spath));
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // delete the two snapshots
    webHdfs.deleteSnapshot(foo, "s1");
    Assert.assertFalse(webHdfs.exists(s1path));
    webHdfs.deleteSnapshot(foo, spath.getName());
    Assert.assertFalse(webHdfs.exists(spath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestWebHDFS.java

Example 8: testWebHdfsRenameSnapshot

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test snapshot rename through WebHdfs
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    Assert.assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));

    webHdfs.deleteSnapshot(foo, "s2");
    Assert.assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestWebHDFS.java

Example 9: prepare

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Create files/directories/snapshots.
 */
void prepare(DistributedFileSystem dfs, short repl) throws Exception {
  for (Path d : dirs) {
    dfs.mkdirs(d);
  }
  for (Path file : files) {
    DFSTestUtil.createFile(dfs, file, fileSize, repl, 0L);
  }
  for (Map.Entry<Path, List<String>> entry : snapshotMap.entrySet()) {
    for (String snapshot : entry.getValue()) {
      SnapshotTestHelper.createSnapshot(dfs, entry.getKey(), snapshot);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: TestStorageMover.java

Example 10: writeFile

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
private void writeFile(final DistributedFileSystem dfs,
    String dirName, String fileName, String storagePolicy) throws IOException {
  Path dirPath = new Path(dirName);
  dfs.mkdirs(dirPath);
  dfs.setStoragePolicy(dirPath, storagePolicy);
  writeFile(dfs, dirPath, fileName);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestFsck.java

Example 11: createTargetTmpDir

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
private static Path createTargetTmpDir(DistributedFileSystem targetFs,
    Path targetDir) throws IOException {
  final Path tmp = new Path(targetDir,
      DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
  if (!targetFs.mkdirs(tmp)) {
    throw new IOException("The tmp directory " + tmp + " already exists");
  }
  return tmp;
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: DistCpSync.java

Example 12: moveToTarget

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Finish the rename operations: move all the intermediate files/directories
 * from the tmp dir to the final targets.
 */
private static void moveToTarget(DiffInfo[] diffs,
    DistributedFileSystem targetFs) throws IOException {
  // sort the diffs based on their target paths to make sure the parent
  // directories are created first.
  Arrays.sort(diffs, DiffInfo.targetComparator);
  for (DiffInfo diff : diffs) {
    if (diff.target != null) {
      if (!targetFs.exists(diff.target.getParent())) {
        targetFs.mkdirs(diff.target.getParent());
      }
      targetFs.rename(diff.getTmp(), diff.target);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: DistCpSync.java

Example 13: testAcl

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
private void testAcl(boolean persistNamespace) throws IOException {
  Path p = new Path("/p");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.create(p).close();
  fs.mkdirs(new Path("/23"));

  AclEntry e = new AclEntry.Builder().setName("foo")
      .setPermission(READ_EXECUTE).setScope(ACCESS).setType(USER).build();
  fs.modifyAclEntries(p, Lists.newArrayList(e));

  restart(fs, persistNamespace);

  AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
  AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(
      new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ) }, returned);

  fs.removeAcl(p);

  if (persistNamespace) {
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  }

  cluster.restartNameNode();
  cluster.waitActive();

  s = cluster.getNamesystem().getAclStatus(p.toString());
  returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] { }, returned);

  fs.modifyAclEntries(p, Lists.newArrayList(e));
  s = cluster.getNamesystem().getAclStatus(p.toString());
  returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ) }, returned);
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestFSImageWithAcl.java

Example 14: testFilesInGetListingOps

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = "f2";
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 46, Source: TestINodeFile.java

Example 15: testSnapshotStatsMXBeanInfo

import org.apache.hadoop.hdfs.DistributedFileSystem; // import the package/class the method depends on
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  String pathName = "/snapshot";
  Path path = new Path(pathName);

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
    dfs.mkdirs(path);
    dfs.allowSnapshot(path);
    dfs.createSnapshot(path);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=SnapshotInfo");

    CompositeData[] directories =
        (CompositeData[]) mbs.getAttribute(
            mxbeanName, "SnapshottableDirectories");
    int numDirectories = Array.getLength(directories);
    assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
    CompositeData[] snapshots =
        (CompositeData[]) mbs.getAttribute(mxbeanName, "Snapshots");
    int numSnapshots = Array.getLength(snapshots);
    assertEquals(sm.getNumSnapshots(), numSnapshots);

    CompositeData d = (CompositeData) Array.get(directories, 0);
    CompositeData s = (CompositeData) Array.get(snapshots, 0);
    assertTrue(((String) d.get("path")).contains(pathName));
    assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestSnapshotStatsMXBean.java


Note: The org.apache.hadoop.hdfs.DistributedFileSystem.mkdirs examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.