

Java FileSystem.mkdirs Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.mkdirs. If you are wondering how to call FileSystem.mkdirs, or looking for working examples, the curated code samples below should help. You can also browse the other usage examples of org.apache.hadoop.fs.FileSystem, the class this method belongs to.


The 15 FileSystem.mkdirs code examples below are ordered by popularity by default.
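Before the collected examples, here is a minimal, self-contained sketch of the basic call pattern. It is an illustration, not taken from any of the projects below; the class name and paths are placeholder assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

import java.io.IOException;

public class MkdirsSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();  // reads core-site.xml etc. from the classpath
        Path dir = new Path("/tmp/example-dir");   // placeholder path

        try (FileSystem fs = FileSystem.get(conf)) {
            // Instance form: like "mkdir -p", creates any missing parents and
            // returns true on success (including when the directory already exists).
            boolean created = fs.mkdirs(dir);
            System.out.println("created: " + created);

            // Static convenience form, seen in Examples 3 and 7 below:
            // create the directory with an explicit permission.
            FileSystem.mkdirs(fs, new Path("/tmp/example-dir2"),
                FsPermission.createImmutable((short) 0750));
        }
    }
}

Note that, depending on the FileSystem implementation, mkdirs can report failure by returning false rather than throwing, so callers that must know the directory exists should check the return value, as Example 5 below does.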

Example 1: main

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static void main(String[] args) throws IOException {
    // Create an HDFS directory through the Java API
    Constant constant = new Constant();
    String rootPath = "hdfs://nameservice1";
    System.out.println(rootPath + constant.HDFS_LARGE_FILE_DIR);
    Path p = new Path(rootPath + constant.HDFS_LARGE_FILE_DIR);

    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // The original code performed a Kerberos login here; if Kerberos is not
    // enabled, those two lines can be commented out (they are omitted in this excerpt)
    FileSystem fs = p.getFileSystem(conf);
    boolean b = fs.mkdirs(p);
    System.out.println(b);
    fs.close();
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 18, Source: CreateDir.java

Example 2: testSavesFilesOnRequest

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testSavesFilesOnRequest() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, NeverDelete.class.getName());

  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);

  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));

  // run the chore
  chore.chore();

  // verify the files were preserved (the NeverDelete delegate must keep them)
  assertTrue("File shouldn't have been deleted", fs.exists(file));
  assertTrue("Empty directory shouldn't have been deleted", fs.exists(parent));
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestCleanerChore.java

Example 3: testModifyAclEntriesOnlyDefault

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
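  // create a directory carrying a default ACL for user "foo", narrow that
  // entry to READ_EXECUTE, then verify the resulting default entries and
  // that the ACL bit is set on the permission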
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(DEFAULT, USER, ALL),
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
    aclEntry(DEFAULT, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, MASK, READ_EXECUTE),
    aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short)010750);
  assertAclFeature(true);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: FSAclBaseTest.java

Example 4: call

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
        HConstants.DATA_FILE_UMASK_KEY);
    Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
    fs.mkdirs(tmpDir);
    HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
    final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
    out.writeBytes(InetAddress.getLocalHost().toString());
    out.flush();
    return out;
  } catch(RemoteException e) {
    if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
      return null;
    } else {
      throw e;
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: HBaseFsck.java

Example 5: checkCompression

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void checkCompression(boolean compressMapOutputs,
                              CompressionType redCompression,
                              boolean includeCombine
                              ) throws Exception {
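  // run a small local MapReduce job over a hand-written input file and check
  // that the reduce output is compressed exactly when redCompression != NONE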
  JobConf conf = new JobConf(TestMapRed.class);
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.get(conf);
  fs.delete(testdir, true);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setMapperClass(MyMap.class);
  conf.setReducerClass(MyReduce.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setOutputFormat(SequenceFileOutputFormat.class);
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  if (includeCombine) {
    conf.setCombinerClass(IdentityReducer.class);
  }
  conf.setCompressMapOutput(compressMapOutputs);
  SequenceFileOutputFormat.setOutputCompressionType(conf, redCompression);
  try {
    if (!fs.mkdirs(testdir)) {
      throw new IOException("Mkdirs failed to create " + testdir.toString());
    }
    if (!fs.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    Path inFile = new Path(inDir, "part0");
    DataOutputStream f = fs.create(inFile);
    f.writeBytes("Owen was here\n");
    f.writeBytes("Hadoop is fun\n");
    f.writeBytes("Is this done, yet?\n");
    f.close();
    RunningJob rj = JobClient.runJob(conf);
    assertTrue("job was complete", rj.isComplete());
    assertTrue("job was successful", rj.isSuccessful());
    Path output = new Path(outDir,
                           Task.getOutputName(0));
    assertTrue("reduce output exists " + output, fs.exists(output));
    SequenceFile.Reader rdr = 
      new SequenceFile.Reader(fs, output, conf);
    assertEquals("is reduce output compressed " + output, 
                 redCompression != CompressionType.NONE, 
                 rdr.isCompressed());
    rdr.close();
  } finally {
    fs.delete(testdir, true);
  }
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestMapRed.java

Example 6: createFiles

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void createFiles(FileSystem fs, String topdir,
    FileEntry[] entries) throws IOException {
  for (FileEntry entry : entries) {
    Path newpath = new Path(topdir + "/" + entry.getPath());
    if (entry.isDirectory()) {
      fs.mkdirs(newpath);
    } else {
      OutputStream out = fs.create(newpath);
      try {
        out.write((topdir + "/" + entry).getBytes());
        out.write("\n".getBytes());
      } finally {
        out.close();
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestDistCpSystem.java

Example 7: testSetPermissionCannotSetAclBit

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testSetPermissionCannotSetAclBit() throws IOException {
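  // setPermission alone must never turn on the ACL bit, even when handed an
  // FsPermissionExtension that claims to carry one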
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  fs.setPermission(path, FsPermission.createImmutable((short)0700));
  assertPermission((short)0700);
  fs.setPermission(path,
    new FsPermissionExtension(FsPermission.
        createImmutable((short)0755), true, true));
  INode inode = cluster.getNamesystem().getFSDirectory().getINode(
      path.toUri().getPath(), false);
  assertNotNull(inode);
  FsPermission perm = inode.getFsPermission();
  assertNotNull(perm);
  assertEquals(0755, perm.toShort());
  assertEquals(0755, perm.toExtendedShort());
  assertAclFeature(false);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: FSAclBaseTest.java

Example 8: submit

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Submit this job to mapred. The state becomes RUNNING if submission 
 * is successful, FAILED otherwise.  
 */
protected synchronized void submit() {
  try {
    Configuration conf = job.getConfiguration();
    if (conf.getBoolean(CREATE_DIR, false)) {
      FileSystem fs = FileSystem.get(conf);
      Path inputPaths[] = FileInputFormat.getInputPaths(job);
      for (int i = 0; i < inputPaths.length; i++) {
        if (!fs.exists(inputPaths[i])) {
          try {
            fs.mkdirs(inputPaths[i]);
          } catch (IOException e) {
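            // the exception is deliberately swallowed: failing to pre-create an
            // input directory is non-fatal here, and job.submit() below will
            // surface any real problem with the path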

          }
        }
      }
    }
    job.submit();
    this.state = State.RUNNING;
  } catch (Exception ioe) {
    LOG.info(getJobName()+" got an error while submitting ",ioe);
    this.state = State.FAILED;
    this.message = StringUtils.stringifyException(ioe);
  }
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: ControlledJob.java

Example 9: mkdirs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private static void mkdirs(String path) throws Exception {
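  // create the path on the mini-cluster's file system and record it among the
  // expected values for later verification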
  FileSystem fileSystem = null;
  try {
    fileSystem = cluster.getFileSystem();
    fileSystem.mkdirs(new Path(path));
    recordInExpectedValues(path);
  }
  finally {
    IOUtils.cleanup(null, fileSystem);
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TestGlobbedCopyListing.java

Example 10: testDoesNotCheckDirectories

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Test to make sure that we don't attempt to ask the delegate whether or not we should preserve a
 * directory.
 * @throws Exception on failure
 */
@Test
public void testDoesNotCheckDirectories() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());

  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // spy on the delegate to ensure that we don't check for directories
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);

  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  assertTrue("Test parent didn't get created.", fs.exists(parent));
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  
  FileStatus fStat = fs.getFileStatus(parent);
  chore.chore();
  // make sure we never checked the directory
  Mockito.verify(spy, Mockito.never()).isFileDeletable(fStat);
  Mockito.reset(spy);
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestCleanerChore.java

Example 11: renameOrMerge

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
    throws IOException {
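    // v1 of the commit algorithm moves the whole tree with a single rename;
    // v2 creates the target directory and recursively merges each child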
  if (algorithmVersion == 1) {
    if (!fs.rename(from.getPath(), to)) {
      throw new IOException("Failed to rename " + from + " to " + to);
    }
  } else {
    fs.mkdirs(to);
    for (FileStatus subFrom : fs.listStatus(from.getPath())) {
      Path subTo = new Path(to, subFrom.getPath().getName());
      mergePaths(fs, subFrom, subTo);
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: FileOutputCommitter.java

Example 12: testMkDirDepth1

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testMkDirDepth1() throws Throwable {
  FileSystem fs = getFileSystem();
  Path dir = new Path("/testmkdirdepth1");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
  assertPathExists("directory was not created", dir);
  assertDeleted(dir, true);
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: AbstractContractRootDirectoryTest.java

Example 13: testFindsSnapshotFilesWhenCleaning

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestSnapshotHFileCleaner.java

Example 14: testFailedIllegalColumns

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public void testFailedIllegalColumns() throws IOException {
  // Make sure that if a MapReduce job to do the import fails due
  // to an IOException, we tell the user about it.

  // Create a table to attempt to import.
  createTableForColType("VARCHAR(32)", "'meep'");

  Configuration conf = new Configuration();

  // Make the output dir exist so we know the job will fail via IOException.
  Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
  FileSystem fs = FileSystem.getLocal(conf);
  fs.mkdirs(outputPath);

  assertTrue(fs.exists(outputPath));

  // DATA_COL0 ok, but zyzzyva not good
  String [] argv = getArgv(true, new String [] { "DATA_COL0", "zyzzyva" },
      conf);

  Sqoop importer = new Sqoop(new ImportTool());
  try {
    int ret = Sqoop.runSqoop(importer, argv);
    assertTrue("Expected job to fail due bad colname.", 1==ret);
  } catch (Exception e) {
    // In debug mode, IOException is wrapped in RuntimeException.
    LOG.info("Got exceptional return (expected: ok). msg is: " + e);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 30, Source: TestImportJob.java

Example 15: testCheckpointTriggerOnTxnCount

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Test that the 2NN triggers a checkpoint after the configurable interval
 */
@Test(timeout=30000)
public void testCheckpointTriggerOnTxnCount() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();

  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .format(true).build();
    FileSystem fs = cluster.getFileSystem();
    secondary = startSecondaryNameNode(conf);
    secondary.startCheckpointThread();
    final NNStorage storage = secondary.getFSImage().getStorage();

    // 2NN should checkpoint at startup
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        LOG.info("Waiting for checkpoint txn id to go to 2");
        return storage.getMostRecentCheckpointTxId() == 2;
      }
    }, 200, 15000);

    // If we make 10 transactions, it should checkpoint again
    for (int i = 0; i < 10; i++) {
      fs.mkdirs(new Path("/test" + i));
    }
    
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        LOG.info("Waiting for checkpoint txn id to go > 2");
        return storage.getMostRecentCheckpointTxId() > 2;
      }
    }, 200, 15000);
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Developer: naver, Project: hadoop, Lines: 50, Source: TestCheckpoint.java


Note: The org.apache.hadoop.fs.FileSystem.mkdirs examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license; do not repost without permission.