

Java Path.SEPARATOR Field Code Examples

This article compiles typical usage examples of the Path.SEPARATOR field from the Java class org.apache.hadoop.fs.Path. If you are asking what exactly Path.SEPARATOR does, how to use it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.Path.


The 15 code examples below demonstrate typical uses of the Path.SEPARATOR field, sorted by popularity by default.
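A quick orientation before the examples: Path.SEPARATOR is the constant string "/" (with a companion char constant, Path.SEPARATOR_CHAR). Hadoop paths always use URI-style forward slashes regardless of the host operating system, which is why it is preferred over java.io.File.separator when building HDFS path strings. A minimal sketch (the class name PathSeparatorDemo is ours):

import org.apache.hadoop.fs.Path;

public class PathSeparatorDemo {
  public static void main(String[] args) {
    // Path.SEPARATOR is "/" on every platform, unlike java.io.File.separator.
    String rel = "user" + Path.SEPARATOR + "alice" + Path.SEPARATOR + "data";
    System.out.println(rel);                            // user/alice/data
    System.out.println(new Path(Path.SEPARATOR + rel)); // /user/alice/data
  }
}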

Example 1: ChRootedFs

public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
  throws URISyntaxException {
  super(fs.getUri(), fs.getUri().getScheme(),
      fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
  myFs = fs;
  myFs.checkPath(theRoot);
  chRootPathPart = new Path(myFs.getUriPath(theRoot));
  chRootPathPartString = chRootPathPart.toUri().getPath();
  /*
   * We are making the URI include the chrootedPath: e.g. file:///chrootedPath.
   * This is questionable since Path#makeQualified(uri, path) ignores
   * the pathPart of a uri. Since this class is internal we can ignore
   * this issue but if we were to make it external then this needs
   * to be resolved.
   */
  // Handle the two cases:
  //              scheme:/// and scheme://authority/
  myUri = new URI(myFs.getUri().toString() + 
      (myFs.getUri().getAuthority() == null ? "" :  Path.SEPARATOR) +
        chRootPathPart.toUri().getPath().substring(1));
  super.checkPath(theRoot);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: ChRootedFs.java

Example 2: makeArchiveWithRepl

private String makeArchiveWithRepl() throws Exception {
  final String inputPathStr = inputPath.toUri().getPath();
  System.out.println("inputPathStr = " + inputPathStr);

  final URI uri = fs.getUri();
  final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR;

  final String harName = "foo.har";
  final String fullHarPathStr = prefix + harName;
  final String[] args = { "-archiveName", harName, "-p", inputPathStr,
      "-r 3", "*", archivePath.toString() };
  System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
      HADOOP_ARCHIVES_JAR);
  final HadoopArchives har = new HadoopArchives(conf);
  assertEquals(0, ToolRunner.run(har, args));
  return fullHarPathStr;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestHadoopArchives.java
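The returned string is a fully qualified HAR URI of the form har://hdfs-<host>:<port>/<archive dir>/foo.har. As a hedged follow-up sketch (not part of the original test; it reuses fullHarPathStr and conf from above), such a path can be opened like any other Hadoop path:

// Hedged usage sketch: list the archive created by makeArchiveWithRepl().
Path harPath = new Path(fullHarPathStr);
FileSystem harFs = harPath.getFileSystem(conf); // resolves the har:// scheme
for (FileStatus status : harFs.listStatus(harPath)) {
  System.out.println(status.getPath());
}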

Example 3: testCleanupRemainders

@Test(timeout=10000)
public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  String remainder1 = path.getAbsolutePath() + 
      Path.SEPARATOR + "woot2_remainder1";
  String remainder2 = path.getAbsolutePath() +
      Path.SEPARATOR + "woot2_remainder2";
  createTempFile(remainder1);
  createTempFile(remainder2);
  SharedFileDescriptorFactory.create("woot2_", 
      new String[] { path.getAbsolutePath() });
  // creating the SharedFileDescriptorFactory should have removed 
  // the remainders
  Assert.assertFalse(new File(remainder1).exists());
  Assert.assertFalse(new File(remainder2).exists());
  FileUtil.fullyDelete(path);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 20, Source: TestSharedFileDescriptorFactory.java

Example 4: startMiniDfsCluster

/**
 * Start a Drillbit cluster backed by a MiniDFS cluster.
 * @param testClass name of the test class, used as the MiniDFS storage directory
 * @param isImpersonationEnabled whether to enable impersonation in the cluster
 * @throws Exception
 */
protected static void startMiniDfsCluster(
    final String testClass, final boolean isImpersonationEnabled) throws Exception {
  Preconditions.checkArgument(!Strings.isNullOrEmpty(testClass), "Expected a non-null and non-empty test class name");
  dfsConf = new Configuration();

  // Set the MiniDfs base dir to be the temp directory of the test, so that all files created within the MiniDfs
  // are properly cleaned up when the test exits.
  miniDfsStoragePath = System.getProperty("java.io.tmpdir") + Path.SEPARATOR + testClass;
  dfsConf.set("hdfs.minidfs.basedir", miniDfsStoragePath);

  if (isImpersonationEnabled) {
    // Set the proxyuser settings so that the user who is running the Drillbits/MiniDfs can impersonate other users.
    dfsConf.set(String.format("hadoop.proxyuser.%s.hosts", processUser), "*");
    dfsConf.set(String.format("hadoop.proxyuser.%s.groups", processUser), "*");
  }

  // Start the MiniDfs cluster
  dfsCluster = new MiniDFSCluster.Builder(dfsConf)
      .numDataNodes(3)
      .format(true)
      .build();

  fs = dfsCluster.getFileSystem();
}
 
Developer: skhalifa, Project: QDrill, Lines: 30, Source: BaseTestImpersonation.java

Example 5: makeArchive

private String makeArchive(Path parentPath, String relGlob) throws Exception {
  final String parentPathStr = parentPath.toUri().getPath();
  final String relPathGlob = relGlob == null ? "*" : relGlob;
  System.out.println("parentPathStr = " + parentPathStr);

  final URI uri = fs.getUri();
  final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort()
      + archivePath.toUri().getPath() + Path.SEPARATOR;

  final String harName = "foo.har";
  final String fullHarPathStr = prefix + harName;
  final String[] args = { "-archiveName", harName, "-p", parentPathStr,
      relPathGlob, archivePath.toString() };
  System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH,
      HADOOP_ARCHIVES_JAR);
  final HadoopArchives har = new HadoopArchives(conf);
  assertEquals(0, ToolRunner.run(har, args));
  return fullHarPathStr;
}
 
Developer: naver, Project: hadoop, Lines: 19, Source: TestHadoopArchives.java

Example 6: resolveDotInodesPath

private static String resolveDotInodesPath(String src,
    byte[][] pathComponents, FSDirectory fsd)
    throws FileNotFoundException {
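  // A reserved inode path such as "/.reserved/.inodes/<id>/..." splits into
  // components ["", ".reserved", ".inodes", "<id>", ...], so index 3 is the id.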
  final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
  final long id;
  try {
    id = Long.parseLong(inodeId);
  } catch (NumberFormatException e) {
    throw new FileNotFoundException("Invalid inode path: " + src);
  }
  if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) {
    return Path.SEPARATOR;
  }
  INode inode = fsd.getInode(id);
  if (inode == null) {
    throw new FileNotFoundException(
        "File for given inode path does not exist: " + src);
  }
  
  // Handle single ".." for NFS lookup support.
  if ((pathComponents.length > 4)
      && DFSUtil.bytes2String(pathComponents[4]).equals("..")) {
    INode parent = inode.getParent();
    if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) {
      // inode is root, or its parent is root.
      return Path.SEPARATOR;
    } else {
      return parent.getFullPathName();
    }
  }

  String path = "";
  if (id != INodeId.ROOT_INODE_ID) {
    path = inode.getFullPathName();
  }
  return constructRemainingPath(path, pathComponents, 4);
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: FSDirectory.java

Example 7: testFallback

/**
 * Test that sync returns false in the following scenarios:
 * 1. the source/target dir is not a snapshottable dir
 * 2. the source/target does not have the given snapshots
 * 3. changes have been made in the target
 */
@Test
public void testFallback() throws Exception {
  // the source/target dir are not snapshottable dir
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  final Path spath = new Path(source,
      HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  // the source/target does not have the given snapshots
  dfs.allowSnapshot(source);
  dfs.allowSnapshot(target);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.createSnapshot(source, "s1");
  dfs.createSnapshot(source, "s2");
  dfs.createSnapshot(target, "s1");
  Assert.assertTrue(DistCpSync.sync(options, conf));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  // changes have been made in target
  final Path subTarget = new Path(target, "sub");
  dfs.mkdirs(subTarget);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.delete(subTarget, true);
  Assert.assertTrue(DistCpSync.sync(options, conf));
}
 
Developer: naver, Project: hadoop, Lines: 44, Source: TestDistCpSync.java

Example 8: getUserLocalDirs

protected List<String> getUserLocalDirs(List<String> localDirs) {
  List<String> userLocalDirs = new ArrayList<>(localDirs.size());
  String user = container.getUser();
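  // Each entry ends up as <localDir>/usercache/<user>/ (ContainerLocalizer.USERCACHE
  // is the literal "usercache" in the YARN local-dir layout).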

  for (String localDir : localDirs) {
    String userLocalDir = localDir + Path.SEPARATOR +
        ContainerLocalizer.USERCACHE + Path.SEPARATOR + user
        + Path.SEPARATOR;

    userLocalDirs.add(userLocalDir);
  }

  return userLocalDirs;
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: ContainerLaunch.java

Example 9: getNMFilecacheDirs

protected List<String> getNMFilecacheDirs(List<String> localDirs) {
  List<String> filecacheDirs = new ArrayList<>(localDirs.size());

  for (String localDir : localDirs) {
    String filecacheDir = localDir + Path.SEPARATOR +
        ContainerLocalizer.FILECACHE;

    filecacheDirs.add(filecacheDir);
  }

  return filecacheDirs;
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: ContainerLaunch.java

Example 10: getSkipOutputPath

/**
 * Get the directory to which skipped records are written. By default it is
 * a subdirectory of the output _logs directory.
 * Users can stop writing skipped records by setting the value to null
 * (it is stored internally as the string "none", which is what the check
 * below looks for).
 * 
 * @param conf the configuration.
 * @return the skip output directory, or null if it is not set
 * and the output directory is also not set.
 */
public static Path getSkipOutputPath(Configuration conf) {
  String name =  conf.get(OUT_PATH);
  if(name!=null) {
    if("none".equals(name)) {
      return null;
    }
    return new Path(name);
  }
  Path outPath = FileOutputFormat.getOutputPath(new JobConf(conf));
  return outPath==null ? null : new Path(outPath, 
      "_logs"+Path.SEPARATOR+"skip");
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: SkipBadRecords.java
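As a hedged usage sketch (not from the original source): the companion setter SkipBadRecords.setSkipOutputPath stores a null path as the literal "none", which is exactly what the check above looks for:

// Hedged usage sketch for the getter above.
JobConf conf = new JobConf();
SkipBadRecords.setSkipOutputPath(conf, null); // stored as the string "none"
assert SkipBadRecords.getSkipOutputPath(conf) == null;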

Example 11: parsePath

public static String parsePath(Path p) {
  // p = file://xxxx/xxx/xxxx; convert it to /xxxx/xxx/xxxx
  int depth = p.depth();
  String str = "";
  while (depth > 0) {
    str = Path.SEPARATOR + p.getName() + str;
    p = p.getParent();
    --depth;
  }
  return str;
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: RemoteJobQueue.java
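A hedged trace of parsePath (the input value is illustrative): the scheme and authority are dropped and only the slash-separated name components are kept:

// Hypothetical input/output for parsePath above:
Path p = new Path("file:///data/2024/log.txt"); // depth 3: data, 2024, log.txt
String s = parsePath(p);                        // "/data/2024/log.txt"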

Example 12: writeFile

private void writeFile(final DistributedFileSystem dfs,
    Path dir, String fileName) throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  final FSDataOutputStream out = dfs.create(filePath);
  out.writeChars("teststring");
  out.close();
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: TestFsck.java

Example 13: createPath

private Path createPath(FileContext fc, Path root, int year, int month,
                        int day, String id) throws IOException {
  Path path = new Path(root, year + Path.SEPARATOR + month + Path.SEPARATOR +
          day + Path.SEPARATOR + id);
  fc.mkdir(path, FsPermission.getDirDefault(), true);
  return path;
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: TestJobHistoryUtils.java

Example 14: handleWildCard

private static Path handleWildCard(final String root) {
  if (root.contains(WILD_CARD)) {
    int idx = root.indexOf(WILD_CARD); // first wild card in the path
    idx = root.lastIndexOf(PATH_SEPARATOR, idx); // file separator right before the first wild card
    final String newRoot = root.substring(0, idx);
    return newRoot.isEmpty() ? new Path(Path.SEPARATOR) : new Path(newRoot);
  } else {
    return new Path(root);
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 10, Source: FileSelection.java
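To make the truncation behavior concrete, here is a hedged, self-contained re-implementation of the private method above (assuming WILD_CARD is "*" and PATH_SEPARATOR is '/'; the class name WildCardDemo is ours):

import org.apache.hadoop.fs.Path;

public class WildCardDemo {
  // Stand-in for handleWildCard above, with the assumed constants inlined.
  static Path handleWildCard(String root) {
    if (root.contains("*")) {
      int idx = root.indexOf('*');      // first wild card in the path
      idx = root.lastIndexOf('/', idx); // separator right before it
      String newRoot = root.substring(0, idx);
      return newRoot.isEmpty() ? new Path(Path.SEPARATOR) : new Path(newRoot);
    }
    return new Path(root);
  }

  public static void main(String[] args) {
    System.out.println(handleWildCard("/data/logs/*.json")); // /data/logs
    System.out.println(handleWildCard("/*/tmp"));            // /
    System.out.println(handleWildCard("/data/logs"));        // /data/logs
  }
}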

Example 15: copyLocalFileToDfs

public static Path copyLocalFileToDfs(FileSystem fs, String appId,
    Path srcPath, String dstFileName) throws IOException {
  Path dstPath = new Path(fs.getHomeDirectory(),
      Constants.DEFAULT_APP_NAME + Path.SEPARATOR + appId + Path.SEPARATOR + dstFileName);
  LOG.info("Copying " + srcPath + " to " + dstPath);
  fs.copyFromLocalFile(srcPath, dstPath);
  return dstPath;
}
 
Developer: Intel-bigdata, Project: TensorFlowOnYARN, Lines: 8, Source: Utils.java


Note: The org.apache.hadoop.fs.Path.SEPARATOR examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.