

Java Path.equals Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.fs.Path.equals. If you have been wondering what exactly Path.equals does, how to call it, or what real-world usage looks like, the hand-picked examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.fs.Path.


The following shows 11 code examples of the Path.equals method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
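
Before diving into the examples, here is a minimal sketch (assuming hadoop-common on the classpath; the paths are made up) of what Path.equals actually compares: the underlying URI, so scheme, authority, and the normalized path component must all match.

import org.apache.hadoop.fs.Path;

public class PathEqualsDemo {
  public static void main(String[] args) {
    // Path normalizes on construction, so a trailing slash does not matter.
    Path a = new Path("/user/data");
    Path b = new Path("/user/data/");
    System.out.println(a.equals(b)); // true

    // Equality is URI-based: a fully qualified path differs from a bare one.
    Path qualified = new Path("hdfs://namenode:8020/user/data");
    System.out.println(a.equals(qualified)); // false

    // Composing a parent Path with a child string yields an equal path.
    Path composed = new Path(new Path("/user"), "data");
    System.out.println(a.equals(composed)); // true
  }
}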

Example 1: incrementFileCountForLocalCacheDirectory

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public void incrementFileCountForLocalCacheDirectory(Path cacheDir) {
  if (useLocalCacheDirectoryManager) {
    Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot(
        cacheDir);
    if (cacheRoot != null) {
      LocalCacheDirectoryManager dir = directoryManagers.get(cacheRoot);
      if (dir == null) {
        dir = new LocalCacheDirectoryManager(conf);
        LocalCacheDirectoryManager otherDir =
            directoryManagers.putIfAbsent(cacheRoot, dir);
        if (otherDir != null) {
          dir = otherDir;
        }
      }
      if (cacheDir.equals(cacheRoot)) {
        dir.incrementFileCountForPath("");
      } else {
        String dirStr = cacheDir.toUri().getRawPath();
        String rootStr = cacheRoot.toUri().getRawPath();
        dir.incrementFileCountForPath(
            dirStr.substring(rootStr.length() + 1));
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 26, Source: LocalResourcesTrackerImpl.java
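
The substring arithmetic in this example works because toUri().getRawPath() returns the normalized path string, so once cacheDir is known to live under cacheRoot, everything after rootStr.length() + 1 is the relative subpath. A standalone sketch of that computation (the directory names are illustrative, not taken from a real NodeManager layout):

import org.apache.hadoop.fs.Path;

public class RelativeSubpathDemo {
  public static void main(String[] args) {
    Path cacheRoot = new Path("/local/usercache/filecache");
    Path cacheDir = new Path("/local/usercache/filecache/10/42");

    String dirStr = cacheDir.toUri().getRawPath();   // "/local/usercache/filecache/10/42"
    String rootStr = cacheRoot.toUri().getRawPath(); // "/local/usercache/filecache"

    // Skip the root prefix plus the separator to get the relative subpath.
    String relative = cacheDir.equals(cacheRoot)
        ? ""
        : dirStr.substring(rootStr.length() + 1);
    System.out.println(relative); // prints "10/42"
  }
}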

Example 2: correspondsTo

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override public boolean correspondsTo(BasicJob j) throws IOException {
  List<Path> paths = ((ArchiveJob) j).compactedHFiles;
  if (compactedHFiles == null && paths == null) return true;
  else if (compactedHFiles == null || paths == null) return false;
  else if (compactedHFiles.size() != paths.size()) {
    return false;
  } else {
    for (Path p1 : paths) {
      boolean found = false;
      for (Path p2 : compactedHFiles) {
        if (p1.equals(p2)) {
          found = true;
          break;
        }
      }
      if (!found) return false;
    }
  }
  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: ArchiveJobQueue.java
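
Since Path implements both equals and hashCode (both delegate to the underlying URI), the quadratic nested loop above could also be written with set containment. A sketch of the same order-insensitive comparison under that assumption:

import java.util.HashSet;
import java.util.List;
import org.apache.hadoop.fs.Path;

public class PathListComparison {
  // True when both lists hold the same paths, ignoring order (and, like
  // the nested-loop version, ignoring duplicate entries).
  static boolean sameFiles(List<Path> a, List<Path> b) {
    if (a == null && b == null) return true;
    if (a == null || b == null) return false;
    if (a.size() != b.size()) return false;
    // Path.hashCode is URI-based, so HashSet membership agrees with equals.
    return new HashSet<>(a).containsAll(b);
  }
}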

Example 3: rollLogDirIfNeeded

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Check the current directory against the time stamp.  If they're not
 * the same, create a new directory and a new log file in that directory.
 *
 * @throws MetricsException thrown if an error occurs while creating the
 * new directory or new log file
 */
private void rollLogDirIfNeeded() throws MetricsException {
  Date now = new Date();
  String currentDir = DATE_FORMAT.format(now);
  Path path = new Path(basePath, currentDir);

  // We check whether currentOutStream is null instead of currentDirPath,
  // because if currentDirPath is null, then currentOutStream is null, but
  // currentOutStream can be null for other reasons.
  if ((currentOutStream == null) || !path.equals(currentDirPath)) {
    // If we're not yet connected to HDFS, create the connection
    if (!initialized) {
      initialized = initFs();
    }

    if (initialized) {
      // Close the stream. This step could have been handled already by the
      // flusher thread, but if it has, the PrintStream will just swallow the
      // exception, which is fine.
      if (currentOutStream != null) {
        currentOutStream.close();
      }

      currentDirPath = path;

      try {
        rollLogDir();
      } catch (IOException ex) {
        throwMetricsException("Failed to create new log file", ex);
      }

      scheduleFlush(now);
    }
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 42, Source: RollingFileSystemSink.java
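
The rollover test hinges on Path.equals: the sink simply rebuilds the directory path from the current timestamp and compares it with the cached currentDirPath, because two Path objects built from the same base and date string are equal. A small sketch (the hourly date pattern is an assumption for illustration):

import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.hadoop.fs.Path;

public class RolloverCheckDemo {
  // Hourly granularity: the directory name changes once per hour.
  private static final SimpleDateFormat DATE_FORMAT =
      new SimpleDateFormat("yyyyMMddHH");

  public static void main(String[] args) {
    Path basePath = new Path("/metrics/sink");
    Date now = new Date();

    Path first = new Path(basePath, DATE_FORMAT.format(now));
    Path second = new Path(basePath, DATE_FORMAT.format(now));

    // Distinct objects, equal URIs: no rollover needed within the hour.
    System.out.println(first.equals(second)); // true
  }
}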

Example 4: getDataStream

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Get access to the LOB data itself.
 * This method returns a lazy reader of the LOB data, accessing the
 * filesystem for external LOB storage as necessary.
 * @param conf the Configuration used to access the filesystem
 * @param basePath the base directory where the table records are
 * stored.
 * @return an object that lazily streams the record to the client.
 * @throws IOException if it could not read the LOB from external storage.
 */
public ACCESSORTYPE getDataStream(Configuration conf, Path basePath)
    throws IOException {
  if (isExternal()) {
    // Read from external storage.
    Path pathToRead = LobReaderCache.qualify(
        new Path(basePath, fileName), conf);
    LOG.debug("Retreving data stream from external path: " + pathToRead);
    if (lobReader != null) {
      // We already have a reader open to a LobFile. Is it the correct file?
      if (!pathToRead.equals(lobReader.getPath())) {
        // No. Close this.lobReader and get the correct one.
        LOG.debug("Releasing previous external reader for "
            + lobReader.getPath());
        LobReaderCache.getCache().recycle(lobReader);
        lobReader = LobReaderCache.getCache().get(pathToRead, conf);
      }
    } else {
      lobReader = LobReaderCache.getCache().get(pathToRead, conf);
    }

    // We now have a LobFile.Reader associated with the correct file. Get to
    // the correct offset and return an InputStream/Reader to the user.
    if (lobReader.tell() != offset) {
      LOG.debug("Seeking to record start offset " + offset);
      lobReader.seek(offset);
    }

    if (!lobReader.next()) {
      throw new IOException("Could not locate record at " + pathToRead
          + ":" + offset);
    }

    return getExternalSource(lobReader);
  } else {
    // This data is already materialized in memory; wrap it and return.
    return getInternalSource(realData);
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 49, Source: LobRef.java
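
The check-before-swap pattern above, comparing the cached handle's path against the requested one, generalizes to any reader cache keyed by Path. A hedged sketch with a hypothetical Reader type standing in for LobFile.Reader (not the actual API):

import org.apache.hadoop.fs.Path;

public class ReaderCacheCheck {
  // Hypothetical handle type standing in for LobFile.Reader.
  static class Reader {
    private final Path path;
    Reader(Path path) { this.path = path; }
    Path getPath() { return path; }
  }

  private Reader current;

  // Reuse the open reader only when it already points at the requested path.
  Reader readerFor(Path requested) {
    if (current == null || !requested.equals(current.getPath())) {
      // In the real code this is where the old reader is recycled
      // and a replacement is fetched from the cache.
      current = new Reader(requested);
    }
    return current;
  }
}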

Example 5: moveAround

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Move hot files to warm and cold, warm files to hot and cold,
 * and cold files to hot and warm.
 */
void moveAround(DistributedFileSystem dfs) throws Exception {
  for(Path srcDir : map.keySet()) {
    int i = 0;
    for(Path dstDir : map.keySet()) {
      if (!srcDir.equals(dstDir)) {
        final Path src = new Path(srcDir, "file" + i++);
        final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
        LOG.info("rename " + src + " to " + dst);
        dfs.rename(src, dst);
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 18, Source: TestStorageMover.java

Example 6: getSnapshotFile

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Generate the path for a snapshot file.
 * 
 * @param snapshotRoot of format
 *          {@literal <snapshottble_dir>/.snapshot/<snapshot_name>}
 * @param file path to a file
 * @return The path of the snapshot of the file assuming the file has a
 *         snapshot under the snapshot root of format
 *         {@literal <snapshottble_dir>/.snapshot/<snapshot_name>/<path_to_file_inside_snapshot>}
 *         . Null if the file is not under the directory associated with the
 *         snapshot root.
 */
static Path getSnapshotFile(Path snapshotRoot, Path file) {
  Path rootParent = snapshotRoot.getParent();
  if (rootParent != null && rootParent.getName().equals(".snapshot")) {
    Path snapshotDir = rootParent.getParent();
    if (file.toString().contains(snapshotDir.toString())
        && !file.equals(snapshotDir)) {
      String fileName = file.toString().substring(
          snapshotDir.toString().length() + 1);
      Path snapshotFile = new Path(snapshotRoot, fileName);
      return snapshotFile;
    }
  }
  return null;
}
 
Developer ID: naver, Project: hadoop, Lines: 27, Source: SnapshotTestHelper.java
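
A worked example makes the path arithmetic concrete. With snapshotRoot = /dir/.snapshot/s1, rootParent is /dir/.snapshot and snapshotDir is /dir, so a file /dir/sub/f maps to /dir/.snapshot/s1/sub/f (the paths here are made up for illustration):

import org.apache.hadoop.fs.Path;

public class SnapshotPathDemo {
  public static void main(String[] args) {
    Path snapshotRoot = new Path("/dir/.snapshot/s1");
    Path file = new Path("/dir/sub/f");

    Path snapshotDir = snapshotRoot.getParent().getParent(); // "/dir"
    // Strip "/dir" plus the separator to get the path inside the snapshot.
    String fileName = file.toString()
        .substring(snapshotDir.toString().length() + 1);     // "sub/f"

    System.out.println(new Path(snapshotRoot, fileName));
    // prints "/dir/.snapshot/s1/sub/f"
  }
}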

Example 7: getTmpFile

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private Path getTmpFile(Path target, Mapper.Context context) {
  Path targetWorkPath = new Path(context.getConfiguration().
      get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  Path root = target.equals(targetWorkPath) ? targetWorkPath.getParent() : targetWorkPath;
  Path tmpFile = new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString());
  LOG.info("Creating temp file: " + tmpFile);
  return tmpFile;
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: RetriableFileCopyCommand.java

Example 8: listSubPaths

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public Set<Path> listSubPaths(Path path) throws IOException {
  Path normalizedPath = normalize(path);
  // This is inefficient but more than adequate for testing purposes.
  Set<Path> subPaths = new LinkedHashSet<Path>();
  for (Path p : inodes.tailMap(normalizedPath).keySet()) {
    if (normalizedPath.equals(p.getParent())) {
      subPaths.add(p);
    }
  }
  return subPaths;
}
 
Developer ID: naver, Project: hadoop, Lines: 13, Source: InMemoryFileSystemStore.java
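
Filtering direct children by comparing each candidate's getParent() against the directory is a general idiom; only immediate children have a parent Path equal to the directory itself. A standalone sketch (without the normalize() and tailMap() machinery of the store above):

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.Path;

public class DirectChildrenDemo {
  public static void main(String[] args) {
    Path dir = new Path("/a/b");
    List<Path> candidates = List.of(
        new Path("/a/b/c"),   // direct child: kept
        new Path("/a/b/c/d"), // grandchild: skipped
        new Path("/a/x"));    // sibling subtree: skipped

    Set<Path> children = new LinkedHashSet<>();
    for (Path p : candidates) {
      // getParent() of "/a/b/c" is "/a/b", so Path.equals matches it.
      if (dir.equals(p.getParent())) {
        children.add(p);
      }
    }
    System.out.println(children); // [/a/b/c]
  }
}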

Example 9: testBlockSize

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {
  FileSystem fs = getFileSystem();
  long defaultBlockSize = fs.getDefaultBlockSize();
  assertEquals("incorrect blocksize",
      S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize);
  long newBlockSize = defaultBlockSize * 2;
  fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize);

  Path dir = path("testBlockSize");
  Path file = new Path(dir, "file");
  createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a'));
  FileStatus fileStatus = fs.getFileStatus(file);
  assertEquals("Double default block size in stat(): " + fileStatus,
      newBlockSize,
      fileStatus.getBlockSize());

  // check the listing  & assert that the block size is picked up by
  // this route too.
  boolean found = false;
  FileStatus[] listing = fs.listStatus(dir);
  for (FileStatus stat : listing) {
    LOG.info("entry: {}", stat);
    if (file.equals(stat.getPath())) {
      found = true;
      assertEquals("Double default block size in ls(): " + stat,
          newBlockSize,
          stat.getBlockSize());
    }
  }
  assertTrue("Did not find " + fileStatsToString(listing, ", "), found);
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: TestS3ABlocksize.java

Example 10: testHFileLink

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = 
    HFileLink.buildFromHFileLinkPattern(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // assert it has 6 candidate locations (2 NS, 2 pre-NS, and 2 .tmp) to check.
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: TestUpgradeTo96.java

Example 11: preserveFileAttributesForDirectories

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
  String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  final boolean syncOrOverwrite = syncFolder || overwrite;

  LOG.info("About to preserve attributes: " + attrSymbols);

  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
  final boolean preserveRawXattrs =
      conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);

  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                                    SequenceFile.Reader.file(sourceListing));
  long totalLen = clusterFS.getFileStatus(sourceListing).getLen();

  Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  long preservedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();

    // Iterate over every source path that was copied.
    while (sourceReader.next(srcRelPath, srcFileStatus)) {
      // File-attributes for files are set at the time of copy,
      // in the map-task.
      if (! srcFileStatus.isDirectory()) continue;

      Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
      //
      // Skip the root folder when syncOrOverwrite is true.
      //
      if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;

      FileSystem targetFS = targetFile.getFileSystem(conf);
      DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
          preserveRawXattrs);
      preservedEntries++; // count the entry so the final log line is accurate

      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Preserving status on directory entries. [" +
          sourceReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
  }
  LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
 
Developer ID: naver, Project: hadoop, Lines: 49, Source: CopyCommitter.java
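
The root-skip test works on the assumption (not shown in the snippet) that the copy listing records the root directory with an empty relative path: the string concatenation then produces a trailing slash that Path normalizes away, leaving a Path equal to targetRoot. A minimal sketch of that behavior:

import org.apache.hadoop.fs.Path;

public class RootSkipDemo {
  public static void main(String[] args) {
    Path targetRoot = new Path("/target/work");

    // An empty relative path yields "/target/work/", which normalizes
    // back to a Path equal to targetRoot, so the entry is skipped.
    String srcRelPath = "";
    Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);

    System.out.println(targetRoot.equals(targetFile)); // true
  }
}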


Note: The org.apache.hadoop.fs.Path.equals examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code, and do not republish without permission.