

Java Path.equals Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.equals. If you are unsure what Path.equals does, how to call it, or where it is used in practice, the curated examples below should help. You can also browse further usage examples of the containing class, org.apache.hadoop.fs.Path.


A total of 11 Path.equals code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
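Before working through the examples, it helps to recall what Path.equals actually compares: a Hadoop Path wraps a java.net.URI, and two Path objects are equal only when their full URIs (scheme, authority, and path) are equal. The short, self-contained sketch below illustrates this behavior; it is not taken from any of the projects listed here and assumes only that hadoop-common is on the classpath.

import org.apache.hadoop.fs.Path;

public class PathEqualsDemo {
  public static void main(String[] args) {
    // Identical path strings compare equal.
    System.out.println(new Path("/user/data").equals(new Path("/user/data")));   // true

    // A fully qualified path is NOT equal to a scheme-less one,
    // because Path.equals compares the entire URI.
    System.out.println(new Path("hdfs://nn1:8020/user/data")
        .equals(new Path("/user/data")));                                        // false

    // The Path constructor normalizes away trailing slashes.
    System.out.println(new Path("/user/data/").equals(new Path("/user/data"))); // true
  }
}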

Example 1: incrementFileCountForLocalCacheDirectory

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public void incrementFileCountForLocalCacheDirectory(Path cacheDir) {
  if (useLocalCacheDirectoryManager) {
    Path cacheRoot = LocalCacheDirectoryManager.getCacheDirectoryRoot(
        cacheDir);
    if (cacheRoot != null) {
      LocalCacheDirectoryManager dir = directoryManagers.get(cacheRoot);
      if (dir == null) {
        dir = new LocalCacheDirectoryManager(conf);
        LocalCacheDirectoryManager otherDir =
            directoryManagers.putIfAbsent(cacheRoot, dir);
        if (otherDir != null) {
          dir = otherDir;
        }
      }
      if (cacheDir.equals(cacheRoot)) {
        dir.incrementFileCountForPath("");
      } else {
        String dirStr = cacheDir.toUri().getRawPath();
        String rootStr = cacheRoot.toUri().getRawPath();
        dir.incrementFileCountForPath(
            dirStr.substring(rootStr.length() + 1));
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: LocalResourcesTrackerImpl.java

Example 2: correspondsTo

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public boolean correspondsTo(BasicJob j) throws IOException {
  List<Path> paths = ((ArchiveJob) j).compactedHFiles;
  if (compactedHFiles == null && paths == null) return true;
  if (compactedHFiles == null || paths == null) return false;
  if (compactedHFiles.size() != paths.size()) return false;
  // Same size: every path in the other job must have an equal
  // counterpart in this job's list.
  for (Path p1 : paths) {
    boolean found = false;
    for (Path p2 : compactedHFiles) {
      if (p1.equals(p2)) {
        found = true;
        break;
      }
    }
    if (!found) return false;
  }
  return true;
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: ArchiveJobQueue.java

Example 3: rollLogDirIfNeeded

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Check the current directory against the time stamp.  If they're not
 * the same, create a new directory and a new log file in that directory.
 *
 * @throws MetricsException thrown if an error occurs while creating the
 * new directory or new log file
 */
private void rollLogDirIfNeeded() throws MetricsException {
  Date now = new Date();
  String currentDir = DATE_FORMAT.format(now);
  Path path = new Path(basePath, currentDir);

  // We check whether currentOutStream is null instead of currentDirPath,
  // because if currentDirPath is null, then currentOutStream is null, but
  // currentOutStream can be null for other reasons.
  if ((currentOutStream == null) || !path.equals(currentDirPath)) {
    // If we're not yet connected to HDFS, create the connection
    if (!initialized) {
      initialized = initFs();
    }

    if (initialized) {
      // Close the stream. This step could have been handled already by the
      // flusher thread, but if it has, the PrintStream will just swallow the
      // exception, which is fine.
      if (currentOutStream != null) {
        currentOutStream.close();
      }

      currentDirPath = path;

      try {
        rollLogDir();
      } catch (IOException ex) {
        throwMetricsException("Failed to create new log file", ex);
      }

      scheduleFlush(now);
    }
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 42, Source: RollingFileSystemSink.java

Example 4: getDataStream

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Get access to the LOB data itself.
 * This method returns a lazy reader of the LOB data, accessing the
 * filesystem for external LOB storage as necessary.
 * @param conf the Configuration used to access the filesystem
 * @param basePath the base directory where the table records are
 * stored.
 * @return an object that lazily streams the record to the client.
 * @throws IOException if it could not read the LOB from external storage.
 */
public ACCESSORTYPE getDataStream(Configuration conf, Path basePath)
    throws IOException {
  if (isExternal()) {
    // Read from external storage.
    Path pathToRead = LobReaderCache.qualify(
        new Path(basePath, fileName), conf);
    LOG.debug("Retreving data stream from external path: " + pathToRead);
    if (lobReader != null) {
      // We already have a reader open to a LobFile. Is it the correct file?
      if (!pathToRead.equals(lobReader.getPath())) {
        // No. Close this.lobReader and get the correct one.
        LOG.debug("Releasing previous external reader for "
            + lobReader.getPath());
        LobReaderCache.getCache().recycle(lobReader);
        lobReader = LobReaderCache.getCache().get(pathToRead, conf);
      }
    } else {
      lobReader = LobReaderCache.getCache().get(pathToRead, conf);
    }

    // We now have a LobFile.Reader associated with the correct file. Get to
    // the correct offset and return an InputStream/Reader to the user.
    if (lobReader.tell() != offset) {
      LOG.debug("Seeking to record start offset " + offset);
      lobReader.seek(offset);
    }

    if (!lobReader.next()) {
      throw new IOException("Could not locate record at " + pathToRead
          + ":" + offset);
    }

    return getExternalSource(lobReader);
  } else {
    // This data is already materialized in memory; wrap it and return.
    return getInternalSource(realData);
  }
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 49, Source: LobRef.java
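
Note the LobReaderCache.qualify(...) call in Example 4 before the equality check: because Path.equals compares full URIs, an unqualified path would never compare equal to the fully qualified path held by the open reader. Below is a minimal sketch of the same idea using the standard FileSystem.makeQualified method; the file name is invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class QualifyDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    Path raw = new Path("/data/lobs/file0.lob");
    // makeQualified fills in the filesystem's scheme and authority,
    // e.g. file:///data/lobs/file0.lob on the local filesystem.
    Path qualified = fs.makeQualified(raw);

    // The unqualified and qualified forms are NOT equal ...
    System.out.println(raw.equals(qualified));                   // false
    // ... so both sides must be qualified before comparing.
    System.out.println(qualified.equals(fs.makeQualified(raw))); // true
  }
}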

Example 5: moveAround

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Move hot files to warm and cold, warm files to hot and cold,
 * and cold files to hot and warm.
 */
void moveAround(DistributedFileSystem dfs) throws Exception {
  for(Path srcDir : map.keySet()) {
    int i = 0;
    for(Path dstDir : map.keySet()) {
      if (!srcDir.equals(dstDir)) {
        final Path src = new Path(srcDir, "file" + i++);
        final Path dst = new Path(dstDir, srcDir.getName() + "2" + dstDir.getName());
        LOG.info("rename " + src + " to " + dst);
        dfs.rename(src, dst);
      }
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestStorageMover.java

Example 6: getSnapshotFile

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Generate the path for a snapshot file.
 * 
 * @param snapshotRoot of format
 *          {@literal <snapshottable_dir>/.snapshot/<snapshot_name>}
 * @param file path to a file
 * @return The path of the snapshot of the file assuming the file has a
 *         snapshot under the snapshot root of format
 *         {@literal <snapshottable_dir>/.snapshot/<snapshot_name>/<path_to_file_inside_snapshot>}
 *         . Null if the file is not under the directory associated with the
 *         snapshot root.
 */
static Path getSnapshotFile(Path snapshotRoot, Path file) {
  Path rootParent = snapshotRoot.getParent();
  if (rootParent != null && rootParent.getName().equals(".snapshot")) {
    Path snapshotDir = rootParent.getParent();
    if (file.toString().contains(snapshotDir.toString())
        && !file.equals(snapshotDir)) {
      String fileName = file.toString().substring(
          snapshotDir.toString().length() + 1);
      Path snapshotFile = new Path(snapshotRoot, fileName);
      return snapshotFile;
    }
  }
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: SnapshotTestHelper.java

Example 7: getTmpFile

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private Path getTmpFile(Path target, Mapper.Context context) {
  Path targetWorkPath = new Path(context.getConfiguration().
      get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  // If the target is the work path itself, put the temp file in its parent.
  Path root = target.equals(targetWorkPath) ? targetWorkPath.getParent() : targetWorkPath;
  Path tmpFile = new Path(root, ".distcp.tmp." + context.getTaskAttemptID().toString());
  LOG.info("Creating temp file: " + tmpFile);
  return tmpFile;
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: RetriableFileCopyCommand.java

Example 8: listSubPaths

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
public Set<Path> listSubPaths(Path path) throws IOException {
  Path normalizedPath = normalize(path);
  // This is inefficient but more than adequate for testing purposes.
  Set<Path> subPaths = new LinkedHashSet<Path>();
  for (Path p : inodes.tailMap(normalizedPath).keySet()) {
    if (normalizedPath.equals(p.getParent())) {
      subPaths.add(p);
    }
  }
  return subPaths;
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: InMemoryFileSystemStore.java

Example 9: testBlockSize

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
@SuppressWarnings("deprecation")
public void testBlockSize() throws Exception {
  FileSystem fs = getFileSystem();
  long defaultBlockSize = fs.getDefaultBlockSize();
  assertEquals("incorrect blocksize",
      S3AFileSystem.DEFAULT_BLOCKSIZE, defaultBlockSize);
  long newBlockSize = defaultBlockSize * 2;
  fs.getConf().setLong(Constants.FS_S3A_BLOCK_SIZE, newBlockSize);

  Path dir = path("testBlockSize");
  Path file = new Path(dir, "file");
  createFile(fs, file, true, dataset(1024, 'a', 'z' - 'a'));
  FileStatus fileStatus = fs.getFileStatus(file);
  assertEquals("Double default block size in stat(): " + fileStatus,
      newBlockSize,
      fileStatus.getBlockSize());

  // check the listing  & assert that the block size is picked up by
  // this route too.
  boolean found = false;
  FileStatus[] listing = fs.listStatus(dir);
  for (FileStatus stat : listing) {
    LOG.info("entry: {}", stat);
    if (file.equals(stat.getPath())) {
      found = true;
      assertEquals("Double default block size in ls(): " + stat,
          newBlockSize,
          stat.getBlockSize());
    }
  }
  assertTrue("Did not find " + fileStatsToString(listing, ", "), found);
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestS3ABlocksize.java

Example 10: testHFileLink

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
public void testHFileLink() throws Exception {
  // pass a link, and verify that correct paths are returned.
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  Path aFileLink = new Path(rootDir, "table/2086db948c48/cf/table=21212abcdc33-0906db948c48");
  Path preNamespaceTablePath = new Path(rootDir, "table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceArchivePath =
    new Path(rootDir, ".archive/table/21212abcdc33/cf/0906db948c48");
  Path preNamespaceTempPath = new Path(rootDir, ".tmp/table/21212abcdc33/cf/0906db948c48");
  boolean preNSTablePathExists = false;
  boolean preNSArchivePathExists = false;
  boolean preNSTempPathExists = false;
  assertTrue(HFileLink.isHFileLink(aFileLink));
  HFileLink hFileLink = 
    HFileLink.buildFromHFileLinkPattern(TEST_UTIL.getConfiguration(), aFileLink);
  assertTrue(hFileLink.getArchivePath().toString().startsWith(rootDir.toString()));

  HFileV1Detector t = new HFileV1Detector();
  t.setConf(TEST_UTIL.getConfiguration());
  FileLink fileLink = t.getFileLinkWithPreNSPath(aFileLink);
  // assert it has 6 paths (2 NS, 2 pre-NS, and 2 .tmp) to look at.
  assertTrue(fileLink.getLocations().length == 6);
  for (Path p : fileLink.getLocations()) {
    if (p.equals(preNamespaceArchivePath)) preNSArchivePathExists = true;
    if (p.equals(preNamespaceTablePath)) preNSTablePathExists = true;
    if (p.equals(preNamespaceTempPath)) preNSTempPathExists = true;
  }
  assertTrue(preNSArchivePathExists && preNSTablePathExists && preNSTempPathExists);
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestUpgradeTo96.java

Example 11: preserveFileAttributesForDirectories

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
  String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  final boolean syncOrOverwrite = syncFolder || overwrite;

  LOG.info("About to preserve attributes: " + attrSymbols);

  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
  final boolean preserveRawXattrs =
      conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);

  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                                    SequenceFile.Reader.file(sourceListing));
  long totalLen = clusterFS.getFileStatus(sourceListing).getLen();

  Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  long preservedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();

    // Iterate over every source path that was copied.
    while (sourceReader.next(srcRelPath, srcFileStatus)) {
      // File-attributes for files are set at the time of copy,
      // in the map-task.
      if (! srcFileStatus.isDirectory()) continue;

      Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
      //
      // Skip the root folder when syncOrOverwrite is true.
      //
      if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;

      FileSystem targetFS = targetFile.getFileSystem(conf);
      DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
          preserveRawXattrs);
      // Count this directory so the summary log below is accurate.
      preservedEntries++;

      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Preserving status on directory entries. [" +
          sourceReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
  }
  LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: CopyCommitter.java


Note: The org.apache.hadoop.fs.Path.equals method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.