

Java Path.depth Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.depth. If you are wondering what Path.depth does, how to call it, or what real-world usage looks like, the curated method examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.fs.Path.


The sections below show 9 code examples of the Path.depth method, sorted by popularity by default.
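
As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of what Path.depth returns: it counts the components in the path part of the URI, ignores the scheme and authority, and reports the root directory as depth 0.

import org.apache.hadoop.fs.Path;

public class PathDepthDemo {
  public static void main(String[] args) {
    System.out.println(new Path("/").depth());                                     // 0 - the root alone
    System.out.println(new Path("/user/alice").depth());                           // 2
    System.out.println(new Path("hdfs://namenode:8020/user/alice/data").depth());  // 3 - scheme and authority are not counted
  }
}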

Example 1: PartitionedTablePathResolver

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
PartitionedTablePathResolver(IMetaStoreClient metastore, Table table)
    throws NoSuchObjectException, MetaException, TException {
  this.metastore = metastore;
  this.table = table;
  LOG.debug("Table '{}' is partitioned", Warehouse.getQualifiedName(table));
  tableBaseLocation = locationAsPath(table);
  List<Partition> onePartition = metastore.listPartitions(table.getDbName(), table.getTableName(), (short) 1);
  if (onePartition.isEmpty()) {
    LOG.warn("Table '{}' has no partitions, perhaps you can simply delete: {}.", Warehouse.getQualifiedName(table),
        tableBaseLocation);
    throw new ConfigurationException();
  }
  Path partitionLocation = locationAsPath(onePartition.get(0));
  int branches = partitionLocation.depth() - tableBaseLocation.depth();
  String globSuffix = StringUtils.repeat("*", "/", branches);
  globPath = new Path(tableBaseLocation, globSuffix);
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 18, Source: PartitionedTablePathResolver.java
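
To illustrate how the depth difference drives the glob, here is a small sketch with made-up paths, assuming the StringUtils above is Apache commons-lang3, whose repeat(str, separator, n) joins n copies of str with the separator:

Path tableBaseLocation = new Path("/warehouse/db/sales");                     // hypothetical table location, depth 3
Path partitionLocation = new Path("/warehouse/db/sales/year=2020/month=01");  // hypothetical partition location, depth 5

int branches = partitionLocation.depth() - tableBaseLocation.depth();         // 5 - 3 = 2 partition levels
String globSuffix = StringUtils.repeat("*", "/", branches);                   // "*/*"
Path globPath = new Path(tableBaseLocation, globSuffix);                      // /warehouse/db/sales/*/*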

Example 2: isMatchingTail

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Compare the path component of two Path URIs; e.g. for hdfs://a/b/c and /a/b/c it compares the
 * '/a/b/c' part. Does not consider the scheme; i.e. if the schemes differ but the path components
 * match, the two paths are treated as equal. Note that this overload requires both paths to have
 * the same depth, so hdfs://a/b/c and b/c would not match here.
 * @param pathToSearch Path we will be trying to match against.
 * @param pathTail the tail to look for at the end of <code>pathToSearch</code>
 * @return True if <code>pathTail</code> is the tail of the path of <code>pathToSearch</code>
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  if (pathToSearch.depth() != pathTail.depth()) return false;
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  do {
    tailName = tailPath.getName();
    if (tailName == null || tailName.length() <= 0) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    if (toSearchName == null || toSearchName.length() <= 0) break;
    // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while(tailName.equals(toSearchName));
  return result;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: FSUtils.java
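
A small usage sketch (with made-up paths) showing the behaviour this overload actually has, given the depth check at the top:

Path toSearch = new Path("hdfs://namenode:8020/hbase/data/default/t1");
boolean a = isMatchingTail(toSearch, new Path("/hbase/data/default/t1"));  // true: same depth, all components match
boolean b = isMatchingTail(toSearch, new Path("/hbase/data/default/t2"));  // false: last components differ
boolean c = isMatchingTail(toSearch, new Path("default/t1"));              // false: depths differ (4 vs 2), rejected up front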

Example 3: checkMetastorePath

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
static void checkMetastorePath(Path path, int globDepth) {
  checkPathContainsEventId(path, "metastore");
  if (path.depth() != globDepth) {
    throw new IllegalStateException(
        "ABORTING: Metastore path structure looks wrong; depth != file system glob depth: '" + path + "'.");
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 8, Source: ConsistencyCheck.java

Example 4: testFileAdd

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
  // Create a 3200-byte file; with the small block size configured by this test it spans 32 blocks
  final Path file = getTestPath("testFileAdd");
  createFile(file, 3200, (short)3);
  final long blockCount = 32;
  int blockCapacity = namesystem.getBlockCapacity();
  updateMetrics();
  assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));

  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  // File create operations is 1
  // Number of files created is depth of <code>file</code> path
  assertCounter("CreateFileOps", 1L, rb);
  assertCounter("FilesCreated", (long)file.depth(), rb);

  updateMetrics();
  long filesTotal = file.depth() + 1; // Add 1 for root
  rb = getMetrics(NS_METRICS);
  assertGauge("FilesTotal", filesTotal, rb);
  assertGauge("BlocksTotal", blockCount, rb);
  fs.delete(file, true);
  filesTotal--; // reduce the filecount for deleted file

  rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
  assertGauge("BlocksTotal", 0L, rb);
  assertGauge("PendingDeletionBlocks", 0L, rb);

  rb = getMetrics(NN_METRICS);
  // Delete file operations and number of files deleted must be 1
  assertCounter("DeleteFileOps", 1L, rb);
  assertCounter("FilesDeleted", 1L, rb);
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestNameNodeMetrics.java

Example 5: checkValidName

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private boolean checkValidName(String name) {
  Path tmp = new Path(name);
  if (tmp.depth() != 1) {
    return false;
  }
  if (name.endsWith(".har")) 
    return true;
  return false;
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: HadoopArchives.java
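
For illustration (archive names are made up), the check accepts only a single-component name that ends in ".har":

checkValidName("output.har");      // true:  depth 1 and ends with ".har"
checkValidName("output");          // false: does not end with ".har"
checkValidName("dir/output.har");  // false: depth 2, an archive name must be a single path component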

Example 6: largestDepth

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p: paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: HadoopArchives.java

Example 7: parsePath

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public static String parsePath(Path p) {
  // p is e.g. file://xxxx/xxx/xxxx; convert it to /xxxx/xxx/xxxx by dropping the scheme and authority
  int depth = p.depth();
  String str = "";
  while (depth > 0) {
    str = Path.SEPARATOR + p.getName() + str;
    p = p.getParent();
    --depth;
  }
  return str;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 12, Source: RemoteJobQueue.java
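
A usage sketch (hypothetical input path) showing the effect of walking the path upwards with getParent while the depth counter runs down:

Path p = new Path("file:///data/jobs/queue");  // hypothetical input, depth 3
String s = parsePath(p);                       // "/data/jobs/queue" - scheme and authority are dropped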

Example 8: checkPathLength

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * Check path length does not exceed maximum.  Returns true if
 * length and depth are okay.  Returns false if length is too long 
 * or depth is too great.
 */
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return (src.length() <= MAX_PATH_LENGTH &&
          srcPath.depth() <= MAX_PATH_DEPTH);
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: NameNodeRpcServer.java
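
For context, MAX_PATH_LENGTH and MAX_PATH_DEPTH are constants defined elsewhere in the HDFS server code (roughly 8000 characters and 1000 components in stock HDFS, though the exact values are not shown here); a call is as simple as:

// Illustrative only; the real limits come from the NameNode's constants, not these comments.
boolean ok = checkPathLength("/user/alice/output/part-00000");  // true: short and shallow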

Example 9: vacuumTable

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@VisibleForTesting
void vacuumTable(String databaseName, String tableName)
    throws MetaException, TException, NoSuchObjectException, URISyntaxException, IOException {
  Table table = metastore.getTable(databaseName, tableName);

  TablePathResolver pathResolver = TablePathResolver.Factory.newTablePathResolver(metastore, table);
  Path tableBaseLocation = pathResolver.getTableBaseLocation();
  Path globPath = pathResolver.getGlobPath();
  LOG.debug("Table base location: '{}'", tableBaseLocation);
  LOG.debug("Glob path: '{}'", globPath);
  int globDepth = globPath.depth();

  Set<Path> metastorePaths = pathResolver.getMetastorePaths(batchSize, expectedPathCount);
  ConsistencyCheck.checkMetastorePaths(metastorePaths, globDepth);
  Set<Path> unvisitedMetastorePaths = new HashSet<>(metastorePaths);

  FileSystem fs = tableBaseLocation.getFileSystem(conf);
  FileStatus[] listStatus = fs.globStatus(globPath);
  Set<Path> pathsToRemove = new HashSet<>(listStatus.length);
  int metaStorePathCount = 0;
  int housekeepingPathCount = 0;
  for (FileStatus fileStatus : listStatus) {
    Path fsPath = PathUtils.normalise(fileStatus.getPath());
    ConsistencyCheck.checkFsPath(fsPath);
    if (metastorePaths.contains(fsPath)) {
      LOG.info("KEEP path '{}', referenced in the metastore.", fsPath);
      unvisitedMetastorePaths.remove(fsPath);
      metaStorePathCount++;
    } else if (housekeepingPaths.contains(fsPath)) {
      LOG.info("KEEP path '{}', referenced in housekeeping.", fsPath);
      housekeepingPathCount++;
    } else {
      pathsToRemove.add(fsPath);
    }
  }
  for (Path unvisitedMetastorePath : unvisitedMetastorePaths) {
    LOG.warn("Metastore path '{}' references non-existent data!", unvisitedMetastorePath);
    ConsistencyCheck.checkUnvisitedPath(fs, unvisitedMetastorePath);
  }
  for (Path toRemove : pathsToRemove) {
    removePath(toRemove);
  }
  LOG.info("Table '{}' vacuum path summary; filesystem: {}, metastore: {}, housekeeping: {}, remove: {}.",
      Warehouse.getQualifiedName(table), listStatus.length, metaStorePathCount, housekeepingPathCount,
      pathsToRemove.size());
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 47, Source: VacuumToolApplication.java


Note: the org.apache.hadoop.fs.Path.depth method examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code, and do not republish without permission.