

Java Path.depth method code examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.depth from open-source projects. If you are unsure what Path.depth does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.Path.


A total of nine Path.depth code examples are shown below, ordered by popularity by default.
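Before the examples, here is a minimal standalone sketch (the paths are invented for illustration) of what Path.depth actually returns: the number of elements in the path component, with the root path counting as zero and the scheme/authority ignored.

import org.apache.hadoop.fs.Path;

public class PathDepthDemo {
  public static void main(String[] args) {
    // depth() counts path elements; the scheme and authority are not counted
    System.out.println(new Path("/").depth());                                         // 0
    System.out.println(new Path("/user").depth());                                     // 1
    System.out.println(new Path("/user/hive/warehouse").depth());                      // 3
    System.out.println(new Path("hdfs://namenode:8020/user/hive/warehouse").depth());  // 3
  }
}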

Example 1: PartitionedTablePathResolver

import org.apache.hadoop.fs.Path; // import the package/class required by the method
PartitionedTablePathResolver(IMetaStoreClient metastore, Table table)
    throws NoSuchObjectException, MetaException, TException {
  this.metastore = metastore;
  this.table = table;
  LOG.debug("Table '{}' is partitioned", Warehouse.getQualifiedName(table));
  tableBaseLocation = locationAsPath(table);
  List<Partition> onePartition = metastore.listPartitions(table.getDbName(), table.getTableName(), (short) 1);
  if (onePartition.isEmpty()) {
    LOG.warn("Table '{}' has no partitions, perhaps you can simply delete: {}.", Warehouse.getQualifiedName(table),
        tableBaseLocation);
    throw new ConfigurationException();
  }
  Path partitionLocation = locationAsPath(onePartition.get(0));
  int branches = partitionLocation.depth() - tableBaseLocation.depth();
  String globSuffix = StringUtils.repeat("*", "/", branches);
  globPath = new Path(tableBaseLocation, globSuffix);
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 18, Source: PartitionedTablePathResolver.java
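To see what the resolver computes, here is a small sketch with invented table and partition locations (not taken from circus-train); it assumes the StringUtils used above is Apache Commons Lang's, whose repeat(str, separator, count) joins repeated segments. The depth difference between a partition location and the table base determines how many '*' segments the glob needs.

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;

public class GlobSuffixSketch {
  public static void main(String[] args) {
    // hypothetical locations for a table partitioned by year and month
    Path tableBaseLocation = new Path("hdfs://nn/warehouse/sales");                     // depth 2
    Path partitionLocation = new Path("hdfs://nn/warehouse/sales/year=2020/month=01");  // depth 4

    int branches = partitionLocation.depth() - tableBaseLocation.depth();               // 2
    String globSuffix = StringUtils.repeat("*", "/", branches);                         // "*/*"
    Path globPath = new Path(tableBaseLocation, globSuffix);

    System.out.println(globPath);  // hdfs://nn/warehouse/sales/*/*
  }
}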

Example 2: isMatchingTail

import org.apache.hadoop.fs.Path; // import the package/class required by the method
/**
 * Compare the path component of the Path URIs; e.g. for hdfs://a/b/c and /a/b/c, the
 * '/a/b/c' parts are compared. The scheme is not considered; i.e. if the schemes differ but
 * the paths match, the two equate. Note that this variant requires both paths to have the
 * same depth, so hdfs://a/b/c and b/c do not match.
 * @param pathToSearch Path we will be trying to match.
 * @param pathTail the tail we expect at the end of <code>pathToSearch</code>
 * @return True if <code>pathTail</code> is a tail of the path of <code>pathToSearch</code>
 */
public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
  if (pathToSearch.depth() != pathTail.depth()) return false;
  Path tailPath = pathTail;
  String tailName;
  Path toSearch = pathToSearch;
  String toSearchName;
  boolean result = false;
  do {
    tailName = tailPath.getName();
    if (tailName == null || tailName.length() <= 0) {
      result = true;
      break;
    }
    toSearchName = toSearch.getName();
    if (toSearchName == null || toSearchName.length() <= 0) break;
    // Move up a parent on each path for next go around.  Path doesn't let us go off the end.
    tailPath = tailPath.getParent();
    toSearch = toSearch.getParent();
  } while(tailName.equals(toSearchName));
  return result;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: FSUtils.java
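A quick usage sketch of the method above, with invented paths; it assumes the surrounding FSUtils class from the snippet is available on the classpath. Because the scheme is ignored but the depths must match, only the first comparison below succeeds.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils; // FSUtils is the class shown above; package assumed from the HBase-derived project

public class MatchingTailSketch {
  public static void main(String[] args) {
    Path withScheme = new Path("hdfs://namenode/hbase/data/default/t1");  // depth 4
    Path noScheme   = new Path("/hbase/data/default/t1");                 // depth 4
    Path shorter    = new Path("default/t1");                             // depth 2

    System.out.println(FSUtils.isMatchingTail(withScheme, noScheme));  // true: same components, scheme ignored
    System.out.println(FSUtils.isMatchingTail(withScheme, shorter));   // false: depths differ
  }
}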

Example 3: checkMetastorePath

import org.apache.hadoop.fs.Path; // import the package/class required by the method
static void checkMetastorePath(Path path, int globDepth) {
  checkPathContainsEventId(path, "metastore");
  if (path.depth() != globDepth) {
    throw new IllegalStateException(
        "ABORTING: Metastore path structure looks wrong; depth != file system glob depth: '" + path + "'.");
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 8, Source: ConsistencyCheck.java
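For context, a tiny sketch with invented paths: the glob depth referred to here is the depth of the glob pattern built in Example 1, so a metastore partition location passes the check only when it sits at exactly that depth.

import org.apache.hadoop.fs.Path;

public class GlobDepthSketch {
  public static void main(String[] args) {
    Path globPath   = new Path("hdfs://nn/warehouse/sales/*/*");                 // depth 4
    Path partition  = new Path("hdfs://nn/warehouse/sales/year=2020/month=01");  // depth 4
    Path suspicious = new Path("hdfs://nn/warehouse/sales/year=2020");           // depth 3

    System.out.println(partition.depth() == globPath.depth());   // true  -> would pass checkMetastorePath
    System.out.println(suspicious.depth() == globPath.depth());  // false -> would trigger the IllegalStateException
  }
}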

Example 4: testFileAdd

import org.apache.hadoop.fs.Path; // import the package/class required by the method
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
  // Create a file that, given the test's block-size configuration, spans 32 blocks
  final Path file = getTestPath("testFileAdd");
  createFile(file, 3200, (short)3);
  final long blockCount = 32;
  int blockCapacity = namesystem.getBlockCapacity();
  updateMetrics();
  assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));

  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  // File create operations is 1
  // Number of files created is depth of <code>file</code> path
  assertCounter("CreateFileOps", 1L, rb);
  assertCounter("FilesCreated", (long)file.depth(), rb);

  updateMetrics();
  long filesTotal = file.depth() + 1; // Add 1 for root
  rb = getMetrics(NS_METRICS);
  assertGauge("FilesTotal", filesTotal, rb);
  assertGauge("BlocksTotal", blockCount, rb);
  fs.delete(file, true);
  filesTotal--; // reduce the filecount for deleted file

  rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
  assertGauge("BlocksTotal", 0L, rb);
  assertGauge("PendingDeletionBlocks", 0L, rb);

  rb = getMetrics(NN_METRICS);
  // Delete file operations and number of files deleted must be 1
  assertCounter("DeleteFileOps", 1L, rb);
  assertCounter("FilesDeleted", 1L, rb);
}
 
Developer ID: naver, Project: hadoop, Lines: 35, Source: TestNameNodeMetrics.java

Example 5: checkValidName

import org.apache.hadoop.fs.Path; // import the package/class required by the method
private boolean checkValidName(String name) {
  // A valid archive name is a single path component that ends in ".har"
  Path tmp = new Path(name);
  if (tmp.depth() != 1) {
    return false;
  }
  return name.endsWith(".har");
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: HadoopArchives.java
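A small standalone sketch (names invented) of the rule the check enforces: an archive name must be a single path component that ends in ".har". The helper below mirrors the logic of checkValidName rather than calling it, since the original is private.

import org.apache.hadoop.fs.Path;

public class ArchiveNameSketch {
  // mirrors checkValidName above: single path component, ".har" suffix
  static boolean isValidArchiveName(String name) {
    return new Path(name).depth() == 1 && name.endsWith(".har");
  }

  public static void main(String[] args) {
    System.out.println(isValidArchiveName("logs.har"));          // true
    System.out.println(isValidArchiveName("logs"));              // false: missing .har suffix
    System.out.println(isValidArchiveName("archive/logs.har"));  // false: depth 2, not a single component
  }
}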

Example 6: largestDepth

import org.apache.hadoop.fs.Path; // import the package/class required by the method
private Path largestDepth(List<Path> paths) {
  Path deepest = paths.get(0);
  for (Path p: paths) {
    if (p.depth() > deepest.depth()) {
      deepest = p;
    }
  }
  return deepest;
}
 
Developer ID: naver, Project: hadoop, Lines: 10, Source: HadoopArchives.java

Example 7: parsePath

import org.apache.hadoop.fs.Path; // import the package/class required by the method
public static String parsePath(Path p) {
  // e.g. p = file://xxxx/xxx/xxxx; strip the scheme and authority and return /xxxx/xxx/xxxx
  int depth = p.depth();
  String str = "";
  while (depth > 0) {
    str = Path.SEPARATOR + p.getName() + str;
    p = p.getParent();
    --depth;
  }
  return str;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 12, Source: RemoteJobQueue.java
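A minimal demonstration of the same walk with an invented path: peeling names off with getName()/getParent() rebuilds the path component from the bottom up and drops the scheme and authority.

import org.apache.hadoop.fs.Path;

public class ParsePathSketch {
  // same technique as parsePath above, using a StringBuilder instead of string concatenation
  static String stripSchemeAndAuthority(Path p) {
    StringBuilder sb = new StringBuilder();
    for (int depth = p.depth(); depth > 0; depth--) {
      sb.insert(0, Path.SEPARATOR + p.getName());
      p = p.getParent();
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    Path p = new Path("hdfs://namenode:8020/data/jobs/queue-1");
    System.out.println(stripSchemeAndAuthority(p));  // /data/jobs/queue-1
  }
}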

Example 8: checkPathLength

import org.apache.hadoop.fs.Path; // import the package/class required by the method
/**
 * Check that the path length does not exceed the maximum. Returns true if
 * both length and depth are within limits; returns false if the path is
 * too long or too deep.
 */
private boolean checkPathLength(String src) {
  Path srcPath = new Path(src);
  return (src.length() <= MAX_PATH_LENGTH &&
          srcPath.depth() <= MAX_PATH_DEPTH);
}
 
Developer ID: naver, Project: hadoop, Lines: 11, Source: NameNodeRpcServer.java
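The two limits are fields of NameNodeRpcServer and are not shown in the snippet; the standalone sketch below uses assumed values purely for illustration.

import org.apache.hadoop.fs.Path;

public class PathLengthCheckSketch {
  // assumed limits for illustration; the real values are defined in NameNodeRpcServer
  private static final int MAX_PATH_LENGTH = 8000;
  private static final int MAX_PATH_DEPTH = 1000;

  static boolean checkPathLength(String src) {
    Path srcPath = new Path(src);
    return src.length() <= MAX_PATH_LENGTH && srcPath.depth() <= MAX_PATH_DEPTH;
  }

  public static void main(String[] args) {
    System.out.println(checkPathLength("/user/alice/data.txt"));  // true: short and shallow
  }
}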

Example 9: vacuumTable

import org.apache.hadoop.fs.Path; // import the package/class required by the method
@VisibleForTesting
void vacuumTable(String databaseName, String tableName)
    throws MetaException, TException, NoSuchObjectException, URISyntaxException, IOException {
  Table table = metastore.getTable(databaseName, tableName);

  TablePathResolver pathResolver = TablePathResolver.Factory.newTablePathResolver(metastore, table);
  Path tableBaseLocation = pathResolver.getTableBaseLocation();
  Path globPath = pathResolver.getGlobPath();
  LOG.debug("Table base location: '{}'", tableBaseLocation);
  LOG.debug("Glob path: '{}'", globPath);
  int globDepth = globPath.depth();

  Set<Path> metastorePaths = pathResolver.getMetastorePaths(batchSize, expectedPathCount);
  ConsistencyCheck.checkMetastorePaths(metastorePaths, globDepth);
  Set<Path> unvisitedMetastorePaths = new HashSet<>(metastorePaths);

  FileSystem fs = tableBaseLocation.getFileSystem(conf);
  FileStatus[] listStatus = fs.globStatus(globPath);
  Set<Path> pathsToRemove = new HashSet<>(listStatus.length);
  int metaStorePathCount = 0;
  int housekeepingPathCount = 0;
  for (FileStatus fileStatus : listStatus) {
    Path fsPath = PathUtils.normalise(fileStatus.getPath());
    ConsistencyCheck.checkFsPath(fsPath);
    if (metastorePaths.contains(fsPath)) {
      LOG.info("KEEP path '{}', referenced in the metastore.", fsPath);
      unvisitedMetastorePaths.remove(fsPath);
      metaStorePathCount++;
    } else if (housekeepingPaths.contains(fsPath)) {
      LOG.info("KEEP path '{}', referenced in housekeeping.", fsPath);
      housekeepingPathCount++;
    } else {
      pathsToRemove.add(fsPath);
    }
  }
  for (Path unvisitedMetastorePath : unvisitedMetastorePaths) {
    LOG.warn("Metastore path '{}' references non-existent data!", unvisitedMetastorePath);
    ConsistencyCheck.checkUnvisitedPath(fs, unvisitedMetastorePath);
  }
  for (Path toRemove : pathsToRemove) {
    removePath(toRemove);
  }
  LOG.info("Table '{}' vacuum path summary; filesystem: {}, metastore: {}, housekeeping: {}, remove: {}.",
      Warehouse.getQualifiedName(table), listStatus.length, metaStorePathCount, housekeepingPathCount,
      pathsToRemove.size());
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 47, Source: VacuumToolApplication.java


Note: The org.apache.hadoop.fs.Path.depth examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use must follow the corresponding project's license. Please do not republish without permission.