

Java Path.getName Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.getName. If you are wondering what Path.getName does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.Path.


The following presents 15 code examples of the Path.getName method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
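Before the examples, here is a minimal, self-contained sketch of what Path.getName returns: only the final component of a path, with the URI scheme, authority, and parent directories stripped. The host name and file names below are purely illustrative.

import org.apache.hadoop.fs.Path;

public class PathGetNameDemo {
  public static void main(String[] args) {
    // getName() returns just the last path component, regardless of
    // the scheme, authority, or parent directories.
    Path p1 = new Path("hdfs://namenode:8020/user/alice/data/part-00000");
    System.out.println(p1.getName());   // part-00000

    Path p2 = new Path("/tmp/logs/");
    System.out.println(p2.getName());   // logs (the trailing slash is normalized away)

    // Combined with getParent(), getName() is handy for building sibling
    // paths, a pattern that recurs in several of the examples below.
    Path sibling = new Path(p1.getParent(), "tmp-" + p1.getName());
    System.out.println(sibling);        // hdfs://namenode:8020/user/alice/data/tmp-part-00000
  }
}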

Example 1: removeDirectoryFromSerialNumberIndex

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private void removeDirectoryFromSerialNumberIndex(Path serialDirPath) {
  String serialPart = serialDirPath.getName();
  String timeStampPart = JobHistoryUtils
      .getTimestampPartFromPath(serialDirPath.toString());
  if (timeStampPart == null) {
    LOG.warn("Could not find timestamp portion from path: "
        + serialDirPath.toString() + ". Continuing with next");
    return;
  }
  if (serialPart == null) {
    LOG.warn("Could not find serial portion from path: "
        + serialDirPath.toString() + ". Continuing with next");
    return;
  }
  serialNumberIndex.remove(serialPart, timeStampPart);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: HistoryFileManager.java

Example 2: mkdirs

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private boolean mkdirs(FTPClient client, Path file, FsPermission permission)
    throws IOException {
  boolean created = true;
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.getName();
  if (!exists(client, absolute)) {
    Path parent = absolute.getParent();
    created = (parent == null || mkdirs(client, parent, FsPermission
        .getDirDefault()));
    if (created) {
      String parentDir = parent.toUri().getPath();
      client.changeWorkingDirectory(parentDir);
      created = created && client.makeDirectory(pathName);
    }
  } else if (isFile(client, absolute)) {
    throw new ParentNotDirectoryException(String.format(
        "Can't make directory for path %s since it is a file.", absolute));
  }
  return created;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: FTPFileSystem.java

Example 3: testDistributedCache

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test (timeout = 600000)
public void testDistributedCache() throws Exception {
  // Test with a local (file:///) Job Jar
  Path localJobJarPath = makeJobJarWithLib(TEST_ROOT_DIR.toUri().toString());
  _testDistributedCache(localJobJarPath.toUri().toString());
  
  // Test with a remote (hdfs://) Job Jar
  Path remoteJobJarPath = new Path(remoteFs.getUri().toString() + "/",
          localJobJarPath.getName());
  remoteFs.moveFromLocalFile(localJobJarPath, remoteJobJarPath);
  File localJobJarFile = new File(localJobJarPath.toUri().toString());
  if (localJobJarFile.exists()) {     // just to make sure
      localJobJarFile.delete();
  }
  _testDistributedCache(remoteJobJarPath.toUri().toString());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: TestMRJobs.java

Example 4: PageReader

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
PageReader(ColumnReader<?> parentStatus, FileSystem fs, Path path, ColumnChunkMetaData columnChunkMetaData)
  throws ExecutionSetupException{
  this.parentColumnReader = parentStatus;
  allocatedDictionaryBuffers = new ArrayList<ByteBuf>();
  codecFactory = parentColumnReader.parentReader.getCodecFactory();

  long start = columnChunkMetaData.getFirstDataPageOffset();
  try {
    FSDataInputStream f = fs.open(path);
    this.dataReader = new ColumnDataReader(f, start, columnChunkMetaData.getTotalSize());
    loadDictionaryIfExists(parentStatus, columnChunkMetaData, f);

  } catch (IOException e) {
    throw new ExecutionSetupException("Error opening or reading metadata for parquet file at location: "
        + path.getName(), e);
  }

}
 
Developer ID: skhalifa, Project: QDrill, Lines of code: 19, Source file: PageReader.java

Example 5: getServerNameFromWALDirectoryName

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * This function returns the region server name from a log file name that is in one of the following
 * formats:
 * <ul>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;-splitting/...</li>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;/...</li>
 * </ul>
 * @param logFile path to a WAL file, or to the server's WAL directory itself
 * @return the parsed region server name, or null if the passed-in logFile isn't a valid WAL file path
 */
public static ServerName getServerNameFromWALDirectoryName(Path logFile) {
  String logDirName = logFile.getParent().getName();
  // We were passed the directory and not a file in it.
  if (logDirName.equals(HConstants.HREGION_LOGDIR_NAME)) {
    logDirName = logFile.getName();
  }
  ServerName serverName = null;
  if (logDirName.endsWith(SPLITTING_EXT)) {
    logDirName = logDirName.substring(0, logDirName.length() - SPLITTING_EXT.length());
  }
  try {
    serverName = ServerName.parseServerName(logDirName);
  } catch (IllegalArgumentException ex) {
    serverName = null;
    LOG.warn("Cannot parse a server name from path=" + logFile + "; " + ex.getMessage());
  }
  if (serverName != null && serverName.getStartcode() < 0) {
    LOG.warn("Invalid log file path=" + logFile);
    serverName = null;
  }
  return serverName;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 33, Source file: DefaultWALProvider.java

Example 6: getRegionArchiveDir

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Get the archive directory for a given region under the specified table
 * @param rootDir {@link Path} to the root directory where HBase files are stored (used to build the archive path)
 * @param tableName the table name. Cannot be null.
 * @param regiondir the path to the region directory. Cannot be null.
 * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
 *         should not be archived
 */
public static Path getRegionArchiveDir(Path rootDir,
                                       TableName tableName,
                                       Path regiondir) {
  // get the archive directory for a table
  Path archiveDir = getTableArchivePath(rootDir, tableName);

  // then add on the region path under the archive
  String encodedRegionName = regiondir.getName();
  return HRegion.getRegionDir(archiveDir, encodedRegionName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source file: HFileArchiveUtil.java

Example 7: migrateTable

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Migrates table info files.
 * Moves the latest table info file (if present) from the table dir to the table info subdir.
 * Removes any older table info files from the table dir and any existing table info subdir.
 */
private static void migrateTable(FileSystem fs, Path tableDir) throws IOException {
  FileStatus oldTableStatus = FSTableDescriptors.getCurrentTableInfoStatus(fs, tableDir, true);
  if (oldTableStatus == null) {
    LOG.debug("No table info file to migrate for " + tableDir);
    return;
  }
  
  Path tableInfoDir = new Path(tableDir, FSTableDescriptors.TABLEINFO_DIR);
  // remove table info subdir if it already exists
  boolean removedExistingSubdir = FSUtils.deleteDirectory(fs, tableInfoDir);
  if (removedExistingSubdir) {
    LOG.info("Removed existing subdir at: " + tableInfoDir);
  }
  boolean createdSubdir = fs.mkdirs(tableInfoDir);
  if (!createdSubdir) {
    throw new IOException("Unable to create new table info directory: " + tableInfoDir);
  }
  
  Path oldTableInfoPath = oldTableStatus.getPath();
  Path newTableInfoPath = new Path(tableInfoDir, oldTableInfoPath.getName());
  boolean renamedInfoFile = fs.rename(oldTableInfoPath, newTableInfoPath);
  if (!renamedInfoFile) {
    throw new IOException("Failed to move table info file from old location: "
      + oldTableInfoPath + " to new location: " + newTableInfoPath);
  }
 
  LOG.info("Migrated table info from: " + oldTableInfoPath
    + " to new location: " + newTableInfoPath);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 35, Source file: FSTableDescriptorMigrationToSubdir.java

Example 8: main

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);

  for (String arg : args) {
    Path filePath = new Path(arg).makeQualified(lfs);
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(filePath.getParent(), "gold"+testName);

      ObjectMapper mapper = new ObjectMapper();
      JsonFactory factory = mapper.getJsonFactory();
      FSDataOutputStream ostream = lfs.create(goldFilePath, true);
      JsonGenerator gen = factory.createJsonGenerator(ostream,
          JsonEncoding.UTF8);
      gen.useDefaultPrettyPrinter();
      
      gen.writeObject(newResult);
      
      gen.close();
    } else {
      System.err.println("Input file not started with \"input\". File "+fileName+" skipped.");
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: TestHistograms.java

Example 9: updateToken

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Override
public void updateToken(MRDelegationTokenIdentifier tokenId,
    Long renewDate) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Updating token " + tokenId.getSequenceNumber());
  }

  // Files cannot be atomically replaced, therefore we write a temporary
  // update file, remove the original token file, then rename the update
  // file to the token file. During recovery either the token file will be
  // used or if that is missing and an update file is present then the
  // update file is used.
  Path tokenPath = getTokenPath(tokenId);
  Path tmp = new Path(tokenPath.getParent(),
      UPDATE_TMP_FILE_PREFIX + tokenPath.getName());
  writeFile(tmp, buildTokenData(tokenId, renewDate));
  try {
    deleteFile(tokenPath);
  } catch (IOException e) {
    fs.delete(tmp, false);
    throw e;
  }
  if (!fs.rename(tmp, tokenPath)) {
    throw new IOException("Could not rename " + tmp + " to " + tokenPath);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source file: HistoryServerFileSystemStateStoreService.java

Example 10: DirWorker

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
DirWorker(Path dir, File localDir, int nFiles) throws IOException {
  super(dir.getName());
  this.dir = dir;
  this.localDir = localDir;

  this.files = new FileWorker[nFiles];
  for(int i = 0; i < files.length; i++) {
    files[i] = new FileWorker(dir, localDir, String.format("file%02d", i));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 11, Source file: TestAppendSnapshotTruncate.java

Example 11: visitRegionStoreFiles

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Iterate over the region store files
 *
 * @param fs {@link FileSystem}
 * @param regionDir {@link Path} to the region directory
 * @param visitor callback object to get the store files
 * @throws IOException if an error occurred while scanning the directory
 */
public static void visitRegionStoreFiles(final FileSystem fs, final Path regionDir,
    final StoreFileVisitor visitor) throws IOException {
  FileStatus[] families = FSUtils.listStatus(fs, regionDir, new FSUtils.FamilyDirFilter(fs));
  if (families == null) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No families under region directory:" + regionDir);
    }
    return;
  }

  PathFilter fileFilter = new FSUtils.FileFilter(fs);
  for (FileStatus family: families) {
    Path familyDir = family.getPath();
    String familyName = familyDir.getName();

    // get all the storeFiles in the family
    FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir, fileFilter);
    if (storeFiles == null) {
      if (LOG.isTraceEnabled()) {
        LOG.trace("No hfiles found for family: " + familyDir + ", skipping.");
      }
      continue;
    }

    for (FileStatus hfile: storeFiles) {
      Path hfilePath = hfile.getPath();
      visitor.storeFile(regionDir.getName(), familyName, hfilePath.getName());
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source file: FSVisitor.java

Example 12: openFile

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private static FSDataInputStream openFile(FileSystem fs, Path path) throws ExecutionSetupException {
  try {
    return fs.open(path);
  } catch (IOException e) {
    throw new ExecutionSetupException("Error opening or reading metadata for parquet file at location: "
      + path.getName(), e);
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 9, Source file: PageReader.java

Example 13: isFileDeletable

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Override
public boolean isFileDeletable(FileStatus fStat) {
  try {
    // if it's a directory, then it can be deleted
    if (fStat.isDirectory()) return true;
    
    Path file = fStat.getPath();
    // check to see if the path still exists
    FileStatus[] deleteStatus = FSUtils.listStatus(this.fs, file, null);
    // if the file doesn't exist, then it can be deleted (but should never
    // happen since deleted files shouldn't get passed in)
    if (deleteStatus == null) return true;

    // otherwise, we need to check the file's table and see if it's being archived
    Path family = file.getParent();
    Path region = family.getParent();
    Path table = region.getParent();

    String tableName = table.getName();
    boolean ret = !archiveTracker.keepHFiles(tableName);
    LOG.debug("Archiver says to [" + (ret ? "delete" : "keep") + "] files for table:" + tableName);
    return ret;
  } catch (IOException e) {
    LOG.error("Failed to lookup status of:" + fStat.getPath() + ", keeping it just incase.", e);
    return false;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source file: LongTermArchivingHFileCleaner.java

Example 14: PageReader

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
PageReader(ColumnReader<?> parentStatus, FSDataInputStream inputStream, Path path, ColumnChunkMetaData columnChunkMetaData) throws ExecutionSetupException {
  this.parentColumnReader = parentStatus;
  allocatedDictionaryBuffers = new ArrayList<ByteBuf>();
  codecFactory = parentColumnReader.parentReader.getCodecFactory();
  this.stats = parentColumnReader.parentReader.parquetReaderStats;
  long start = columnChunkMetaData.getFirstDataPageOffset();
  this.inputStream = inputStream;
  try {
    this.dataReader = new ColumnDataReader(inputStream, start, columnChunkMetaData.getTotalSize());
    loadDictionaryIfExists(parentStatus, columnChunkMetaData, inputStream);
  } catch (IOException e) {
    throw new ExecutionSetupException("Error opening or reading metadata for parquet file at location: "
      + path.getName(), e);
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines of code: 16, Source file: PageReader.java

Example 15: testSnapshotName

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test (timeout=300000)
public void testSnapshotName() throws Exception {
  final String dirStr = "/testSnapshotWithQuota/dir";
  final Path dir = new Path(dirStr);
  hdfs.mkdirs(dir, new FsPermission((short)0777));
  hdfs.allowSnapshot(dir);

  // set namespace quota
  final int NS_QUOTA = 6;
  hdfs.setQuota(dir, NS_QUOTA, HdfsConstants.QUOTA_DONT_SET);

  // create object to use up the quota.
  final Path foo = new Path(dir, "foo");
  final Path f1 = new Path(foo, "f1");
  DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
  {
    //create a snapshot with default snapshot name
    final Path snapshotPath = hdfs.createSnapshot(dir);

    //check snapshot path and the default snapshot name
    final String snapshotName = snapshotPath.getName(); 
    Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
        "s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
        snapshotName));
    final Path parent = snapshotPath.getParent();
    Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
    Assert.assertEquals(dir, parent.getParent());
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source file: TestNestedSnapshots.java


Note: The org.apache.hadoop.fs.Path.getName examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the copyright belongs to the original authors. Please consult each project's License before redistributing or using the code; do not reproduce without permission.