

Java FileSystem.rename Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.rename. If you are wondering what FileSystem.rename does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.rename method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
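Before the project-specific examples, here is a minimal standalone sketch of the basic rename pattern. It is not taken from any of the projects below, and the HDFS URI and paths are hypothetical placeholders. The key behavior the examples below rely on is that FileSystem.rename usually reports failure by returning false rather than throwing, so the boolean result should be checked:

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical namenode address; substitute your cluster's URI.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
    Path src = new Path("/tmp/data.txt");
    Path dst = new Path("/tmp/data-renamed.txt");
    // rename acts as a move; it typically returns false (rather than throwing)
    // when the source is missing or the destination cannot be created.
    if (!fs.rename(src, dst)) {
      throw new IOException("Failed to rename " + src + " to " + dst);
    }
    fs.close();
  }
}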

Example 1: sidelineTable

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Sideline an entire table (move it aside to a backup directory).
 */
void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir,
    Path backupHbaseDir) throws IOException {
  Path tableDir = FSUtils.getTableDir(hbaseDir, tableName);
  if (fs.exists(tableDir)) {
    Path backupTableDir= FSUtils.getTableDir(backupHbaseDir, tableName);
    fs.mkdirs(backupTableDir.getParent());
    boolean success = fs.rename(tableDir, backupTableDir);
    if (!success) {
      throw new IOException("Failed to move  " + tableName + " from "
          +  tableDir + " to " + backupTableDir);
    }
  } else {
    LOG.info("No previous " + tableName +  " exists.  Continuing.");
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: HBaseFsck.java

Example 2: testRenameWithValidPath

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testRenameWithValidPath() throws Exception {
  setupRPC(
      DFS.RpcType.RENAME_REQUEST, DFS.RenameRequest.newBuilder().setOldpath("/foo/bar").setNewpath("/foo/bar2").build(),
      DFS.RpcType.RENAME_RESPONSE, DFS.RenameResponse.newBuilder().setValue(true).build());

  FileSystem fs = newRemoteNodeFileSystem();

  Path oldPath = new Path("/foo/bar");
  Path newPath = new Path("/foo/bar2");
  boolean result = fs.rename(oldPath, newPath);

  assertTrue(result);
}
 
Author: dremio, Project: dremio-oss, Lines: 15, Source: TestRemoteNodeFileSystem.java

Example 3: renameFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** Rename a file on HDFS. **/
public static void renameFile(String origin, String newName) throws IOException{
    Configuration conf = new Configuration();
    String str = NodeConfig.HDFS_PATH+origin;
    String dst = NodeConfig.HDFS_PATH+newName;
    FileSystem fs = FileSystem.get(URI.create(str), conf);
    Path srcPath = new Path(str);
    Path dstPath = new Path(dst);
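    // Note: rename returns a boolean; this helper does not check the result.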
    fs.rename(srcPath, dstPath);
    fs.close();
}
 
Author: cuiods, Project: WIFIProbe, Lines: 12, Source: HDFSTool.java

Example 4: compact

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * {@inheritDoc}
 */
public CompactionResponse compact() throws Exception {
    Validate.notNull(criteria, "Criteria cannot be null");
    log.info("In place compaction requested for input path {}", criteria.getSourcePath());
    final URI uri = URI.create(criteria.getSourcePath());
    final FileSystem fileSystem = FileSystem.get(uri, configuration);

    final String tempCompactedLocation = TEMP_OUTPUT_LOCATION + "compacted-" + UUID.randomUUID().toString() + "/";
    final Path tempLocation = new Path(TEMP_OUTPUT_LOCATION + UUID.randomUUID().toString() + "/");
    try {
        fileSystem.access(new Path(criteria.getSourcePath()), FsAction.WRITE);
    } catch (AccessControlException e) {
        throw new IllegalStateException(String.format("User does not have permissions to perform move/delete for location %s", criteria.getSourcePath()));
    }
    // Perform normal compaction from Source --> TempTargetLocation
    log.info("Performing Normal Compaction from Source {} to Temp Target {}", criteria.getSourcePath(), tempCompactedLocation);
    final CompactionCriteria compactionCriteria = new CompactionCriteria(criteria.getSourcePath(), tempCompactedLocation, criteria.getThresholdInBytes());
    final CompactionManager compactionManager = new CompactionManagerImpl(configuration, compactionCriteria);
    final CompactionResponse response = compactionManager.compact();

    log.info("Moving files from input path {} to temp path {}", criteria.getSourcePath(), tempLocation.toString());
    fileSystem.rename(new Path(criteria.getSourcePath()), tempLocation);

    log.info("Moving compacted files from temp compacted path {} to final location {}", tempCompactedLocation, criteria.getSourcePath());
    fileSystem.rename(new Path(tempCompactedLocation), new Path(criteria.getSourcePath()));
    return response;
}
 
Author: ExpediaInceCommercePlatform, Project: dataSqueeze, Lines: 30, Source: CompactionManagerInPlaceImpl.java

Example 5: rewriteAsPb

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * @param cid cluster id to write into the rewritten file
 * @throws IOException
 */
private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
    final ClusterId cid)
throws IOException {
  // Rewrite the file as pb.  Move aside the old one first, write new
  // then delete the moved-aside file.
  Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
  if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
  setClusterId(fs, rootdir, cid, 100);
  if (!fs.delete(movedAsideName, false)) {
    throw new IOException("Failed delete of " + movedAsideName);
  }
  LOG.debug("Rewrote the hbase.id file as pb");
}
 
Author: fengchen8086, Project: ditb, Lines: 18, Source: FSUtils.java

Example 6: createDictionaryVersionedRootPath

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static Path createDictionaryVersionedRootPath(FileSystem fs, Path tableDir, long nextVersion, Path tmpDictionaryRootPath) throws IOException {
  final Path dictionaryRootDir = new Path(tableDir, dictionaryRootDirName(nextVersion));
  if (fs.exists(dictionaryRootDir)) {
    throw new IOException(format("Dictionary already exists for version: %d, path: %s", nextVersion, dictionaryRootDir));
  }
  final long currentVersion = getDictionaryVersion(fs, tableDir);
  if (currentVersion > nextVersion) {
    throw new IOException(format("Dictionary exists with a higher version %d, attempted version %d", currentVersion, nextVersion));
  }
  if (!fs.rename(tmpDictionaryRootPath, dictionaryRootDir)) {
    throw new IOException(format("Failed to rename temporary dictionaries at %s to %s, for version %d", tmpDictionaryRootPath, dictionaryRootDir, nextVersion));
  }
  return dictionaryRootDir;
}
 
Author: dremio, Project: dremio-oss, Lines: 15, Source: GlobalDictionaryBuilder.java

Example 7: renameOrMerge

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
    throws IOException {
  if (algorithmVersion == 1) {
    if (!fs.rename(from.getPath(), to)) {
      throw new IOException("Failed to rename " + from + " to " + to);
    }
  } else {
    fs.mkdirs(to);
    for (FileStatus subFrom : fs.listStatus(from.getPath())) {
      Path subTo = new Path(to, subFrom.getPath().getName());
      mergePaths(fs, subFrom, subTo);
    }
  }
}
 
Author: naver, Project: hadoop, Lines: 15, Source: FileOutputCommitter.java

Example 8: setVersion

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Sets version of file system
 *
 * @param fs filesystem object
 * @param rootdir hbase root directory
 * @param version version to set
 * @param wait time to wait for retry
 * @param retries number of times to retry before throwing an IOException
 * @throws IOException e
 */
public static void setVersion(FileSystem fs, Path rootdir, String version,
    int wait, int retries) throws IOException {
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
    HConstants.VERSION_FILE_NAME);
  while (true) {
    try {
      // Write the version to a temporary file
      FSDataOutputStream s = fs.create(tempVersionFile);
      try {
        s.write(toVersionByteArray(version));
        s.close();
        s = null;
        // Move the temp version file to its normal location. Returns false
        // if the rename failed. Throw an IOE in that case.
        if (!fs.rename(tempVersionFile, versionFile)) {
          throw new IOException("Unable to move temp version file to " + versionFile);
        }
      } finally {
        // Cleaning up the temporary if the rename failed would be trying
        // too hard. We'll unconditionally create it again the next time
        // through anyway, files are overwritten by default by create().

        // Attempt to close the stream on the way out if it is still open.
        try {
          if (s != null) s.close();
        } catch (IOException ignore) { }
      }
      LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
      return;
    } catch (IOException e) {
      if (retries > 0) {
        LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
        fs.delete(versionFile, false);
        try {
          if (wait > 0) {
            Thread.sleep(wait);
          }
        } catch (InterruptedException ie) {
          throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
        }
        retries--;
      } else {
        throw e;
      }
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 59, Source: FSUtils.java

Example 9: newMapTask

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Override
protected Callable<Boolean> newMapTask(final String address) throws IOException {
  return new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      // Only directories should be removed with a fork/join task
      final FileSystem fs = getDelegateFileSystem(address);
      FileStatus status = fs.getFileStatus(path);
      if (status.isFile()) {
        throw new FileNotFoundException("Directory not found: " + path);
      }
      return fs.rename(path, dst);
    }
  };
}
 
Author: dremio, Project: dremio-oss, Lines: 16, Source: PseudoDistributedFileSystem.java

Example 10: canRename

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
static boolean canRename(FileSystem fs, Path src, Path dst
    ) throws IOException {
  try {
    fs.rename(src, dst);
    return true;
  } catch(AccessControlException e) {
    return false;
  }
}
 
Author: naver, Project: hadoop, Lines: 10, Source: TestPermission.java

Example 11: execute

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public void execute(FileSystem fileSystem, Path filePath)
		throws IOException {
	Path destPath = new Path(destination, filePath.getName());
	LOG.info("Moving file {} to {}", filePath, destPath);
	if (!fileSystem.rename(filePath, destPath)) {
		LOG.warn("Failed to move file {} to {}", filePath, destPath);
	}
}
 
Author: PacktPublishing, Project: Mastering-Apache-Storm, Lines: 8, Source: MoveFileAction.java

Example 12: commitData

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void commitData(Configuration conf) throws IOException {

    Path workDir = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
    Path finalDir = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
    FileSystem targetFS = workDir.getFileSystem(conf);

    LOG.info("Atomic commit enabled. Moving " + workDir + " to " + finalDir);
    if (targetFS.exists(finalDir) && targetFS.exists(workDir)) {
      LOG.error("Pre-existing final-path found at: " + finalDir);
      throw new IOException("Target-path can't be committed to because it " +
          "exists at " + finalDir + ". Copied data is in temp-dir: " + workDir + ". ");
    }

    boolean result = targetFS.rename(workDir, finalDir);
    if (!result) {
      LOG.warn("Rename failed. Perhaps data already moved. Verifying...");
      result = targetFS.exists(finalDir) && !targetFS.exists(workDir);
    }
    if (result) {
      LOG.info("Data committed successfully to " + finalDir);
      taskAttemptContext.setStatus("Data committed successfully to " + finalDir);
    } else {
      LOG.error("Unable to commit data to " + finalDir);
      throw new IOException("Atomic commit failed. Temporary data in " + workDir +
        ", Unable to move to " + finalDir);
    }
  }
 
Author: naver, Project: hadoop, Lines: 28, Source: CopyCommitter.java

Example 13: testHbckFixOrphanTable

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test (timeout=180000)
public void testHbckFixOrphanTable() throws Exception {
  TableName table = TableName.valueOf("tableInfo");
  FileSystem fs = null;
  Path tableinfo = null;
  try {
    setupTable(table);

    Path hbaseTableDir = FSUtils.getTableDir(
        FSUtils.getRootDir(conf), table);
    fs = hbaseTableDir.getFileSystem(conf);
    FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    tableinfo = status.getPath();
    fs.rename(tableinfo, new Path("/.tableinfo"));

    //to report error if .tableinfo is missing.
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

    // fix OrphanTable with default .tableinfo (htd not yet cached on master)
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = null;
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);

    HTableDescriptor htd = admin.getTableDescriptor(table);
    htd.setValue("NOT_DEFAULT", "true");
    admin.disableTable(table);
    admin.modifyTable(table, htd);
    admin.enableTable(table);
    fs.delete(status.getPath(), true);

    // fix OrphanTable with cache
    htd = admin.getTableDescriptor(table); // warms up cached htd on master
    hbck = doFsck(conf, true);
    assertNoErrors(hbck);
    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
    assertNotNull(status);
    htd = admin.getTableDescriptor(table);
    assertEquals(htd.getValue("NOT_DEFAULT"), "true");
  } finally {
    fs.rename(new Path("/.tableinfo"), tableinfo);
    cleanupTable(table);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 47, Source: TestHBaseFsck.java

Example 14: handleCreateTable

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Responsible for table creation (on-disk and META) and assignment.
 * - Create the table directory and descriptor (temp folder)
 * - Create the on-disk regions (temp folder)
 *   [If something fails here: there is just some trash left in temp]
 * - Move the table from temp to the root directory
 *   [If something fails here: the table is in place but some of its required rows are
 *    missing from META (hbck needed)]
 * - Add regions to META
 *   [If something fails here: we don't have regions assigned: table disabled]
 * - Assign regions to Region Servers
 *   [If something fails here: we still have the table in disabled state]
 * - Update ZooKeeper with the enabled state
 */
private void handleCreateTable(TableName tableName)
    throws IOException, CoordinatedStateException {
  Path tempdir = fileSystemManager.getTempDir();
  FileSystem fs = fileSystemManager.getFileSystem();

  // 1. Create Table Descriptor
  Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
  new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
    tempTableDir, this.hTableDescriptor, false);
  Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);

  // 2. Create Regions
  List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempdir, tableName);
  // 3. Move Table temp directory to the hbase root location
  if (!fs.rename(tempTableDir, tableDir)) {
    throw new IOException("Unable to move table from temp=" + tempTableDir +
      " to hbase root=" + tableDir);
  }

  if (regionInfos != null && regionInfos.size() > 0) {
    // 4. Add regions to META
    addRegionsToMeta(regionInfos, hTableDescriptor.getRegionReplication());
    // 5. Add replicas if needed
    regionInfos = addReplicas(hTableDescriptor, regionInfos);

    // 6. Setup replication for region replicas if needed
    if (hTableDescriptor.getRegionReplication() > 1) {
      ServerRegionReplicaUtil.setupRegionReplicaReplication(conf);
    }

    // 7. Trigger immediate assignment of the regions in round-robin fashion
    ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
  }

  // 8. Set table enabled flag up in zk.
  try {
    assignmentManager.getTableStateManager().setTableState(tableName,
      ZooKeeperProtos.Table.State.ENABLED);
  } catch (CoordinatedStateException e) {
    throw new IOException("Unable to ensure that " + tableName + " will be" +
      " enabled because of a ZooKeeper issue", e);
  }

  // 9. Update the table descriptor cache.
  ((HMaster) this.server).getTableDescriptors().get(tableName);
}
 
Author: fengchen8086, Project: ditb, Lines: 61, Source: CreateTableHandler.java

Example 15: renameAndSetModifyTime

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
    throws IOException {
  // set the modify time for TimeToLive Cleaner
  fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
  return fs.rename(src, dest);
}
 
Author: fengchen8086, Project: ditb, Lines: 7, Source: FSUtils.java


Note: The org.apache.hadoop.fs.FileSystem.rename examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.