

Java FileChecksum.equals Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileChecksum.equals. If you are wondering what FileChecksum.equals does, how to use it, or where to find examples of it, the hand-picked code examples below should help. You can also explore further usage examples of org.apache.hadoop.fs.FileChecksum, the class this method belongs to.


Below are 6 code examples of the FileChecksum.equals method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
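
Before looking at the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a FileChecksum from each FileSystem and compare the two with equals(). The class name, configuration, and paths below are hypothetical placeholders and are not taken from any of the projects listed on this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumCompareSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical source and target paths; adjust to your own clusters.
    Path source = new Path("hdfs://clusterA/data/file1");
    Path target = new Path("hdfs://clusterB/data/file1");

    FileSystem sourceFs = source.getFileSystem(conf);
    FileSystem targetFs = target.getFileSystem(conf);

    // getFileChecksum may return null, e.g. on filesystems that do not
    // expose checksums (such as the local filesystem).
    FileChecksum sourceChecksum = sourceFs.getFileChecksum(source);
    FileChecksum targetChecksum = targetFs.getFileChecksum(target);

    if (sourceChecksum == null || targetChecksum == null) {
      System.out.println("Checksum not available on at least one side");
    } else if (sourceChecksum.equals(targetChecksum)) {
      System.out.println("Files match");
    } else {
      System.out.println("Files differ");
    }
  }
}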

Example 1: sameFile

import org.apache.hadoop.fs.FileChecksum; // import the class the method depends on
/**
 * Check if the two files are equal by looking at the file length,
 * and at the checksum (if the user has specified the verifyChecksum flag).
 */
private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) {
  // Not matching length
  if (inputStat.getLen() != outputStat.getLen()) return false;

  // Mark the files as equal, since the user asked for no checksum verification
  if (!verifyChecksum) return true;

  // If checksums are not available, files are not the same.
  FileChecksum inChecksum = getFileChecksum(inputFs, inputStat.getPath());
  if (inChecksum == null) return false;

  FileChecksum outChecksum = getFileChecksum(outputFs, outputStat.getPath());
  if (outChecksum == null) return false;

  return inChecksum.equals(outChecksum);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source file: ExportSnapshot.java
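
Two design points are worth noting in sameFile: the cheap length comparison runs first and short-circuits before the comparatively expensive getFileChecksum calls, and a missing checksum on either side is treated as "not the same file", which is the conservative choice for snapshot export.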

Example 2: checkUpdate

import org.apache.hadoop.fs.FileChecksum; // import the class the method depends on
private FileAction checkUpdate(FileSystem sourceFS, FileStatus source,
    Path target, FileStatus targetFileStatus) throws IOException {
  if (targetFileStatus != null && !overWrite) {
    if (canSkip(sourceFS, source, targetFileStatus)) {
      return FileAction.SKIP;
    } else if (append) {
      long targetLen = targetFileStatus.getLen();
      if (targetLen < source.getLen()) {
        FileChecksum sourceChecksum = sourceFS.getFileChecksum(
            source.getPath(), targetLen);
        if (sourceChecksum != null
            && sourceChecksum.equals(targetFS.getFileChecksum(target))) {
          // We require that the checksum is not null. Thus currently only
          // DistributedFileSystem is supported
          return FileAction.APPEND;
        }
      }
    }
  }
  return FileAction.OVERWRITE;
}
 
Developer ID: hopshadoop, Project: hops, Lines of code: 22, Source file: CopyMapper.java

Example 3: checkUpdate

import org.apache.hadoop.fs.FileChecksum; // import the class the method depends on
private FileAction checkUpdate(FileSystem sourceFS, FileStatus source,
    Path target) throws IOException {
  final FileStatus targetFileStatus;
  try {
    targetFileStatus = targetFS.getFileStatus(target);
  } catch (FileNotFoundException e) {
    return FileAction.OVERWRITE;
  }
  if (targetFileStatus != null && !overWrite) {
    if (canSkip(sourceFS, source, targetFileStatus)) {
      return FileAction.SKIP;
    } else if (append) {
      long targetLen = targetFileStatus.getLen();
      if (targetLen < source.getLen()) {
        FileChecksum sourceChecksum = sourceFS.getFileChecksum(
            source.getPath(), targetLen);
        if (sourceChecksum != null
            && sourceChecksum.equals(targetFS.getFileChecksum(target))) {
          // We require that the checksum is not null. Thus currently only
          // DistributedFileSystem is supported
          return FileAction.APPEND;
        }
      }
    }
  }
  return FileAction.OVERWRITE;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: CopyMapper.java
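
The interesting detail in both checkUpdate variants is the two-argument getFileChecksum(path, length) call: it checksums only the first targetLen bytes of the source, so it can be compared with the checksum of the shorter target to verify that the target is an exact prefix of the source and the copy can be resumed with an append. The standalone sketch below isolates that prefix test; the class and method names are hypothetical, and it assumes a Hadoop version that supports the length-limited getFileChecksum overload (as the examples above do).

import java.io.IOException;

import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class AppendCheckSketch {

  /**
   * Returns true if the target file is a verified prefix of the source file,
   * i.e. the copy can be resumed with an append instead of a full overwrite.
   */
  static boolean targetIsPrefixOfSource(FileSystem sourceFS, Path source,
      FileSystem targetFS, Path target) throws IOException {
    FileStatus sourceStat = sourceFS.getFileStatus(source);
    FileStatus targetStat = targetFS.getFileStatus(target);

    long targetLen = targetStat.getLen();
    if (targetLen >= sourceStat.getLen()) {
      return false; // nothing left to append
    }

    // Checksum over only the first targetLen bytes of the source...
    FileChecksum sourcePrefixChecksum =
        sourceFS.getFileChecksum(source, targetLen);
    // ...compared against the checksum of the whole (shorter) target.
    FileChecksum targetChecksum = targetFS.getFileChecksum(target);

    // Both checksums must be available; in practice this limits the test
    // to filesystems such as DistributedFileSystem.
    return sourcePrefixChecksum != null
        && sourcePrefixChecksum.equals(targetChecksum);
  }

  private AppendCheckSketch() {
  }
}

As the inline comment in the examples notes, a null checksum disables the append path, so the optimization effectively applies only where checksums are supported.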

Example 4: map

import org.apache.hadoop.fs.FileChecksum; // import the class the method depends on
public void map(Text source, Text target, OutputCollector<NullWritable, NullWritable> oc, Reporter rprtr) throws IOException {
    Path sourceFile = new Path(source.toString());
    Path finalFile = new Path(target.toString());
    Path tmpFile = new Path(tmpRoot, UUID.randomUUID().toString());

    setStatus(rprtr, "Copying " + sourceFile.toString() + " to " + tmpFile.toString());

    if(fsDest.exists(finalFile)) {
        FileChecksum fc1 = fsSource.getFileChecksum(sourceFile);
        FileChecksum fc2 = fsDest.getFileChecksum(finalFile);
        // The existing target counts as "different" when:
        //  - both checksums are available and they don't match, or
        //  - the file lengths differ, or
        //  - a checksum is unavailable on either side and the first 1 MB of data differs.
        if(fc1 != null && fc2 != null && !fc1.equals(fc2) ||
           fsSource.getContentSummary(sourceFile).getLength()!=fsDest.getContentSummary(finalFile).getLength() ||
           ((fc1==null || fc2==null) && !Utils.firstNBytesSame(fsSource, sourceFile, fsDest, finalFile, 1024*1024))) {
            throw new IOException("Target file already exists and is different! " + finalFile.toString());
        } else {
            // An identical target is already in place; nothing to copy.
            return;
        }
    }

    fsDest.mkdirs(tmpFile.getParent());

    copyFile(fsSource, sourceFile, fsDest, tmpFile, rprtr);

    setStatus(rprtr, "Renaming " + tmpFile.toString() + " to " + finalFile.toString());

    fsDest.mkdirs(finalFile.getParent());
    if(!fsDest.rename(tmpFile, finalFile))
        throw new IOException("could not rename " + tmpFile.toString() + " to " + finalFile.toString());

    // this is a bit of a hack; if we don't do this explicit rename, the owner of the file will
    // be hadoop each time.
    //fsDest.setOwner(finalFile, this.owner, fs.getGroup());
}
 
Developer ID: indix, Project: dfs-datastores, Lines of code: 34, Source file: AbstractFileCopyMapper.java
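
When one side cannot produce a checksum at all, Example 4 falls back to Utils.firstNBytesSame, comparing only the first megabyte of both files. The real helper belongs to the dfs-datastores project and is not shown here; the sketch below is only a rough illustration, with a hypothetical class name, of how such a byte-prefix comparison could be written.

import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public final class FirstBytesCompareSketch {

  /**
   * Compares at most n leading bytes of two files. This is only a heuristic:
   * files that share a prefix may still differ later on.
   */
  static boolean firstNBytesSame(FileSystem fs1, Path p1,
      FileSystem fs2, Path p2, int n) throws IOException {
    byte[] b1 = readPrefix(fs1, p1, n);
    byte[] b2 = readPrefix(fs2, p2, n);
    return Arrays.equals(b1, b2);
  }

  private static byte[] readPrefix(FileSystem fs, Path path, int n)
      throws IOException {
    byte[] buf = new byte[n];
    int total = 0;
    InputStream in = fs.open(path);
    try {
      while (total < n) {
        int read = in.read(buf, total, n - total);
        if (read < 0) {
          break; // end of file reached before n bytes
        }
        total += read;
      }
    } finally {
      IOUtils.closeStream(in);
    }
    // Trim to the number of bytes actually read so shorter files compare correctly.
    return Arrays.copyOf(buf, total);
  }

  private FirstBytesCompareSketch() {
  }
}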

Example 5: checksumsAreEqual

import org.apache.hadoop.fs.FileChecksum; // import the class the method depends on
/**
 * Utility to compare checksums for the paths specified.
 *
 * If the checksums can't be retrieved, the comparison does not fail;
 * the only time it fails is when both checksums are available and
 * they don't match.
 *
 * @param sourceFS FileSystem for the source path.
 * @param source The source path.
 * @param sourceChecksum The checksum of the source file. If it is null we
 * still need to retrieve it through sourceFS.
 * @param targetFS FileSystem for the target path.
 * @param target The target path.
 * @return true if the checksums match or if either checksum could not be
 * retrieved; false only when both checksums are available and they do not
 * match.
 * @throws IOException if there's an exception while retrieving checksums.
 */
public static boolean checksumsAreEqual(FileSystem sourceFS, Path source,
    FileChecksum sourceChecksum, FileSystem targetFS, Path target)
    throws IOException {
  FileChecksum targetChecksum = null;
  try {
    sourceChecksum = sourceChecksum != null ? sourceChecksum : sourceFS
        .getFileChecksum(source);
    targetChecksum = targetFS.getFileChecksum(target);
  } catch (IOException e) {
    LOG.error("Unable to retrieve checksum for " + source + " or " + target, e);
  }
  return (sourceChecksum == null || targetChecksum == null ||
          sourceChecksum.equals(targetChecksum));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: DistCpUtils.java

Example 6: checksumsAreEqual

import org.apache.hadoop.fs.FileChecksum; // import the class the method depends on
/**
 * Utility to compare checksums for the paths specified.
 *
 * If the checksums can't be retrieved, the comparison does not fail;
 * the only time it fails is when both checksums are available and
 * they don't match.
 *
 * @param sourceFS FileSystem for the source path.
 * @param source The source path.
 * @param targetFS FileSystem for the target path.
 * @param target The target path.
 * @return true if the checksums match or if either checksum could not be
 * retrieved; false only when both checksums are available and they do not
 * match.
 * @throws IOException if there's an exception while retrieving checksums.
 */
public static boolean checksumsAreEqual(FileSystem sourceFS, Path source,
                                 FileSystem targetFS, Path target)
                                 throws IOException {
  FileChecksum sourceChecksum = null;
  FileChecksum targetChecksum = null;
  try {
    sourceChecksum = sourceFS.getFileChecksum(source);
    targetChecksum = targetFS.getFileChecksum(target);
  } catch (IOException e) {
    LOG.error("Unable to retrieve checksum for " + source + " or " + target, e);
  }
  return (sourceChecksum == null || targetChecksum == null ||
          sourceChecksum.equals(targetChecksum));
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 31, Source file: DistCpUtils.java
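
For completeness, here is a hedged usage sketch of the five-argument overload from Example 5, passing null so that the source checksum is fetched internally. It assumes the class lives at org.apache.hadoop.tools.util.DistCpUtils, as in the upstream Hadoop tree; the calling class and paths are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.util.DistCpUtils;

public class ChecksumSkipSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path source = new Path("hdfs://nn1/data/part-00000");   // hypothetical
    Path target = new Path("hdfs://nn2/backup/part-00000"); // hypothetical

    FileSystem sourceFS = source.getFileSystem(conf);
    FileSystem targetFS = target.getFileSystem(conf);

    // Remember: this returns true when either checksum is unavailable,
    // so a "true" result does not by itself prove the files are identical.
    if (DistCpUtils.checksumsAreEqual(sourceFS, source, null, targetFS, target)) {
      System.out.println("Checksums match (or are unavailable) - skipping copy");
    } else {
      System.out.println("Checksums differ - copying again");
    }
  }
}

Because the method returns true when either checksum is unavailable, callers that need a stricter guarantee usually combine it with a file-length comparison, as the other examples on this page do.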


Note: The org.apache.hadoop.fs.FileChecksum.equals method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.