

Java FileStatus.getBlockSize Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileStatus.getBlockSize. If you have been wondering what FileStatus.getBlockSize does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileStatus.


The sections below present 9 code examples of the FileStatus.getBlockSize method, sorted by popularity by default.
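Before the examples, here is a minimal, self-contained sketch of the method's basic use. It assumes a default Configuration and a hypothetical HDFS path /tmp/example.txt:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockSizeDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path; replace with a file that exists on your cluster.
    FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));
    // getBlockSize() returns the file's block size in bytes.
    System.out.println("Block size: " + status.getBlockSize() + " bytes");
    fs.close();
  }
}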

Example 1: create

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private FSDataOutputStream create(Path f, Reporter reporter,
    FileStatus srcstat) throws IOException {
  if (destFileSys.exists(f)) {
    destFileSys.delete(f, false);
  }
  if (!preserve_status) {
    return destFileSys.create(f, true, sizeBuf, reporter);
  }

  FsPermission permission = preseved.contains(FileAttribute.PERMISSION)?
      srcstat.getPermission(): null;
  short replication = preseved.contains(FileAttribute.REPLICATION)?
      srcstat.getReplication(): destFileSys.getDefaultReplication(f);
  long blockSize = preseved.contains(FileAttribute.BLOCK_SIZE)?
      srcstat.getBlockSize(): destFileSys.getDefaultBlockSize(f);
  return destFileSys.create(f, permission, true, sizeBuf, replication,
      blockSize, reporter);
}
 
Developer ID: naver, Project: hadoop, Lines: 19, Source: DistCpV1.java
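The preseved set above (the misspelling comes from the original DistCpV1 source) controls which source attributes survive the copy. A hedged sketch of how such a set might be built with EnumSet, using only the FileAttribute constants that appear in the example:

EnumSet<FileAttribute> preseved =
    EnumSet.of(FileAttribute.PERMISSION, FileAttribute.BLOCK_SIZE);
// With BLOCK_SIZE in the set, create() above passes srcstat.getBlockSize()
// instead of the destination filesystem's default block size.
boolean keepBlockSize = preseved.contains(FileAttribute.BLOCK_SIZE);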

Example 2: canSkip

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private boolean canSkip(FileSystem sourceFS, FileStatus source, 
    FileStatus target) throws IOException {
  if (!syncFolders) {
    return true;
  }
  boolean sameLength = target.getLen() == source.getLen();
  boolean sameBlockSize = source.getBlockSize() == target.getBlockSize()
      || !preserve.contains(FileAttribute.BLOCKSIZE);
  if (sameLength && sameBlockSize) {
    return skipCrc ||
        DistCpUtils.checksumsAreEqual(sourceFS, source.getPath(), null,
            targetFS, target.getPath());
  } else {
    return false;
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: CopyMapper.java
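Restated compactly (a paraphrase, not code from CopyMapper): without sync mode the target is always treated as skippable; with it, the lengths must match, the block sizes must match unless BLOCKSIZE is not being preserved, and the checksums must agree unless CRC checking is disabled. checksumsMatch below is a hypothetical stand-in for the DistCpUtils.checksumsAreEqual call:

boolean skippable = !syncFolders
    || (sameLength && sameBlockSize && (skipCrc || checksumsMatch));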

Example 3: concat

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
public static void concat(String dir) throws IOException {
    String directory = NodeConfig.HDFS_PATH + dir;
    Configuration conf = new Configuration();
    DistributedFileSystem fs =
            (DistributedFileSystem) FileSystem.get(URI.create(directory), conf);
    FileStatus[] fileList = fs.listStatus(new Path(directory));

    if (fileList.length >= 2) {
        ArrayList<Path> srcs = new ArrayList<Path>(fileList.length);
        for (FileStatus fileStatus : fileList) {
            // Keep only regular files well below the block size; the bitwise
            // expression approximates "less than half a block" and assumes
            // the block size is a power of two.
            if (fileStatus.isFile() &&
                    (fileStatus.getLen() & ~fileStatus.getBlockSize()) < fileStatus.getBlockSize() / 2) {
                srcs.add(fileStatus.getPath());
            }
        }

        if (srcs.size() >= 2) {
            // Concatenate every remaining file onto the first one.
            Path appended = srcs.get(0);
            Path[] sources = new Path[srcs.size() - 1];
            for (int i = 0; i < srcs.size() - 1; i++) {
                sources[i] = srcs.get(i + 1);
            }
            fs.concat(appended, sources);
            Logger.println("concat to: " + appended.getName());
            Logger.println(Arrays.toString(sources));
        }

        fs.close();
    }
}
 
Developer ID: cuiods, Project: WIFIProbe, Lines: 39, Source: HDFSTool.java
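A hypothetical invocation of the helper above; /data/logs is an assumed directory under NodeConfig.HDFS_PATH. Note that DistributedFileSystem.concat generally requires the source files to share the target's block size and replication:

// Merge the small files under <HDFS_PATH>/data/logs into the first of them.
HDFSTool.concat("/data/logs");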

Example 4: checkNumCachedReplicas

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
    final List<Path> paths, final int expectedBlocks,
    final int expectedReplicas)
    throws Exception {
  int numCachedBlocks = 0;
  int numCachedReplicas = 0;
  for (Path p: paths) {
    final FileStatus f = dfs.getFileStatus(p);
    final long len = f.getLen();
    final long blockSize = f.getBlockSize();
    // round it up to full blocks
    final long numBlocks = (len + blockSize - 1) / blockSize;
    BlockLocation[] locs = dfs.getFileBlockLocations(p, 0, len);
    assertEquals("Unexpected number of block locations for path " + p,
        numBlocks, locs.length);
    for (BlockLocation l: locs) {
      if (l.getCachedHosts().length > 0) {
        numCachedBlocks++;
      }
      numCachedReplicas += l.getCachedHosts().length;
    }
  }
  LOG.info("Found " + numCachedBlocks + " of " + expectedBlocks + " blocks");
  LOG.info("Found " + numCachedReplicas + " of " + expectedReplicas
      + " replicas");
  assertEquals("Unexpected number of cached blocks", expectedBlocks,
      numCachedBlocks);
  assertEquals("Unexpected number of cached replicas", expectedReplicas,
      numCachedReplicas);
}
 
Developer ID: naver, Project: hadoop, Lines: 31, Source: TestCacheDirectives.java
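The expression (len + blockSize - 1) / blockSize is the standard integer ceiling-division idiom. A worked example with assumed numbers:

// len = 130 MB, blockSize = 128 MB:
// (130 + 128 - 1) / 128 = 257 / 128 = 2, i.e. the file occupies 2 blocks.
long numBlocks = (130L + 128L - 1L) / 128L; // == 2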

Example 5: getFileBlockLocations

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Return an array containing hostnames, offset and size of
 * portions of the given file. For WASB we'll just lie and give
 * fake hosts to make sure we get many splits in MR jobs.
 */
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file,
    long start, long len) throws IOException {
  if (file == null) {
    return null;
  }

  if ((start < 0) || (len < 0)) {
    throw new IllegalArgumentException("Invalid start or len parameter");
  }

  if (file.getLen() < start) {
    return new BlockLocation[0];
  }
  final String blobLocationHost = getConf().get(
      AZURE_BLOCK_LOCATION_HOST_PROPERTY_NAME,
      AZURE_BLOCK_LOCATION_HOST_DEFAULT);
  final String[] name = { blobLocationHost };
  final String[] host = { blobLocationHost };
  long blockSize = file.getBlockSize();
  if (blockSize <= 0) {
    throw new IllegalArgumentException(
        "The block size for the given file is not a positive number: "
            + blockSize);
  }
  int numberOfLocations = (int) (len / blockSize)
      + ((len % blockSize == 0) ? 0 : 1);
  BlockLocation[] locations = new BlockLocation[numberOfLocations];
  for (int i = 0; i < locations.length; i++) {
    long currentOffset = start + (i * blockSize);
    long currentLength = Math.min(blockSize, start + len - currentOffset);
    locations[i] = new BlockLocation(name, host, currentOffset, currentLength);
  }
  return locations;
}
 
Developer ID: naver, Project: hadoop, Lines: 41, Source: NativeAzureFileSystem.java
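A quick check of the location math above with assumed numbers, a 300 MB requested range against a 128 MB block size:

long len = 300L * 1024 * 1024;        // requested range: 300 MB
long blockSize = 128L * 1024 * 1024;  // file block size: 128 MB
int numberOfLocations = (int) (len / blockSize)
    + ((len % blockSize == 0) ? 0 : 1);
System.out.println(numberOfLocations); // 3 fake locations: 128 + 128 + 44 MB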

Example 6: getBlockSize

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * @return the block size of the source file if we need to preserve either
 *         the block size or the checksum type. Otherwise the default block
 *         size of the target FS.
 */
private static long getBlockSize(
        EnumSet<FileAttribute> fileAttributes,
        FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
  boolean preserve = fileAttributes.contains(FileAttribute.BLOCKSIZE)
      || fileAttributes.contains(FileAttribute.CHECKSUMTYPE);
  return preserve ? sourceFile.getBlockSize() : targetFS
      .getDefaultBlockSize(tmpTargetPath);
}
 
Developer ID: naver, Project: hadoop, Lines: 14, Source: RetriableFileCopyCommand.java
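A hedged usage sketch for the helper above; sourceStatus, targetFS and tmpTargetPath are assumed to come from the surrounding copy command:

// Preserving CHECKSUMTYPE alone also forces the source block size, because
// HDFS file checksums are computed per block and would not match otherwise.
long blockSize = getBlockSize(
    EnumSet.of(FileAttribute.CHECKSUMTYPE), sourceStatus, targetFS, tmpTargetPath);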

Example 7: dumpTypedBytes

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * Dump given list of files to standard output as typed bytes.
 */
@SuppressWarnings("unchecked")
private int dumpTypedBytes(List<FileStatus> files) throws IOException {
  JobConf job = new JobConf(getConf()); 
  DataOutputStream dout = new DataOutputStream(System.out);
  AutoInputFormat autoInputFormat = new AutoInputFormat();
  for (FileStatus fileStatus : files) {
    // The split length is deliberately oversized (len * blockSize) so that a
    // single split covers the entire file; the record reader stops at EOF.
    FileSplit split = new FileSplit(fileStatus.getPath(), 0,
      fileStatus.getLen() * fileStatus.getBlockSize(),
      (String[]) null);
    RecordReader recReader = null;
    try {
      recReader = autoInputFormat.getRecordReader(split, job, Reporter.NULL);
      Object key = recReader.createKey();
      Object value = recReader.createValue();
      while (recReader.next(key, value)) {
        if (key instanceof Writable) {
          TypedBytesWritableOutput.get(dout).write((Writable) key);
        } else {
          TypedBytesOutput.get(dout).write(key);
        }
        if (value instanceof Writable) {
          TypedBytesWritableOutput.get(dout).write((Writable) value);
        } else {
          TypedBytesOutput.get(dout).write(value);
        }
      }
    } finally {
      if (recReader != null) {
        recReader.close();
      }
    }
  }
  dout.flush();
  return 0;
}
 
Developer ID: naver, Project: hadoop, Lines: 39, Source: DumpTypedBytes.java
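A hypothetical call, assuming fs is a configured FileSystem and /data/seq holds the files to dump:

List<FileStatus> files = Arrays.asList(fs.listStatus(new Path("/data/seq")));
int rc = dumpTypedBytes(files); // writes typed-bytes records to System.out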

Example 8: transform

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
private static FileStatus transform(FileStatus input, String bucket) {
  String relativePath = removeLeadingSlash(Path.getPathWithoutSchemeAndAuthority(input.getPath()).toString());
  Path bucketPath  = new Path(Path.SEPARATOR + bucket);
  Path fullPath = Strings.isEmpty(relativePath) ? bucketPath : new Path(bucketPath, relativePath);
  return new FileStatus(input.getLen(),
          input.isDirectory(),
          input.getReplication(),
          input.getBlockSize(),
          input.getModificationTime(),
          input.getAccessTime(),
          input.getPermission(),
          input.getOwner(),
          input.getGroup(),
          fullPath);
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 16, Source: S3FileSystem.java
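For illustration, a sketch of what the rewrite produces, using Hadoop's real Path.getPathWithoutSchemeAndAuthority and an assumed bucket name:

Path stripped = Path.getPathWithoutSchemeAndAuthority(
    new Path("s3a://mybucket/some/key"));   // -> /some/key
// transform(status, "mybucket") then re-roots the path as /mybucket/some/key,
// carrying over length, block size, and the other attributes unchanged.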

Example 9: getFileLists

import org.apache.hadoop.fs.FileStatus; // import the package/class the method depends on
/**
 * @return <expected, gotten, backup>, where each is sorted
 */
private static List<List<String>> getFileLists(FileStatus[] previous, FileStatus[] archived) {
  List<List<String>> files = new ArrayList<List<String>>();

  // copy over the original files
  List<String> originalFileNames = convertToString(previous);
  files.add(originalFileNames);

  List<String> currentFiles = new ArrayList<String>(previous.length);
  List<FileStatus> backedupFiles = new ArrayList<FileStatus>(previous.length);
  for (FileStatus f : archived) {
    String name = f.getPath().getName();
    // if the file has been backed up
    if (name.contains(".")) {
      Path parent = f.getPath().getParent();
      String shortName = name.split("[.]")[0];
      Path modPath = new Path(parent, shortName);
      FileStatus file = new FileStatus(f.getLen(), f.isDirectory(), f.getReplication(),
          f.getBlockSize(), f.getModificationTime(), modPath);
      backedupFiles.add(file);
    } else {
      // otherwise, add it to the list to compare to the original store files
      currentFiles.add(name);
    }
  }

  files.add(currentFiles);
  files.add(convertToString(backedupFiles));
  return files;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: HFileArchiveTestingUtil.java


Note: the org.apache.hadoop.fs.FileStatus.getBlockSize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Consult each project's License before redistributing or reusing the code. Do not reproduce this article without permission.