

Java DataChecksum.newDataChecksum Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.util.DataChecksum.newDataChecksum. If you are wondering what exactly DataChecksum.newDataChecksum does, how to use it, or where to find concrete examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.util.DataChecksum.


The sections below present 15 code examples of the DataChecksum.newDataChecksum method, sorted by popularity by default.
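Before the examples, a minimal usage sketch may help orient readers. This snippet is illustrative and not drawn from any of the projects below; it only assumes a Hadoop client library on the classpath. DataChecksum.newDataChecksum(type, bytesPerChecksum) returns a DataChecksum, which implements java.util.zip.Checksum, so data can be fed through update() and the result read back with getValue():

import org.apache.hadoop.util.DataChecksum;

public class DataChecksumDemo {
  public static void main(String[] args) {
    // Create a CRC32 checksum that covers the data in 512-byte chunks.
    DataChecksum sum = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32, 512);

    byte[] data = "hello, checksum".getBytes();
    sum.update(data, 0, data.length);  // java.util.zip.Checksum interface
    System.out.println("checksum value  = " + sum.getValue());
    System.out.println("bytes per sum   = " + sum.getBytesPerChecksum());
    System.out.println("checksum size   = " + sum.getChecksumSize());
  }
}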

Example 1: ChecksumFSOutputSummer

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 23, Source file: ChecksumFileSystem.java

Example 2: IFileInputStream

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Create a checksum input stream that verifies the bytes it reads
 * against the trailing checksum.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 * @param conf Configuration supplying the readahead settings.
 */
public IFileInputStream(InputStream in, long len, Configuration conf) {
  this.in = in;
  this.inFd = getFileDescriptorIfAvail(in);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 
      Integer.MAX_VALUE);
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;

  conf = (conf != null) ? conf : new Configuration();
  readahead = conf.getBoolean(MRConfig.MAPRED_IFILE_READAHEAD,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD);
  readaheadLength = conf.getInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD_BYTES);

  doReadahead();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: IFileInputStream.java

Example 3: preadHeader

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Read the header without changing the position of the FileChannel.
 *
 * @param fc The FileChannel to read.
 * @return the Metadata Header.
 * @throws IOException on error.
 */
public static BlockMetadataHeader preadHeader(FileChannel fc)
    throws IOException {
  final byte arr[] = new byte[getHeaderSize()];
  ByteBuffer buf = ByteBuffer.wrap(arr);

  while (buf.hasRemaining()) {
    if (fc.read(buf, 0) <= 0) {
      throw new EOFException("unexpected EOF while reading " +
          "metadata file header");
    }
  }
  short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
  return new BlockMetadataHeader(version, dataChecksum);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: BlockMetadataHeader.java

Example 4: TestFileDescriptorPair

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: TestShortCircuitCache.java

Example 5: generateChecksums

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Generates a checksum for all the data in indata. The checksum is
 * written to outdata.
 * @param indata input data stream
 * @param startOffset starting offset in the indata stream from where to
 *                    compute checksums
 * @param endOffset ending offset in the indata stream up to
 *                  which checksums need to be computed
 * @param outdata the output buffer where checksum values are written
 * @param outOffset the starting offset in the outdata where the
 *                  checksum values are written
 * @param checksumType type of checksum
 * @param bytesPerChecksum number of bytes per checksum value
 */
static void generateChecksums(byte[] indata, int startOffset, int endOffset,
  byte[] outdata, int outOffset, ChecksumType checksumType,
  int bytesPerChecksum) throws IOException {

  if (checksumType == ChecksumType.NULL) {
    return; // No checksum for this block.
  }

  DataChecksum checksum = DataChecksum.newDataChecksum(
      checksumType.getDataChecksumType(), bytesPerChecksum);

  checksum.calculateChunkedSums(
     ByteBuffer.wrap(indata, startOffset, endOffset - startOffset),
     ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source file: ChecksumUtil.java
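As a complement to generateChecksums above (this sketch is not part of the original source), the inverse operation verifies data against previously computed chunked sums. It relies on DataChecksum.verifyChunkedSums, which throws org.apache.hadoop.fs.ChecksumException on a mismatch; the helper name verifyChecksums and its parameters are hypothetical, chosen to mirror the example above:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumVerifyDemo {
  // Hypothetical counterpart to generateChecksums: recompute the chunked
  // sums over indata and compare them with the checksum bytes in sums.
  static void verifyChecksums(byte[] indata, byte[] sums,
      DataChecksum.Type type, int bytesPerChecksum) throws IOException {
    DataChecksum checksum =
        DataChecksum.newDataChecksum(type, bytesPerChecksum);
    // Throws ChecksumException (an IOException) on any mismatch; the
    // name and base position are used only in the error message.
    checksum.verifyChunkedSums(ByteBuffer.wrap(indata),
        ByteBuffer.wrap(sums), "demo-buffer", 0);
  }
}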

Example 6: testGetMetaData

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
@Test
public void testGetMetaData() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
  try {
    assertTrue(fsdataset.getMetaDataInputStream(b) == null);
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new ExtendedBlock(bpid, 1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(BlockMetadataHeader.VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
 
Developer ID: yncxcw, Project: big-c, Lines of code: 21, Source file: TestSimulatedFSDataset.java

Example 7: ChecksumFSOutputSummer

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksumming internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
       checksumOpt,  createParent);
  
  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 28, Source file: ChecksumFs.java

Example 8: IFileOutputStream

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out the underlying output stream to write to
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      Integer.MAX_VALUE);
  barray = new byte[sum.getChecksumSize()];
}
 
Developer ID: naver, Project: hadoop, Lines of code: 12, Source file: IFileOutputStream.java

Example 9: getChecksum4Compute

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/** 
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute checksum for writing to single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 14, Source file: DFSOutputStream.java

Example 10: BlockReaderLocalLegacy

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private BlockReaderLocalLegacy(DFSClient.Conf conf, String hdfsfile,
    ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
    long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
    throws IOException {
  this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
      DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
      dataIn, startOffset, null);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source file: BlockReaderLocalLegacy.java

Example 11: fromProto

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
public static DataChecksum fromProto(ChecksumProto proto) {
  if (proto == null) return null;

  int bytesPerChecksum = proto.getBytesPerChecksum();
  DataChecksum.Type type = PBHelper.convert(proto.getType());
  return DataChecksum.newDataChecksum(type, bytesPerChecksum);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source file: DataTransferProtoUtil.java

Example 12: createChecksum

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private DataChecksum createChecksum(ChecksumOpt userOpt) {
  // Fill in any missing field with the default.
  ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
      defaultChecksumOpt, userOpt);
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(
      myOpt.getChecksumType(),
      myOpt.getBytesPerChecksum());
  if (dataChecksum == null) {
    throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
        + userOpt + ", default=" + defaultChecksumOpt
        + ", effective=null");
  }
  return dataChecksum;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source file: DFSClient.java

Example 13: getChecksum4Compute

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/**
 * @return the object for computing checksum.
 *         The type is NULL if checksum is not computed.
 */
private static DataChecksum getChecksum4Compute(DataChecksum checksum,
    HdfsFileStatus stat) {
  if (DataStreamer.isLazyPersist(stat) && stat.getReplication() == 1) {
    // do not compute checksum for writing to single replica to memory
    return DataChecksum.newDataChecksum(Type.NULL,
        checksum.getBytesPerChecksum());
  }
  return checksum;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 14, Source file: DFSOutputStream.java

Example 14: BlockReaderLocalLegacy

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
    ExtendedBlock block, long startOffset, FileInputStream dataIn,
    Tracer tracer) throws IOException {
  this(conf, hdfsfile, block, startOffset,
      DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
      dataIn, startOffset, null, tracer);
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 8, Source file: BlockReaderLocalLegacy.java

Example 15: createChecksum

import org.apache.hadoop.util.DataChecksum; // import the package/class this method depends on
/** create a DataChecksum with the given option. */
public DataChecksum createChecksum(ChecksumOpt userOpt) {
  // Fill in any missing field with the default.
  ChecksumOpt opt = ChecksumOpt.processChecksumOpt(
      defaultChecksumOpt, userOpt);
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(
      opt.getChecksumType(),
      opt.getBytesPerChecksum());
  if (dataChecksum == null) {
    throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
        + userOpt + ", default=" + defaultChecksumOpt
        + ", effective=null");
  }
  return dataChecksum;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 16, Source file: DfsClientConf.java


Note: The org.apache.hadoop.util.DataChecksum.newDataChecksum method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce without permission.