Java DataChecksum Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.util.DataChecksum. If you are wondering what the DataChecksum class is for, how to use it, or where to find examples of it, the curated class code examples below may help.


The DataChecksum class belongs to the org.apache.hadoop.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
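
Before the individual examples, here is a minimal standalone sketch of the typical DataChecksum lifecycle (create, feed data, read the value); the input bytes are made up for illustration:

import org.apache.hadoop.util.DataChecksum;

public class DataChecksumDemo {
  public static void main(String[] args) {
    // Create a CRC32C checksum computed over 512-byte chunks.
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
    byte[] data = "hello hadoop".getBytes();
    sum.update(data, 0, data.length);   // feed data (java.util.zip.Checksum API)
    long value = sum.getValue();        // current checksum value
    System.out.println("checksum=" + value + ", size=" + sum.getChecksumSize());
    sum.reset();                        // reuse the object for the next run
  }
}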

Example 1: getServerDefaults

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Return a set of server default configuration values
 * @return server default configuration values
 * @throws IOException
 * @deprecated use {@link #getServerDefaults(Path)} instead
 */
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
  Configuration conf = getConf();
  // CRC32 is chosen as default as it is available in all 
  // releases that support checksum.
  // The client trash configuration is ignored.
  return new FsServerDefaults(getDefaultBlockSize(), 
      conf.getInt("io.bytes.per.checksum", 512), 
      64 * 1024, 
      getDefaultReplication(),
      conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),
      false,
      FS_TRASH_INTERVAL_DEFAULT,
      DataChecksum.Type.CRC32);
}
 
Developer: nucypher; Project: hadoop-oss; Lines: 22; Source: FileSystem.java
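
A hedged usage sketch for the method above, assuming fs is an already-initialized FileSystem:

// Query the (deprecated) no-argument variant and inspect the checksum defaults.
FsServerDefaults defaults = fs.getServerDefaults();
int bytesPerChecksum = defaults.getBytesPerChecksum();  // 512 unless overridden
DataChecksum.Type type = defaults.getChecksumType();    // CRC32, per the code above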

Example 2: ChecksumFSOutputSummer

import org.apache.hadoop.util.DataChecksum; // import the required package/class
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
      fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Developer: nucypher; Project: hadoop-oss; Lines: 23; Source: ChecksumFileSystem.java
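
For intuition about the data/sums file pairing above, a back-of-the-envelope sketch of the checksum sidecar file's size (the 1 MiB figure is assumed, not from the source):

// Each 512-byte data chunk costs one CRC32 value (4 bytes), so the checksum
// file is roughly dataLength / 512 * 4 bytes plus its small header.
DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
long dataLength = 1L << 20;                      // 1 MiB of data (assumed)
long chunks = (dataLength + 511) / 512;          // 2048 chunks
long crcBytes = chunks * sum.getChecksumSize();  // 2048 * 4 = 8192 bytes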

Example 3: IFileInputStream

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Create a checksum input stream that verifies the data it reads.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 * @param conf Configuration; controls the readahead behavior.
 */
public IFileInputStream(InputStream in, long len, Configuration conf) {
  this.in = in;
  this.inFd = getFileDescriptorIfAvail(in);
  sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 
      Integer.MAX_VALUE);
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;

  conf = (conf != null) ? conf : new Configuration();
  readahead = conf.getBoolean(MRConfig.MAPRED_IFILE_READAHEAD,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD);
  readaheadLength = conf.getInt(MRConfig.MAPRED_IFILE_READAHEAD_BYTES,
      MRConfig.DEFAULT_MAPRED_IFILE_READAHEAD_BYTES);

  doReadahead();
}
 
Developer: naver; Project: hadoop; Lines: 23; Source: IFileInputStream.java
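
Note the Integer.MAX_VALUE above: the whole IFile payload is treated as a single chunk, so exactly one 4-byte CRC32 trails the data. A sketch of the resulting length arithmetic (the stream length is assumed):

DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, Integer.MAX_VALUE);
int checksumSize = sum.getChecksumSize();  // 4 bytes for CRC32
long length = 1024;                        // total stream length incl. checksum (assumed)
long dataLength = length - checksumSize;   // 1020 payload bytes precede the CRC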

Example 4: DFSOutputStream

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Developer: naver; Project: hadoop; Lines: 24; Source: DFSOutputStream.java
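
A hedged sketch of how a caller might assemble the CreateFlag set that drives the branch above:

// Append to the last partial block (no NEW_BLOCK flag) and sync when a block closes.
EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK);
boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);  // false: reuse the last block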

Example 5: newStreamForAppend

import org.apache.hadoop.util.DataChecksum; // import the required package/class
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
    LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
    String[] favoredNodes) throws IOException {
  TraceScope scope =
      dfsClient.getPathTraceScope("newStreamForAppend", src);
  try {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum);
    if (favoredNodes != null && favoredNodes.length != 0) {
      out.streamer.setFavoredNodes(favoredNodes);
    }
    out.start();
    return out;
  } finally {
    scope.close();
  }
}
 
Developer: naver; Project: hadoop; Lines: 19; Source: DFSOutputStream.java
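
The favoredNodes parameter above is applied only when non-null and non-empty; a caller-side sketch (the hostnames are made up):

// Hints passed through to the DataStreamer so new block replicas prefer these
// datanodes; null or an empty array leaves placement entirely to the namenode.
String[] favoredNodes = { "dn1.example.com:50010", "dn2.example.com:50010" };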

Example 6: RemoteBlockReader2

import org.apache.hadoop.util.DataChecksum; // import the required package/class
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
 
Developer: naver; Project: hadoop; Lines: 26; Source: RemoteBlockReader2.java
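
A worked instance of the chunk-alignment arithmetic described in the comment above (all numbers assumed):

long startOffset = 1000;      // the user wants to read from byte 1000
long firstChunkOffset = 512;  // nearest preceding boundary with 512-byte chunks
long bytesToRead = 4096;      // bytes the user actually asked for
// 488 padding bytes must be read and discarded to reach chunk alignment.
long bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);  // 4584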

Example 7: primitiveCreate

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 *  Progressable, int, ChecksumOpt)} except that the permission
 *  is absolute (i.e., it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, 
                           FsPermission absPermission,
                           EnumSet<CreateFlag> flag,
                           boolean createParent,
                           short replication,
                           long blockSize,
                           Progressable progress,
                           int buffersize,
                           ChecksumOpt checksumOpt)
    throws IOException, UnresolvedLinkException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
        flag, createParent, replication, blockSize, progress, buffersize,
        checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
 
Developer: naver; Project: hadoop; Lines: 28; Source: DFSClient.java
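
A sketch of building the ChecksumOpt consumed above (the type and chunk size are assumed):

// Request CRC32C over 512-byte chunks; dfsClientConf.createChecksum(checksumOpt)
// turns this into the DataChecksum handed to newStreamForCreate.
Options.ChecksumOpt checksumOpt =
    new Options.ChecksumOpt(DataChecksum.Type.CRC32C, 512);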

Example 8: Reader

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
 
Developer: naver; Project: hadoop; Lines: 31; Source: FSEditLogOp.java
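
The checksum wiring above is plain java.util.zip plumbing; a standalone sketch (rawStream stands in for the real edit-log stream):

Checksum crc = DataChecksum.newCrc32();  // pure-Java CRC32, no chunking
DataInputStream in = new DataInputStream(new CheckedInputStream(rawStream, crc));
// After reading an op, crc.getValue() holds the running checksum to compare
// against the value stored with the edit-log record.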

Example 9: preadHeader

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Read the header without changing the position of the FileChannel.
 *
 * @param fc The FileChannel to read.
 * @return the Metadata Header.
 * @throws IOException on error.
 */
public static BlockMetadataHeader preadHeader(FileChannel fc)
    throws IOException {
  final byte arr[] = new byte[getHeaderSize()];
  ByteBuffer buf = ByteBuffer.wrap(arr);

  while (buf.hasRemaining()) {
    if (fc.read(buf, 0) <= 0) {
      throw new EOFException("unexpected EOF while reading " +
          "metadata file header");
    }
  }
  short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
  return new BlockMetadataHeader(version, dataChecksum);
}
 
Developer: naver; Project: hadoop; Lines: 23; Source: BlockMetadataHeader.java
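
The two-byte big-endian version decode above, traced with assumed bytes:

byte[] arr = { 0x00, 0x01 /* checksum header bytes would follow */ };
// (0x00 << 8) | 0x01 == 1, i.e. the current BlockMetadataHeader version.
short version = (short)((arr[0] << 8) | (arr[1] & 0xff));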

Example 10: TestFileDescriptorPair

import org.apache.hadoop.util.DataChecksum; // import the required package/class
public TestFileDescriptorPair() throws IOException {
  fis = new FileInputStream[2];
  for (int i = 0; i < 2; i++) {
    String name = dir.getDir() + "/file" + i;
    FileOutputStream fos = new FileOutputStream(name);
    if (i == 0) {
      // write 'data' file
      fos.write(1);
    } else {
      // write 'metadata' file
      BlockMetadataHeader header =
          new BlockMetadataHeader((short)1,
              DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4));
      DataOutputStream dos = new DataOutputStream(fos);
      BlockMetadataHeader.writeHeader(dos, header);
      dos.close();
    }
    fos.close();
    fis[i] = new FileInputStream(name);
  }
}
 
Developer: naver; Project: hadoop; Lines: 22; Source: TestShortCircuitCache.java
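
The NULL checksum chosen for the metadata file above stores nothing per chunk; a quick standalone check:

DataChecksum nullSum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4);
assert nullSum.getChecksumSize() == 0;      // NULL checksums occupy zero bytes
assert nullSum.getBytesPerChecksum() == 4;  // the chunk size is recorded regardless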

Example 11: getServerDefaults

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Return a set of server default configuration values
 * @return server default configuration values
 * @throws IOException
 * @deprecated use {@link #getServerDefaults(Path)} instead
 */
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
  Configuration conf = getConf();
  // CRC32 is chosen as default as it is available in all 
  // releases that support checksum.
  // The client trash configuration is ignored.
  return new FsServerDefaults(getDefaultBlockSize(), 
      conf.getInt("io.bytes.per.checksum", 512), 
      64 * 1024, 
      getDefaultReplication(),
      conf.getInt("io.file.buffer.size", 4096),
      false,
      CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
      DataChecksum.Type.CRC32);
}
 
Developer: naver; Project: hadoop; Lines: 22; Source: FileSystem.java

Example 12: generateChecksums

import org.apache.hadoop.util.DataChecksum; // import the required package/class
/**
 * Generates a checksum for all the data in indata. The checksum is
 * written to outdata.
 * @param indata input data stream
 * @param startOffset starting offset in the indata stream from which to
 *                    compute checksums
 * @param endOffset ending offset in the indata stream up to
 *                   which checksums need to be computed
 * @param outdata the output buffer where checksum values are written
 * @param outOffset the starting offset in the outdata where the
 *                  checksum values are written
 * @param checksumType type of checksum
 * @param bytesPerChecksum number of bytes per checksum value
 */
static void generateChecksums(byte[] indata, int startOffset, int endOffset,
  byte[] outdata, int outOffset, ChecksumType checksumType,
  int bytesPerChecksum) throws IOException {

  if (checksumType == ChecksumType.NULL) {
    return; // No checksum for this block.
  }

  DataChecksum checksum = DataChecksum.newDataChecksum(
      checksumType.getDataChecksumType(), bytesPerChecksum);

  checksum.calculateChunkedSums(
     ByteBuffer.wrap(indata, startOffset, endOffset - startOffset),
     ByteBuffer.wrap(outdata, outOffset, outdata.length - outOffset));
}
 
Developer: fengchen8086; Project: ditb; Lines: 30; Source: ChecksumUtil.java
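
The outdata buffer passed to calculateChunkedSums above must be pre-sized to hold one checksum per chunk; a hedged sizing sketch:

int bytesPerChecksum = 512;
byte[] indata = new byte[4096];  // data to checksum (size assumed)
DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, bytesPerChecksum);
int chunks = (indata.length + bytesPerChecksum - 1) / bytesPerChecksum;  // 8 chunks
byte[] outdata = new byte[chunks * sum.getChecksumSize()];               // 8 * 4 = 32 bytes
sum.calculateChunkedSums(ByteBuffer.wrap(indata), ByteBuffer.wrap(outdata));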

Example 13: newStreamForAppend

import org.apache.hadoop.util.DataChecksum; // import the required package/class
static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
    throws IOException {
  if(stat.getErasureCodingPolicy() != null) {
    throw new IOException(
        "Not support appending to a striping layout file yet.");
  }
  try (TraceScope ignored =
           dfsClient.newPathTraceScope("newStreamForAppend", src)) {
    final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
        progress, lastBlock, stat, checksum, favoredNodes);
    out.start();
    return out;
  }
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 17; Source: DFSOutputStream.java

Example 14: RemoteBlockReader2

import org.apache.hadoop.util.DataChecksum; // import the required package/class
protected RemoteBlockReader2(String file, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
  this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
  this.tracer = tracer;
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 27; Source: RemoteBlockReader2.java

Example 15: testGetMetaData

import org.apache.hadoop.util.DataChecksum; // import the required package/class
@Test
public void testGetMetaData() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
  try {
    assertTrue(fsdataset.getMetaDataInputStream(b) == null);
    assertTrue("Expected an IO exception", false);
  } catch (IOException e) {
    // ok - as expected
  }
  addSomeBlocks(fsdataset); // Only need to add one but ....
  b = new ExtendedBlock(bpid, FIRST_BLK_ID, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  short version = metaDataInput.readShort();
  assertEquals(BlockMetadataHeader.VERSION, version);
  DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
  assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
  assertEquals(0, checksum.getChecksumSize());  
}
 
Developer: aliyun-beta; Project: aliyun-oss-hadoop-fs; Lines: 21; Source: TestSimulatedFSDataset.java
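
The stream-based factory used at the end of the test reads back the one-byte type id and four-byte bytesPerChecksum that writeHeader stores; a self-contained round-trip sketch:

DataChecksum written = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
written.writeHeader(new DataOutputStream(bos));  // type id + bytesPerChecksum
DataChecksum read = DataChecksum.newDataChecksum(
    new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
assert read.getChecksumType() == DataChecksum.Type.CRC32;
assert read.getBytesPerChecksum() == 512;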


Note: The org.apache.hadoop.util.DataChecksum class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult each project's License before distributing or using the code. Do not reproduce this article without permission.