

Java PureJavaCrc32 Class Code Examples

This article collects and organizes typical usage examples of the Java class org.apache.hadoop.util.PureJavaCrc32. If you are wondering what the PureJavaCrc32 class is for, how to use it, or what real-world usage looks like, the curated code samples here should help.


The PureJavaCrc32 class belongs to the org.apache.hadoop.util package; it is a pure-Java implementation of the CRC-32 checksum, using the same polynomial as, and implementing the same java.util.zip.Checksum interface as, the JDK's built-in java.util.zip.CRC32. The sections below show 15 code examples of the class, sorted by popularity by default.
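Before the examples, here is a minimal, self-contained usage sketch (not taken from the projects below, and assuming only the public java.util.zip.Checksum contract that PureJavaCrc32 implements). It shows the basic reset/update/getValue cycle and checks agreement with the JDK's built-in CRC32:

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;
import org.apache.hadoop.util.PureJavaCrc32;

public class PureJavaCrc32Demo {
  public static void main(String[] args) {
    byte[] data = "hello, crc".getBytes(StandardCharsets.UTF_8);

    // Pure-Java implementation: same Checksum interface as java.util.zip.CRC32.
    PureJavaCrc32 crc = new PureJavaCrc32();
    crc.reset();                      // redundant right after construction; shown for the API
    crc.update(data, 0, data.length);
    long pureValue = crc.getValue();

    // The built-in CRC32 uses the same polynomial, so the values must agree.
    CRC32 builtin = new CRC32();
    builtin.update(data, 0, data.length);
    System.out.println(pureValue == builtin.getValue()); // prints: true
  }
}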

Example 1: createIndexFile

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
private static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
 
Developer ID: naver; Project: hadoop; Lines: 20; Source: TestShuffleHandler.java
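As a counterpart to the writer above, a reader can verify the record with CheckedInputStream. This is a hedged sketch, not code from the project: it assumes the layout produced by createIndexFile (one fixed-length record followed by a long holding its CRC-32), and the hypothetical RECORD_LENGTH constant mirrors MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH (24 bytes) in the Hadoop versions these examples target:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.zip.CheckedInputStream;
import org.apache.hadoop.util.PureJavaCrc32;

public class IndexFileVerifier {
  // Mirrors MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH (24 bytes: three longs).
  static final int RECORD_LENGTH = 24;

  /** Returns true if the trailing checksum matches the record bytes. */
  public static boolean verify(String path) throws IOException {
    PureJavaCrc32 crc = new PureJavaCrc32();
    try (DataInputStream in = new DataInputStream(
             new CheckedInputStream(new FileInputStream(path), crc))) {
      byte[] record = new byte[RECORD_LENGTH];
      in.readFully(record);                 // updates the CRC as it reads
      long computed = crc.getValue();       // capture before reading the stored value
      long stored = in.readLong();          // the long written by the writer above
      return stored == computed;
    }
  }
}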

Example 2: ChecksumFSOutputSummer

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      int bytesPerChecksum,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4, null);
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerChecksum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerChecksum);
}
 
Developer ID: rhli; Project: hadoop-EAR; Lines: 20; Source: ChecksumFileSystem.java

Example 3: createIndexFile

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines: 20; Source: TestShuffleHandler.java

Example 4: Reader

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
@SuppressWarnings("deprecation")
public Reader(DataInputStream in, StreamLimiter limiter,
    int logVersion) {
  this.logVersion = logVersion;
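  // Note: EDITS_CHESKUM below is the actual (misspelled) enum constant name
  // in Hadoop's LayoutVersion.Feature, preserved here as-is.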
  if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines: 26; Source: FSEditLogOp.java

Example 5: ChecksumFSOutputSummer

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                     replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
                                           sumBufferSize, replication,
                                           blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines: 20; Source: ChecksumFileSystem.java

Example 6: ChecksumFSOutputSummer

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file, 
  final EnumSet<CreateFlag> createFlag,
  final FsPermission absolutePermission, final int bufferSize,
  final short replication, final long blockSize, 
  final Progressable progress, final ChecksumOpt checksumOpt,
  final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs
  // implements checksums internally, checksumOpt will be ignored.
  // If the raw fs does checksum internally, we will end up with
  // two layers of checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
      checksumOpt, createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Developer ID: ict-carch; Project: hadoop-plus; Lines: 27; Source: ChecksumFs.java

Example 7: ChecksumFSOutputSummer

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                      Path file, 
                      boolean overwrite,
                      int bufferSize,
                      short replication,
                      long blockSize,
                      Progressable progress,
                      FsPermission permission)
  throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
                                     bufferSize, replication, blockSize,
                                     progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
                                           permission, true, sumBufferSize,
                                           replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
 
Developer ID: hopshadoop; Project: hops; Lines: 22; Source: ChecksumFileSystem.java

Example 8: Reader

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
 
Developer ID: Seagate; Project: hadoop-on-lustre2; Lines: 31; Source: FSEditLogOp.java

Example 9: DFSOutputStream

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize,
    Progressable progress, int bytesPerChecksum, short replication)
    throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4);
  this.dfsClient = dfsClient;
  this.conf = dfsClient.conf;
  this.src = src;
  // this.blockSize = blockSize;
  this.blockReplication = replication;
  this.progress = progress;
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
        + src);
  }

  if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
    throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum
        + ") and blockSize(" + blockSize + ") do not match. "
        + "blockSize should be a " + "multiple of io.bytes.per.checksum");

  }
  checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      bytesPerChecksum);
}
 
Developer ID: cumulusyebl; Project: cumulus; Lines: 25; Source: DFSOutputStream.java

Example 10: checkImages

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
private void checkImages(FSNamesystem fsn) throws Exception {
  Iterator<StorageDirectory> iter = fsn.
          getFSImage().dirIterator(FSImage.NameNodeDirType.IMAGE);
  List<Long> checksums = new ArrayList<Long>();
  while (iter.hasNext()) {
    StorageDirectory sd = iter.next();
    File fsImage = FSImage.getImageFile(sd, FSImage.NameNodeFile.IMAGE);
    PureJavaCrc32 crc = new PureJavaCrc32();
    FileInputStream in = new FileInputStream(fsImage);
    byte[] buff = new byte[4096];
    int read = 0;
    while ((read = in.read(buff)) != -1) {
      crc.update(buff, 0, read);
    }
    long val = crc.getValue();
    checksums.add(val);
  }
  assertTrue("Not enough fsimage copies in MiniDFSCluster " + 
             "to test parallel write", checksums.size() > 1);
  for (int i = 1; i < checksums.size(); i++) {
    assertEquals(checksums.get(i - 1), checksums.get(i));
  }
}
 
Developer ID: cumulusyebl; Project: cumulus; Lines: 24; Source: TestParallelImageWrite.java
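The loop above is a common pattern: stream a file through the checksum and compare values. A standalone sketch of the same pattern (a hypothetical helper, not from the project, with the stream closed via try-with-resources, which the test above omits):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.util.PureJavaCrc32;

public final class FileCrc32 {
  /** Streams the file at the given path through PureJavaCrc32 and returns its CRC-32. */
  public static long checksumOf(String path) throws IOException {
    PureJavaCrc32 crc = new PureJavaCrc32();
    byte[] buffer = new byte[4096];
    try (InputStream in = new FileInputStream(path)) { // closed automatically
      int read;
      while ((read = in.read(buffer)) != -1) {
        crc.update(buffer, 0, read);
      }
    }
    return crc.getValue();
  }
}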

Example 11: BlockXCodingMerger

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public BlockXCodingMerger(Block block, int namespaceId,
		DataInputStream[] childInputStreams, long offsetInBlock,
		long length, String[] childAddrs, String myAddr,
		DataTransferThrottler throttler,
		int mergerLevel) throws IOException{
	super();
	this.block = block;
	this.namespaceId = namespaceId;
	this.childInputStreams = childInputStreams;
	this.offsetInBlock = offsetInBlock;
	this.length = length;
	this.childAddrs = childAddrs;
	this.myAddr = myAddr;
	this.throttler = throttler;
	this.mergerLevel = mergerLevel;
	Configuration conf = new Configuration();
	this.packetSize = conf.getInt("raid.blockreconstruct.packetsize", 4096);
	this.bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
	this.checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
			bytesPerChecksum, new PureJavaCrc32());
	this.checksumSize = checksum.getChecksumSize();
}
 
Developer ID: iVCE; Project: RDFS; Lines: 23; Source: BlockXCodingMerger.java

Example 12: outputRecords

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
public static void outputRecords(OutputStream out,
                                 boolean useAscii,
                                 Unsigned16 firstRecordNumber,
                                 Unsigned16 recordsToGenerate,
                                 Unsigned16 checksum
                                 ) throws IOException {
  byte[] row = new byte[100];
  Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
  Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
  Checksum crc = new PureJavaCrc32();
  Unsigned16 tmp = new Unsigned16();
  lastRecordNumber.add(recordsToGenerate);
  Unsigned16 ONE = new Unsigned16(1);
  Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
  while (!recordNumber.equals(lastRecordNumber)) {
    Random16.nextRand(rand);
    if (useAscii) {
      generateAsciiRecord(row, rand, recordNumber);
    } else {
      generateRecord(row, rand, recordNumber);
    }
    if (checksum != null) {
      crc.reset();
      crc.update(row, 0, row.length);
      tmp.set(crc.getValue());
      checksum.add(tmp);
    }
    recordNumber.add(ONE);
    out.write(row);
  }
}
 
Developer ID: naver; Project: hadoop; Lines: 32; Source: GenSort.java

Example 13: IFileOutputStream

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      Integer.MAX_VALUE, new PureJavaCrc32());
  barray = new byte[sum.getChecksumSize()];
}
 
Developer ID: rhli; Project: hadoop-EAR; Lines: 12; Source: IFileOutputStream.java

Example 14: IFileInputStream

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
/**
 * Create a checksum input stream that reads
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 */
public IFileInputStream(InputStream in, long len) {
  this.in = in;
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 
      Integer.MAX_VALUE, new PureJavaCrc32());
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;
}
 
Developer ID: rhli; Project: hadoop-EAR; Lines: 14; Source: IFileInputStream.java

Example 15: BlockReaderAccelerator

import org.apache.hadoop.util.PureJavaCrc32; // import the required package/class
/**
 * object constructor
 */
public BlockReaderAccelerator(
  Configuration conf,
  InetSocketAddress targetAddress,
  DatanodeInfo chosenNode, 
  int dataTransferVersion,
  int namespaceId,
  String clientName,
  Socket sock,
  String hdfsfile, 
  LocatedBlock blk,
  long startOffset, 
  long length,
  boolean verifyChecksum,
  DFSClientMetrics metrics) throws IOException {

  this.conf = conf;
  this.targetAddress = targetAddress;
  this.datanodeInfo = chosenNode;
  this.dataTransferVersion = dataTransferVersion;
  this.namespaceId = namespaceId;
  this.clientName = clientName;
  this.sock = sock;
  this.hdfsfile = hdfsfile;
  this.blk = blk;
  this.startOffset = startOffset;
  this.length = length;
  this.verifyChecksum = verifyChecksum;
  this.metrics = metrics;

  // create a checksum checker
  if (this.verifyChecksum) {
    this.checker = new PureJavaCrc32();
  }
}
 
Developer ID: rhli; Project: hadoop-EAR; Lines: 38; Source: BlockReaderAccelerator.java


Note: The org.apache.hadoop.util.PureJavaCrc32 class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code. Do not repost without permission.