

Java FSDataOutputStream.getPos Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.getPos. If you are wondering what FSDataOutputStream.getPos does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.


The following presents 15 code examples of the FSDataOutputStream.getPos method, ordered by popularity. getPos() returns the current byte position in the output stream, which makes it the standard way to record where a record, block, or trailer begins before writing it and to compute its on-disk length afterwards.
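Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: call getPos() before writing a record, write the record, then derive its length from the new position. This is illustrative code written for this article, not taken from any of the projects below; the file path and payload are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetPosDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);        // local filesystem, for illustration only
    Path path = new Path("/tmp/getpos-demo.bin");     // hypothetical output path

    try (FSDataOutputStream out = fs.create(path, true)) {
      long recordStart = out.getPos();                // offset at which the record begins (0 for a new file)
      out.writeUTF("hello, getPos");                  // write an arbitrary payload
      long recordLength = out.getPos() - recordStart; // bytes the record occupies on disk
      System.out.println("record at offset " + recordStart
          + ", length " + recordLength + " bytes");
    }
  }
}

Examples 3, 7, and 10 below apply the same start-offset/length bookkeeping when recording spill partitions, model partitions, and job splits.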

Example 1: WBlockState

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          The underlying FS output stream that compressed data is written to.
 * @param fsOutputBuffer
 *          Reusable buffer backing the buffered output stream.
 * @param conf
 *          Configuration, used to size the FS output buffer.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 27, Source: BCFile.java

Example 2: writeTrailer

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public static void writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
    throws IOException {
  long offset = stream.getPos();

  // Write EOF Entry
  ProcedureWALEntry.newBuilder()
    .setType(ProcedureWALEntry.Type.EOF)
    .build().writeDelimitedTo(stream);

  // Write Tracker
  tracker.writeTo(stream);

  stream.write(TRAILER_VERSION);
  StreamUtils.writeLong(stream, TRAILER_MAGIC);
  StreamUtils.writeLong(stream, offset);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: ProcedureWALFormat.java

Example 3: copyPartitions

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void copyPartitions(Path mapOutputPath, Path indexPath)
  throws IOException {
  FileSystem localFs = FileSystem.getLocal(jobConf);
  FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
  FSDataOutputStream rawOutput = rfs.create(mapOutputPath, true, BUF_SIZE);
  SpillRecord spillRecord = new SpillRecord(numberOfPartitions);
  IndexRecord indexRecord = new IndexRecord();
  for (int i = 0; i < numberOfPartitions; i++) {
    indexRecord.startOffset = rawOutput.getPos();
    byte[] buffer = outStreams[i].toByteArray();
    IFileOutputStream checksumOutput = new IFileOutputStream(rawOutput);
    checksumOutput.write(buffer);
    // Write checksum.
    checksumOutput.finish();
    // Write index record
    indexRecord.rawLength = (long)buffer.length;
    indexRecord.partLength = rawOutput.getPos() - indexRecord.startOffset;
    spillRecord.putIndex(indexRecord, i);
    reporter.progress();
  }
  rawOutput.close();
  spillRecord.writeToFile(indexPath, jobConf);
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: TestMerge.java

Example 4: writeFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;
  
  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while(blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestBlockUnderConstruction.java

Example 5: writeFileInfo

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private long writeFileInfo(FSDataOutputStream o) throws IOException {
  if (this.lastKeyBuffer != null) {
    // Make a copy.  The copy is stuffed into HMapWritable.  Needs a clean
    // byte buffer.  Won't take a tuple.
    byte [] b = new byte[this.lastKeyLength];
    System.arraycopy(this.lastKeyBuffer, this.lastKeyOffset, b, 0,
      this.lastKeyLength);
    appendFileInfo(this.fileinfo, FileInfo.LASTKEY, b, false);
  }
  int avgKeyLen = this.entryCount == 0? 0:
    (int)(this.keylength/this.entryCount);
  appendFileInfo(this.fileinfo, FileInfo.AVG_KEY_LEN,
    Bytes.toBytes(avgKeyLen), false);
  int avgValueLen = this.entryCount == 0? 0:
    (int)(this.valuelength/this.entryCount);
  appendFileInfo(this.fileinfo, FileInfo.AVG_VALUE_LEN,
    Bytes.toBytes(avgValueLen), false);
  appendFileInfo(this.fileinfo, FileInfo.COMPARATOR,
    Bytes.toBytes(this.comparator.getClass().getName()), false);
  long pos = o.getPos();
  this.fileinfo.write(o);
  return pos;
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: IndexFile.java

Example 6: writeIntermediateBlock

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeIntermediateBlock(FSDataOutputStream out,
    BlockIndexChunk parent, BlockIndexChunk curChunk) throws IOException {
  long beginOffset = out.getPos();
  DataOutputStream dos = blockWriter.startWriting(
      BlockType.INTERMEDIATE_INDEX);
  curChunk.writeNonRoot(dos);
  byte[] curFirstKey = curChunk.getBlockKey(0);
  blockWriter.writeHeaderAndData(out);

  if (cacheConf != null) {
    HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
    cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
      beginOffset), blockForCaching);
  }

  // Add intermediate index block size
  totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
  totalBlockUncompressedSize +=
      blockWriter.getUncompressedSizeWithoutHeader();

  // OFFSET is the beginning offset of the chunk of block index entries.
  // SIZE is the total byte size of the chunk of block index entries
  // + the secondary index size
  // FIRST_KEY is the first key in the chunk of block index
  // entries.
  parent.add(curFirstKey, beginOffset,
      blockWriter.getOnDiskSizeWithHeader());

  // clear current block index chunk
  curChunk.clear();
  curFirstKey = null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: HFileBlockIndex.java

Example 7: savePartitions

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void savePartitions(Path matrixPath, FileSystem fs,
  List<Integer> partitionIds, int startPos, int endPos,
  PSModelFilesMeta serverMatrixMeta) throws IOException {

  Path destFile = new Path(matrixPath, ModelFilesUtils.fileName(context.getPs().getServerId(), partitionIds.get(startPos)));
  Path tmpDestFile = HdfsUtil.toTmpPath(destFile);

  FSDataOutputStream out = fs.create(tmpDestFile);
  long streamPos = 0;
  ServerPartition partition = null;
  for (int i = startPos; i < endPos; i++) {
    LOG.info("Write partition " + partitionIds.get(i) + " of matrix " + matrixName + " to "
      + tmpDestFile);
    streamPos = out.getPos();
    partition = partitionMaps.get(partitionIds.get(i));
    PartitionKey partKey = partition.getPartitionKey();
    ModelPartitionMeta partMeta = new ModelPartitionMeta(partKey.getPartitionId(), partKey.getStartRow(),
      partKey.getEndRow(), partKey.getStartCol(), partKey.getEndCol(), partition.elementNum(),
      destFile.getName(), streamPos, 0);
    partition.save(out, partMeta);
    partMeta.setLength(out.getPos() - streamPos);
    serverMatrixMeta.addPartitionMeta(partitionIds.get(i), partMeta);
  }
  out.flush();
  out.close();
  HdfsUtil.rename(tmpDestFile, destFile, fs);
}
 
Developer: Tencent, Project: angel, Lines: 28, Source: ServerMatrix.java

Example 8: Writer

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Constructor
 * 
 * @param fout
 *          FS output stream.
 * @param compressionName
 *          Name of the compression algorithm, which will be used for all
 *          data blocks.
 * @throws IOException
 * @see Compression#getSupportedAlgorithms
 */
public Writer(FSDataOutputStream fout, String compressionName,
    Configuration conf) throws IOException {
  if (fout.getPos() != 0) {
    throw new IOException("Output file not at zero offset.");
  }

  this.out = fout;
  this.conf = conf;
  dataIndex = new DataIndex(compressionName);
  metaIndex = new MetaIndex();
  fsOutputBuffer = new BytesWritable();
  Magic.write(fout);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 25, Source: BCFile.java

Example 9: writeInlineBlocks

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeInlineBlocks(HFileBlock.Writer hbw,
    FSDataOutputStream outputStream, HFileBlockIndex.BlockIndexWriter biw,
    boolean isClosing) throws IOException {
  while (biw.shouldWriteBlock(isClosing)) {
    long offset = outputStream.getPos();
    biw.writeInlineBlock(hbw.startWriting(biw.getInlineBlockType()));
    hbw.writeHeaderAndData(outputStream);
    biw.blockWritten(offset, hbw.getOnDiskSizeWithHeader(),
        hbw.getUncompressedSizeWithoutHeader());
    LOG.info("Wrote an inline index block at " + offset + ", size " +
        hbw.getOnDiskSizeWithHeader());
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestHFileBlockIndex.java

Example 10: writeNewSplits

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@SuppressWarnings("unchecked")
private static <T extends InputSplit> 
SplitMetaInfo[] writeNewSplits(Configuration conf, 
    T[] array, FSDataOutputStream out)
throws IOException, InterruptedException {

  SplitMetaInfo[] info = new SplitMetaInfo[array.length];
  if (array.length != 0) {
    SerializationFactory factory = new SerializationFactory(conf);
    int i = 0;
    int maxBlockLocations = conf.getInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY,
        MRConfig.MAX_BLOCK_LOCATIONS_DEFAULT);
    long offset = out.getPos();
    for(T split: array) {
      long prevCount = out.getPos();
      Text.writeString(out, split.getClass().getName());
      Serializer<T> serializer = 
        factory.getSerializer((Class<T>) split.getClass());
      serializer.open(out);
      serializer.serialize(split);
      long currCount = out.getPos();
      String[] locations = split.getLocations();
      if (locations.length > maxBlockLocations) {
        LOG.warn("Max block location exceeded for split: "
            + split + " splitsize: " + locations.length +
            " maxsize: " + maxBlockLocations);
        locations = Arrays.copyOf(locations, maxBlockLocations);
      }
      info[i++] = 
        new JobSplit.SplitMetaInfo( 
            locations, offset,
            split.getLength());
      offset += currCount - prevCount;
    }
  }
  return info;
}
 
Developer: naver, Project: hadoop, Lines: 38, Source: JobSplitWriter.java

Example 11: writeHeaderAndData

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Similar to {@link #writeHeaderAndData(FSDataOutputStream)}, but records
 * the offset of this block so that it can be referenced in the next block
 * of the same type.
 *
 * @param out
 * @throws IOException
 */
public void writeHeaderAndData(FSDataOutputStream out) throws IOException {
  long offset = out.getPos();
  if (startOffset != -1 && offset != startOffset) {
    throw new IOException("A " + blockType + " block written to a "
        + "stream twice, first at offset " + startOffset + ", then at "
        + offset);
  }
  startOffset = offset;

  finishBlockAndWriteHeaderAndData((DataOutputStream) out);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: HFileBlock.java

Example 12: writeToStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public void writeToStream(FSDataOutputStream stream) throws IOException {
  Stopwatch watch = Stopwatch.createStarted();
  ByteBuf buf = null;
  try {
    check = ThreadLocalRandom.current().nextLong();
    start = stream.getPos();
    logger.debug("Writing check value {} at position {}", check, start);
    stream.writeLong(check);
    batch.getHeader().writeDelimitedTo(stream);
    buf = batch.getBody();
    if (buf != null) {
      bodyLength = buf.capacity();
    } else {
      bodyLength = 0;
    }
    if (bodyLength > 0) {
      buf.getBytes(0, stream, bodyLength);
    }
    stream.hsync();
    FileStatus status = spillFile.getFileStatus();
    long len = status.getLen();
    logger.debug("After spooling batch, stream at position {}. File length {}", stream.getPos(), len);
    long t = watch.elapsed(TimeUnit.MICROSECONDS);
    logger.debug("Took {} us to spool {} to disk. Rate {} mb/s", t, bodyLength, bodyLength / t);
  } finally {
    // even if the try block throws an exception we still want to send an ACK and release the lock
    // the caller will add the exception to deferred attribute and it will be thrown when the poll() method is called
    try {
      batch.sendOk(); // this can also throw an exception
    } finally {
      state = BatchState.SPILLED;
      batch = null;
      if (buf != null) {
        buf.release();
      }
    }
  }
}
 
Developer: dremio, Project: dremio-oss, Lines: 39, Source: SpoolingRawBatchBuffer.java

Example 13: CryptoFSDataOutputStream

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
    int bufferSize, byte[] key, byte[] iv) throws IOException {
  super(new CryptoOutputStream(out, codec, bufferSize, key, iv, 
      out.getPos()), null, out.getPos()); 
  this.fsOut = out;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 7, Source: CryptoFSDataOutputStream.java

Example 14: writeWholeIndex

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeWholeIndex(boolean useTags) throws IOException {
  assertEquals(0, keys.size());
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTags)
                      .withCompression(compr)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null,
      meta);
  FSDataOutputStream outputStream = fs.create(path);
  HFileBlockIndex.BlockIndexWriter biw =
      new HFileBlockIndex.BlockIndexWriter(hbw, null, null);

  for (int i = 0; i < NUM_DATA_BLOCKS; ++i) {
    hbw.startWriting(BlockType.DATA).write(String.valueOf(rand.nextInt(1000)).getBytes());
    long blockOffset = outputStream.getPos();
    hbw.writeHeaderAndData(outputStream);

    byte[] firstKey = null;
    byte[] family = Bytes.toBytes("f");
    byte[] qualifier = Bytes.toBytes("q");
    for (int j = 0; j < 16; ++j) {
      byte[] k =
          new KeyValue(TestHFileWriterV2.randomOrderedKey(rand, i * 16 + j), family, qualifier,
              EnvironmentEdgeManager.currentTime(), KeyValue.Type.Put).getKey();
      keys.add(k);
      if (j == 8) {
        firstKey = k;
      }
    }
    assertTrue(firstKey != null);
    if (firstKeyInFile == null) {
      firstKeyInFile = firstKey;
    }
    biw.addEntry(firstKey, blockOffset, hbw.getOnDiskSizeWithHeader());

    writeInlineBlocks(hbw, outputStream, biw, false);
  }
  writeInlineBlocks(hbw, outputStream, biw, true);
  rootIndexOffset = biw.writeIndexBlocks(outputStream);
  outputStream.close();

  numLevels = biw.getNumLevels();
  numRootEntries = biw.getNumRootEntries();

  LOG.info("Index written: numLevels=" + numLevels + ", numRootEntries=" +
      numRootEntries + ", rootIndexOffset=" + rootIndexOffset);
}
 
Developer: fengchen8086, Project: ditb, Lines: 51, Source: TestHFileBlockIndex.java

Example 15: writeBlocks

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private long writeBlocks(Random rand, Compression.Algorithm compressAlgo,
    Path path, List<Long> expectedOffsets, List<Long> expectedPrevOffsets,
    List<BlockType> expectedTypes, List<ByteBuffer> expectedContents
) throws IOException {
  boolean cacheOnWrite = expectedContents != null;
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(true)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withCompression(compressAlgo)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  Map<BlockType, Long> prevOffsetByType = new HashMap<BlockType, Long>();
  long totalSize = 0;
  for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
    long pos = os.getPos();
    int blockTypeOrdinal = rand.nextInt(BlockType.values().length);
    if (blockTypeOrdinal == BlockType.ENCODED_DATA.ordinal()) {
      blockTypeOrdinal = BlockType.DATA.ordinal();
    }
    BlockType bt = BlockType.values()[blockTypeOrdinal];
    DataOutputStream dos = hbw.startWriting(bt);
    int size = rand.nextInt(500);
    for (int j = 0; j < size; ++j) {
      // This might compress well.
      dos.writeShort(i + 1);
      dos.writeInt(j + 1);
    }

    if (expectedOffsets != null)
      expectedOffsets.add(os.getPos());

    if (expectedPrevOffsets != null) {
      Long prevOffset = prevOffsetByType.get(bt);
      expectedPrevOffsets.add(prevOffset != null ? prevOffset : -1);
      prevOffsetByType.put(bt, os.getPos());
    }

    expectedTypes.add(bt);

    hbw.writeHeaderAndData(os);
    totalSize += hbw.getOnDiskSizeWithHeader();

    if (cacheOnWrite)
      expectedContents.add(hbw.getUncompressedBufferWithHeader());

    if (detailedLogging) {
      LOG.info("Written block #" + i + " of type " + bt
          + ", uncompressed size " + hbw.getUncompressedSizeWithoutHeader()
          + ", packed size " + hbw.getOnDiskSizeWithoutHeader()
          + " at offset " + pos);
    }
  }
  os.close();
  LOG.info("Created a temporary file at " + path + ", "
      + fs.getFileStatus(path).getLen() + " byte, compression=" +
      compressAlgo);
  return totalSize;
}
 
Developer: fengchen8086, Project: ditb, Lines: 62, Source: TestHFileBlock.java


Note: The org.apache.hadoop.fs.FSDataOutputStream.getPos examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce this article without permission.