当前位置: 首页>>代码示例>>Java>>正文


Java ByteBufferUtils.putLong方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.util.ByteBufferUtils.putLong方法的典型用法代码示例。如果您正苦于以下问题:Java ByteBufferUtils.putLong方法的具体用法?Java ByteBufferUtils.putLong怎么用?Java ByteBufferUtils.putLong使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.util.ByteBufferUtils的用法示例。


在下文中一共展示了ByteBufferUtils.putLong方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: createCellReference

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Serializes a reference to the given cell into the index buffer starting at
 * {@code idxOffset}. The reference layout is: data chunk id (int), offset of
 * the cell inside that chunk (int), serialized cell length (int), and the
 * cell's sequence id (long).
 *
 * @return the offset in {@code idxBuffer} immediately after the written reference
 */
private int createCellReference(ByteBufferKeyValue cell, ByteBuffer idxBuffer, int idxOffset) {
  int pos = idxOffset;
  // id of the chunk that holds the cell's serialized bytes
  pos = ByteBufferUtils.putInt(idxBuffer, pos, cell.getChunkId());
  // position of the cell inside that data chunk
  pos = ByteBufferUtils.putInt(idxBuffer, pos, cell.getOffset());
  // total serialized length of the cell
  pos = ByteBufferUtils.putInt(idxBuffer, pos, KeyValueUtil.length(cell));
  // mvcc sequence id of the cell
  pos = ByteBufferUtils.putLong(idxBuffer, pos, cell.getSequenceId());
  return pos;
}
 
开发者ID:apache,项目名称:hbase,代码行数:12,代码来源:CellChunkImmutableSegment.java

示例2: appendKeyTo

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Writes the key portion of {@code cell} into {@code buf} starting at
 * {@code offset}: row-key length (short), row bytes, family length (byte),
 * family bytes, qualifier bytes, timestamp (long), and type byte.
 *
 * @return the offset just past the last byte written
 */
public static int appendKeyTo(Cell cell, ByteBuffer buf, int offset) {
  int pos = offset;
  pos = ByteBufferUtils.putShort(buf, pos, cell.getRowLength());   // row key length
  pos = CellUtil.copyRowTo(cell, buf, pos);                        // row key bytes
  pos = ByteBufferUtils.putByte(buf, pos, cell.getFamilyLength()); // family length
  pos = CellUtil.copyFamilyTo(cell, buf, pos);                     // family bytes
  pos = CellUtil.copyQualifierTo(cell, buf, pos);                  // qualifier bytes
  pos = ByteBufferUtils.putLong(buf, pos, cell.getTimestamp());    // timestamp
  pos = ByteBufferUtils.putByte(buf, pos, cell.getTypeByte());     // type byte
  return pos;
}
 
开发者ID:apache,项目名称:hbase,代码行数:11,代码来源:KeyValueUtil.java

示例3: setUpCellChunkMap

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Builds a {@link CellChunkMap} over the test cells: the cells' full data is
 * copied into data chunks, and per-cell references (chunk id, offset, length,
 * sequence id) are written into index chunks that back the map.
 *
 * @param asc whether to use the ascending ({@code ascCells}) or descending
 *            ({@code descCells}) test fixture
 * @return a CellChunkMap over {@code NUM_OF_CELLS} cells; descending when
 *         {@code asc} is false
 */
private CellChunkMap setUpCellChunkMap(boolean asc) {

    // data chunks hold the serialized cells; index chunks hold the cell references
    Chunk dataChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
    Chunk indexChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
    // index chunks collected here become the basis of the CellChunkMap;
    // 8 slots are more than enough for the cells the current tests write
    Chunk[] indexChunks = new Chunk[8];
    int indexChunkCount = 0;
    indexChunks[indexChunkCount++] = indexChunk;

    ByteBuffer indexBuffer = indexChunk.getData();
    ByteBuffer dataBuffer = dataChunk.getData();
    // both write cursors start past the chunk-id header
    int dataPos = ChunkCreator.SIZEOF_CHUNK_HEADER;
    int indexPos = ChunkCreator.SIZEOF_CHUNK_HEADER;

    Cell[] cells = asc ? ascCells : descCells;

    for (Cell cell : cells) {
      // roll over to a fresh data chunk when this cell no longer fits
      if (dataPos + KeyValueUtil.length(cell) > chunkCreator.getChunkSize()) {
        dataChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
        dataBuffer = dataChunk.getData();
        dataPos = ChunkCreator.SIZEOF_CHUNK_HEADER;
      }
      int cellDataStart = dataPos;
      dataPos = KeyValueUtil.appendTo(cell, dataBuffer, dataPos, false); // deep-copy the cell

      // roll over to a fresh index chunk when the next reference no longer fits
      if (indexPos + ClassSize.CELL_CHUNK_MAP_ENTRY > chunkCreator.getChunkSize()) {
        indexChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
        indexBuffer = indexChunk.getData();
        indexPos = ChunkCreator.SIZEOF_CHUNK_HEADER;
        indexChunks[indexChunkCount++] = indexChunk;
      }
      // cell reference: data chunk id, offset, length, sequence id
      indexPos = ByteBufferUtils.putInt(indexBuffer, indexPos, dataChunk.getId());
      indexPos = ByteBufferUtils.putInt(indexBuffer, indexPos, cellDataStart);
      indexPos = ByteBufferUtils.putInt(indexBuffer, indexPos, KeyValueUtil.length(cell));
      indexPos = ByteBufferUtils.putLong(indexBuffer, indexPos, cell.getSequenceId());
    }

    return new CellChunkMap(CellComparator.getInstance(), indexChunks, 0, NUM_OF_CELLS, !asc);
  }
 
开发者ID:apache,项目名称:hbase,代码行数:46,代码来源:TestCellFlatSet.java

示例4: setUpJumboCellChunkMap

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Builds a {@link CellChunkMap} like {@code setUpCellChunkMap}, but stores each
 * cell's data in its own small "jumbo" data chunk (one cell per jumbo chunk),
 * while normal-sized index chunks hold the per-cell references.
 *
 * @param asc whether to use the ascending ({@code ascCells}) or descending
 *            ({@code descCells}) test fixture
 * @return a CellChunkMap over {@code NUM_OF_CELLS} cells; descending when
 *         {@code asc} is false
 */
private CellChunkMap setUpJumboCellChunkMap(boolean asc) {
  int jumboChunkSize = SMALL_CHUNK_SIZE + 8;
  // the jumbo data chunk holds a single cell's data; the normal index chunk
  // holds the cell references
  Chunk dataJumboChunk =
      chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP, jumboChunkSize);
  Chunk indexChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
  // index chunks collected here become the basis of the CellChunkMap;
  // 8 slots are more than enough for the cells the current tests write
  Chunk[] indexChunks = new Chunk[8];
  int indexChunkCount = 0;
  indexChunks[indexChunkCount++] = indexChunk;

  ByteBuffer indexBuffer = indexChunk.getData();
  ByteBuffer dataBuffer = dataJumboChunk.getData();
  // both write cursors start past the chunk-id header
  int dataPos = ChunkCreator.SIZEOF_CHUNK_HEADER;
  int indexPos = ChunkCreator.SIZEOF_CHUNK_HEADER;

  Cell[] cells = asc ? ascCells : descCells;

  for (Cell cell : cells) {
    int cellDataStart = dataPos;
    dataPos = KeyValueUtil.appendTo(cell, dataBuffer, dataPos, false); // deep-copy the cell

    // roll over to a fresh index chunk when the next reference no longer fits
    if (indexPos + ClassSize.CELL_CHUNK_MAP_ENTRY > chunkCreator.getChunkSize()) {
      indexChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
      indexBuffer = indexChunk.getData();
      indexPos = ChunkCreator.SIZEOF_CHUNK_HEADER;
      indexChunks[indexChunkCount++] = indexChunk;
    }
    // cell reference: jumbo data chunk id, offset, length, sequence id
    indexPos = ByteBufferUtils.putInt(indexBuffer, indexPos, dataJumboChunk.getId());
    indexPos = ByteBufferUtils.putInt(indexBuffer, indexPos, cellDataStart);
    indexPos = ByteBufferUtils.putInt(indexBuffer, indexPos, KeyValueUtil.length(cell));
    indexPos = ByteBufferUtils.putLong(indexBuffer, indexPos, cell.getSequenceId());

    // jumbo chunks carry exactly one cell each, so allocate a fresh one
    // for the next iteration
    dataJumboChunk = chunkCreator.getChunk(CompactingMemStore.IndexType.CHUNK_MAP, jumboChunkSize);
    dataBuffer = dataJumboChunk.getData();
    dataPos = ChunkCreator.SIZEOF_CHUNK_HEADER;
  }

  return new CellChunkMap(CellComparator.getInstance(), indexChunks, 0, NUM_OF_CELLS, !asc);
}
 
开发者ID:apache,项目名称:hbase,代码行数:47,代码来源:TestCellFlatSet.java

示例5: putLong

import org.apache.hadoop.hbase.util.ByteBufferUtils; //导入方法依赖的package包/类
/**
 * Writes {@code value} into the single backing buffer via
 * {@link ByteBufferUtils#putLong} and returns this buff for call chaining.
 */
@Override
public SingleByteBuff putLong(long value) {
  ByteBufferUtils.putLong(buf, value);
  return this;
}
 
开发者ID:apache,项目名称:hbase,代码行数:6,代码来源:SingleByteBuff.java


注:本文中的org.apache.hadoop.hbase.util.ByteBufferUtils.putLong方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。