

Java ClassSize Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.ClassSize. If you are wondering what ClassSize is for, how to use it, or what real-world code that uses it looks like, the hand-picked examples below should help.


The ClassSize class belongs to the org.apache.hadoop.hbase.util package. Fifteen code examples of the class are shown below, sorted by popularity by default.
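Before the examples, here is a minimal self-contained sketch of the pattern they all share (the class name and field layout below are illustrative, not taken from any example): sum the object header, reference fields, and primitive fields, then round the total up to the JVM's 8-byte allocation unit with ClassSize.align(). The layout constants are detected at runtime, so the printed values vary by JVM.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;

public class ClassSizeSketch {
  public static void main(String[] args) {
    // Runtime-detected layout constants (JVM-dependent).
    System.out.println("OBJECT=" + ClassSize.OBJECT
        + " ARRAY=" + ClassSize.ARRAY
        + " REFERENCE=" + ClassSize.REFERENCE);
    // Typical heapSize() arithmetic for an object with two reference
    // fields, one long and one int.
    long size = ClassSize.OBJECT
        + 2 * ClassSize.REFERENCE
        + Bytes.SIZEOF_LONG
        + Bytes.SIZEOF_INT;
    System.out.println("aligned heap size: " + ClassSize.align(size));
  }
}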

Example 1: heapSize

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
public long heapSize() {
  long heapsize = ClassSize.align(ClassSize.OBJECT +
      2 * Bytes.SIZEOF_INT + (3 + 1) * ClassSize.REFERENCE);
  //Calculating the size of blockKeys
  if(blockKeys != null) {
    //Adding array + references overhead
    heapsize += ClassSize.align(ClassSize.ARRAY +
        blockKeys.length * ClassSize.REFERENCE);
    //Adding bytes
    for(byte [] bs : blockKeys) {
      heapsize += ClassSize.align(ClassSize.ARRAY + bs.length);
    }
  }
  if(blockOffsets != null) {
    heapsize += ClassSize.align(ClassSize.ARRAY +
        blockOffsets.length * Bytes.SIZEOF_LONG);
  }
  if(blockDataSizes != null) {
    heapsize += ClassSize.align(ClassSize.ARRAY +
        blockDataSizes.length * Bytes.SIZEOF_INT);
  }

  return ClassSize.align(heapsize);
}
 
Author: fengchen8086, Project: ditb, Lines: 25, Source: IndexFile.java
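Note how the loop charges each key the array header plus its payload, rounded up to the 8-byte boundary, so short keys cost noticeably more than their raw length. A small illustrative sketch (the 13-byte key is our own; the exact result depends on the JVM's detected ClassSize.ARRAY):

import org.apache.hadoop.hbase.util.ClassSize;

public class KeyOverheadSketch {
  public static void main(String[] args) {
    // If ClassSize.ARRAY were 24, a 13-byte key would cost
    // align(24 + 13) = 40 bytes -- roughly triple its payload.
    byte[] key = new byte[13];
    System.out.println(ClassSize.align(ClassSize.ARRAY + key.length));
  }
}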

Example 2: LruHashMap

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
/**
 * Constructs a new, empty map with the specified initial capacity,
 * load factor, and maximum memory usage.
 *
 * @param initialCapacity the initial capacity
 * @param loadFactor the load factor
 * @param maxMemUsage the maximum total memory usage
 * @throws IllegalArgumentException if the initial capacity is less than one
 * @throws IllegalArgumentException if the initial capacity is greater than
 * the maximum capacity
 * @throws IllegalArgumentException if the load factor is <= 0
 * @throws IllegalArgumentException if the max memory usage is too small
 * to support the base overhead
 */
public LruHashMap(int initialCapacity, float loadFactor,
    long maxMemUsage) {
  if (initialCapacity < 1) {
    throw new IllegalArgumentException("Initial capacity must be > 0");
  }
  if (initialCapacity > MAXIMUM_CAPACITY) {
    throw new IllegalArgumentException("Initial capacity is too large");
  }
  if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
    throw new IllegalArgumentException("Load factor must be > 0");
  }
  if (maxMemUsage <= (OVERHEAD + initialCapacity * ClassSize.REFERENCE)) {
    throw new IllegalArgumentException("Max memory usage too small to " +
    "support base overhead");
  }

  // Find a power of 2 >= initialCapacity
  int capacity = calculateCapacity(initialCapacity);
  this.loadFactor = loadFactor;
  this.threshold = calculateThreshold(capacity,loadFactor);
  this.entries = new Entry[capacity];
  this.memFree = maxMemUsage;
  this.memTotal = maxMemUsage;
  init();
}
 
Author: fengchen8086, Project: ditb, Lines: 40, Source: LruHashMap.java
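The constructor depends on calculateCapacity(), which is not part of this excerpt. A plausible stand-in for such a helper (our assumption; the real LruHashMap may implement it differently) doubles until it reaches a power of 2 >= initialCapacity:

// Hypothetical sketch of the missing helper. The constructor's range
// check already guarantees initialCapacity <= MAXIMUM_CAPACITY.
static int calculateCapacity(int initialCapacity) {
  int capacity = 1;
  while (capacity < initialCapacity) {
    capacity <<= 1; // keep doubling: 1, 2, 4, 8, ...
  }
  return capacity;
}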

Example 3: LruCachedBlock

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
    boolean inMemory) {
  this.cacheKey = cacheKey;
  this.buf = buf;
  this.accessTime = accessTime;
  // We approximate the size of this class by the size of its name string
  // plus the size of its byte buffer plus the overhead associated with all
  // the base classes. We also include the base class
  // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with
  // their buffer lengths. This variable is used elsewhere in unit tests.
  this.size = ClassSize.align(cacheKey.heapSize())
      + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
  if(inMemory) {
    this.priority = BlockPriority.MEMORY;
  } else {
    this.priority = BlockPriority.SINGLE;
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: LruCachedBlock.java
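Only MEMORY and SINGLE are assigned in the constructor because promotion happens later: in HBase's LRU cache, a SINGLE block becomes MULTI the next time it is hit. A reduced sketch of that companion logic (paraphrased from LruCachedBlock.access(), which is not shown in this excerpt):

// On a cache hit, refresh the access timestamp and promote
// single-access blocks into the multi-access priority tier.
public void access(long accessTime) {
  this.accessTime = accessTime;
  if (this.priority == BlockPriority.SINGLE) {
    this.priority = BlockPriority.MULTI;
  }
}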

Example 4: heapSize

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
@Override
public long heapSize() {
  long size = ClassSize.align(
      ClassSize.OBJECT +
      // Block type, byte buffer and meta references
      3 * ClassSize.REFERENCE +
      // On-disk size, uncompressed size, and next block's on-disk size
      // bytesPerChecksum and onDiskDataSize
      4 * Bytes.SIZEOF_INT +
      // This and previous block offset
      2 * Bytes.SIZEOF_LONG +
      // Heap size of the meta object. meta will be always not null.
      fileContext.heapSize()
  );

  if (buf != null) {
    // Deep overhead of the byte buffer. Needs to be aligned separately.
    size += ClassSize.align(buf.capacity() + BYTE_BUFFER_HEAP_SIZE);
  }

  return ClassSize.align(size);
}
 
Author: fengchen8086, Project: ditb, Lines: 23, Source: HFileBlock.java
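Everything inside the first ClassSize.align(...) call except fileContext.heapSize() is invariant at runtime, so a common HBase pattern (compare the FIXED_OVERHEAD constants in classes such as KeyValue) is to hoist that part into a static constant. A sketch of how this example could do the same (our rearrangement, with slightly different alignment behavior since the fixed part is aligned on its own):

// Precompute the invariant portion once; heapSize() would then add
// fileContext.heapSize() and the buffer cost per call.
public static final long FIXED_OVERHEAD = ClassSize.align(
    ClassSize.OBJECT + 3 * ClassSize.REFERENCE
    + 4 * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_LONG);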

Example 5: testHeapSizeForBlockIndex

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(KeyValue.RAW_COMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: TestHFileBlockIndex.java

Example 6: getSizedCellScanner

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
static CellScanner getSizedCellScanner(final Cell [] cells) {
  int size = -1;
  for (Cell cell: cells) {
    size += CellUtil.estimatedSerializedSizeOf(cell);
  }
  final int totalSize = ClassSize.align(size);
  final CellScanner cellScanner = CellUtil.createCellScanner(cells);
  return new SizedCellScanner() {
    @Override
    public long heapSize() {
      return totalSize;
    }

    @Override
    public Cell current() {
      return cellScanner.current();
    }

    @Override
    public boolean advance() throws IOException {
      return cellScanner.advance();
    }
  };
}
 
Author: fengchen8086, Project: ditb, Lines: 25, Source: TestIPCUtil.java
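A hypothetical caller, written as if it lived in the same test class (the helper is static; the cell contents are ours and the usual HBase imports are assumed):

// Wrap one KeyValue, then read back both the size estimate and the cells.
static void demo() throws java.io.IOException {
  Cell[] cells = { new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
      Bytes.toBytes("q"), Bytes.toBytes("v")) };
  CellScanner scanner = getSizedCellScanner(cells);
  System.out.println(((SizedCellScanner) scanner).heapSize()); // aligned total
  while (scanner.advance()) {
    Cell cell = scanner.current(); // iterates like a plain CellScanner
  }
}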

Example 7: heapSize

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
/**
 * HeapSize implementation
 *
 * We do not count the bytes in the rowCache because it should be empty for a KeyValue in the
 * MemStore.
 */
@Override
public long heapSize() {
  int sum = 0;
  sum += ClassSize.OBJECT;// the KeyValue object itself
  sum += ClassSize.REFERENCE;// pointer to "bytes"
  sum += ClassSize.align(ClassSize.ARRAY);// "bytes"
  sum += ClassSize.align(length);// number of bytes of data in the "bytes" array
  sum += 2 * Bytes.SIZEOF_INT;// offset, length
  sum += Bytes.SIZEOF_LONG;// memstoreTS
  return ClassSize.align(sum);
}
 
Author: fengchen8086, Project: ditb, Lines: 18, Source: KeyValue.java

Example 8: LruHashMap

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
/**
 * Constructs a new, empty map with the specified initial capacity,
 * load factor, and maximum memory usage.
 *
 * @param initialCapacity the initial capacity
 * @param loadFactor the load factor
 * @param maxMemUsage the maximum total memory usage
 * @throws IllegalArgumentException if the initial capacity is less than one
 * @throws IllegalArgumentException if the initial capacity is greater than
 * the maximum capacity
 * @throws IllegalArgumentException if the load factor is <= 0
 * @throws IllegalArgumentException if the max memory usage is too small
 * to support the base overhead
 */
public LruHashMap(int initialCapacity, float loadFactor,
    long maxMemUsage) {
  if (initialCapacity < 1) {
    throw new IllegalArgumentException("Initial capacity must be > 0");
  }
  if (initialCapacity > MAXIMUM_CAPACITY) {
    throw new IllegalArgumentException("Initial capacity is too large");
  }
  if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
    throw new IllegalArgumentException("Load factor must be > 0");
  }
  if (maxMemUsage <= (OVERHEAD + initialCapacity * ClassSize.REFERENCE)) {
    throw new IllegalArgumentException("Max memory usage too small to " +
    "support base overhead");
  }

  // Find a power of 2 >= initialCapacity
  int capacity = calculateCapacity(initialCapacity);
  this.loadFactor = loadFactor;
  this.threshold = calculateThreshold(capacity,loadFactor);
  this.entries = new Entry[capacity];
  this.memFree = maxMemUsage;
  this.memTotal = maxMemUsage;
  init();
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 40, Source: LruHashMap.java

Example 9: Slab

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
Slab(int blockSize, int numBlocks) {
  buffers = new LinkedBlockingQueue<ByteBuffer>();
  slabs = new ConcurrentLinkedQueue<ByteBuffer>();

  this.blockSize = blockSize;
  this.numBlocks = numBlocks;

  this.heapSize = ClassSize.estimateBase(this.getClass(), false);

  int maxBlocksPerSlab = Integer.MAX_VALUE / blockSize;
  int maxSlabSize = maxBlocksPerSlab * blockSize;

  int numFullSlabs = numBlocks / maxBlocksPerSlab;
  int partialSlabSize = (numBlocks % maxBlocksPerSlab) * blockSize;
  for (int i = 0; i < numFullSlabs; i++) {
    allocateAndSlice(maxSlabSize, blockSize);
  }

  if (partialSlabSize > 0) {
    allocateAndSlice(partialSlabSize, blockSize);
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 23, Source: Slab.java
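To make the slab-splitting arithmetic concrete, here is the same computation with illustrative numbers (our own, not from the source):

public class SlabMathSketch {
  public static void main(String[] args) {
    int blockSize = 64 * 1024;                            // 64 KiB blocks
    int numBlocks = 100_000;
    int maxBlocksPerSlab = Integer.MAX_VALUE / blockSize; // 32_767
    int numFullSlabs = numBlocks / maxBlocksPerSlab;      // 3 full slabs
    int partialSlabSize = (numBlocks % maxBlocksPerSlab) * blockSize;
    System.out.println(numFullSlabs + " full slabs + "
        + partialSlabSize + " bytes in a final partial slab");
  }
}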

Example 10: CachedBlock

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
    boolean inMemory) {
  this.cacheKey = cacheKey;
  this.buf = buf;
  this.accessTime = accessTime;
  // We approximate the size of this class by the size of its name string
  // plus the size of its byte buffer plus the overhead associated with all
  // the base classes. We also include the base class
  // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with
  // their buffer lengths. This variable is used elsewhere in unit tests.
  this.size = ClassSize.align(cacheKey.heapSize())
      + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
  if(inMemory) {
    this.priority = BlockPriority.MEMORY;
  } else {
    this.priority = BlockPriority.SINGLE;
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 19, Source: CachedBlock.java

Example 11: heapSize

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
@Override
public long heapSize() {
  long size = ClassSize.align(
      // Base class size, including object overhead.
      SCHEMA_CONFIGURED_UNALIGNED_HEAP_SIZE +
      // Block type and byte buffer references
      2 * ClassSize.REFERENCE +
      // On-disk size, uncompressed size, and next block's on-disk size,
      // bytesPerChecksum, onDiskDataSize and minorVersion
      6 * Bytes.SIZEOF_INT +
      // Checksum type
      1 * Bytes.SIZEOF_BYTE +
      // This and previous block offset
      2 * Bytes.SIZEOF_LONG +
      // "Include memstore timestamp" flag
      Bytes.SIZEOF_BOOLEAN);

  if (buf != null) {
    // Deep overhead of the byte buffer. Needs to be aligned separately.
    size += ClassSize.align(buf.capacity() + BYTE_BUFFER_HEAP_SIZE);
  }

  return ClassSize.align(size);
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source: HFileBlock.java

Example 12: testHeapSizeForBlockIndex

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
/** Checks if the HeapSize calculator is within reason */
@Test
public void testHeapSizeForBlockIndex() throws IOException {
  Class<HFileBlockIndex.BlockIndexReader> cl =
      HFileBlockIndex.BlockIndexReader.class;
  long expected = ClassSize.estimateBase(cl, false);

  HFileBlockIndex.BlockIndexReader bi =
      new HFileBlockIndex.BlockIndexReader(Bytes.BYTES_RAWCOMPARATOR, 1);
  long actual = bi.heapSize();

  // Since the arrays in BlockIndex(byte [][] blockKeys, long [] blockOffsets,
  // int [] blockDataSizes) are all null they are not going to show up in the
  // HeapSize calculation, so need to remove those array costs from expected.
  expected -= ClassSize.align(3 * ClassSize.ARRAY);

  if (expected != actual) {
    ClassSize.estimateBase(cl, true);
    assertEquals(expected, actual);
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 22, Source: TestHFileBlockIndex.java

Example 13: getSizedCellScanner

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
static CellScanner getSizedCellScanner(final Cell [] cells) {
  int size = -1;
  for (Cell cell: cells) {
    size += CellUtil.estimatedSizeOf(cell);
  }
  final int totalSize = ClassSize.align(size);
  final CellScanner cellScanner = CellUtil.createCellScanner(cells);
  return new SizedCellScanner() {
    @Override
    public long heapSize() {
      return totalSize;
    }

    @Override
    public Cell current() {
      return cellScanner.current();
    }

    @Override
    public boolean advance() throws IOException {
      return cellScanner.advance();
    }
  };
}
 
Author: tenggyut, Project: HIndex, Lines: 25, Source: TestIPCUtil.java

Example 14: calculateHeapSizeForBlockKeys

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
@Override
protected long calculateHeapSizeForBlockKeys(long heapSize) {
  if (blockKeys != null) {
    heapSize += ClassSize.REFERENCE;
    // Adding array + references overhead
    heapSize += ClassSize.align(ClassSize.ARRAY + blockKeys.length * ClassSize.REFERENCE);

    // Adding blockKeys
    for (Cell key : blockKeys) {
      heapSize += ClassSize.align(PrivateCellUtil.estimatedHeapSizeOf(key));
    }
  }
  // Add the comparator and the mid-key AtomicReference
  heapSize += 2 * ClassSize.REFERENCE;
  return heapSize;
}
 
Author: apache, Project: hbase, Lines: 17, Source: HFileBlockIndex.java

Example 15: heapSize

import org.apache.hadoop.hbase.util.ClassSize; // import the required package/class
@Override
public long heapSize() {
  // This object, block type and byte buffer reference, on-disk and
  // uncompressed size, next block's on-disk size, offset and previous
  // offset, byte buffer object, and its byte array. Might also need to add
  // some fields inside the byte buffer.

  // We only add one BYTE_BUFFER_HEAP_SIZE because at any given moment, one of
  // the bytebuffers will be null. But we do account for both references.

  // If we are on heap, then we add the capacity of buf.
  if (buf != null) {
    return ClassSize.align(ClassSize.OBJECT + 2 * ClassSize.REFERENCE + 3
        * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_LONG + BYTE_BUFFER_HEAP_SIZE)
        + ClassSize.align(buf.capacity());
  } else {

    return ClassSize.align(ClassSize.OBJECT + 2 * ClassSize.REFERENCE + 3
        * Bytes.SIZEOF_INT + 2 * Bytes.SIZEOF_LONG + BYTE_BUFFER_HEAP_SIZE);
  }
}
 
Author: lifeng5042, Project: RStore, Lines: 22, Source: HFileBlock.java


Note: the org.apache.hadoop.hbase.util.ClassSize examples in this article were collected from open-source projects hosted on GitHub and similar platforms. Copyright of each snippet remains with its original authors; consult the corresponding project's license before redistributing or reusing the code. Please do not republish without permission.