

Java CacheableDeserializer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.CacheableDeserializer. If you are wondering what exactly the CacheableDeserializer class does, how to use it, or what real code using it looks like, the hand-picked class code examples below may help.


The CacheableDeserializer class belongs to the org.apache.hadoop.hbase.io.hfile package. Fourteen code examples of the class are shown below, sorted by popularity by default.
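Before the examples, here is the approximate shape of the interface, reconstructed from the way the examples below call it. The signatures match the HBase 2.x era; older releases (for instance the ones shown in Examples 6 and 10) pass a plain java.nio.ByteBuffer instead of a ByteBuff, so treat this as an orientation sketch rather than the authoritative definition.

import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
import org.apache.hadoop.hbase.nio.ByteBuff;

// Approximate interface shape, inferred from the calls made in the examples below.
public interface CacheableDeserializer<T extends Cacheable> {
  // Rebuilds a Cacheable from serialized bytes. 'reuse' hints whether the source buffer may be
  // reused, and 'memType' records whether the bytes live in EXCLUSIVE or SHARED memory
  // (compare Examples 1 and 4 below).
  T deserialize(ByteBuff b, boolean reuse, MemoryType memType) throws IOException;

  // Identifier under which this deserializer is registered with CacheableDeserializerIdManager,
  // so that a bucket-cache entry can look it up again after the block has been written out
  // (see Examples 7 and 8 below).
  int getDeserialiserIdentifier();
}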

Example 1: read

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
/**
 * Transfers data from the file into a newly allocated byte buffer and deserializes it.
 * @param offset The offset in the file where the first byte is to be read
 * @param length The length of the buffer that should be allocated for reading
 *               from the file channel
 * @param deserializer The deserializer used to turn the bytes read into a Cacheable
 * @return the deserialized Cacheable
 * @throws IOException
 */
@Override
public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
    throws IOException {
  Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0.");
  ByteBuffer dstBuffer = ByteBuffer.allocate(length);
  if (length != 0) {
    accessFile(readAccessor, dstBuffer, offset);
    // The buffer created from the fileChannel is a copy of the data in the file, so there is no
    // shared memory being pointed to. Even if the BucketCache evicts this block from the file,
    // the data has already been copied, so nothing extra is needed to keep the results from
    // being corrupted before they are consumed.
    if (dstBuffer.limit() != length) {
      throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
          + " expected");
    }
  }
  return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, MemoryType.EXCLUSIVE);
}
 
Author: apache, Project: hbase, Lines: 27, Source: FileIOEngine.java

Example 2: setHFileDeserializer

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
@SuppressWarnings("unchecked")
public static void setHFileDeserializer()
{
       Field field = getProtectedField(HFileBlock.class, "blockDeserializer");

       if (field == null){
           LOG.error("Could not get access to HFileBlock.blockDeserializer");
       	return;
       }

       try
       {
       	CacheableDeserializer<Cacheable> serde = (CacheableDeserializer<Cacheable>) field.get(null);
       	if(serde != null){
       		deserializer.set(serde);
       	} else{
       		LOG.warn("HFileBlock.blockDeserializer is null");
       	}
       }
       catch (Exception e)
       {
           LOG.warn("unable to read HFileBlock.blockDeserializer");
       }
       
}
 
Author: VladRodionov, Project: bigbase, Lines: 26, Source: CacheableSerializer.java
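The snippet above relies on a getProtectedField helper that is not part of this listing. Below is a minimal sketch of what such a reflection helper presumably looks like; it is a hypothetical reconstruction, not the original bigbase code, which may differ in details such as walking superclasses.

import java.lang.reflect.Field;

// Hypothetical reflection helper matching the call in Example 2; not the original bigbase code.
static Field getProtectedField(Class<?> klass, String fieldName) {
  try {
    Field field = klass.getDeclaredField(fieldName); // also finds protected and private fields
    field.setAccessible(true);                       // lift the access check so field.get() works
    return field;
  } catch (NoSuchFieldException | SecurityException e) {
    return null;                                     // Example 2 handles null by logging an error
  }
}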

Example 3: read

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
@Override
public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
    throws IOException {
  byte[] dst = new byte[length];
  bufferArray.getMultiple(offset, length, dst);
  return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
      MemoryType.EXCLUSIVE);
}
 
Author: apache, Project: hbase, Lines: 9, Source: FileMmapEngine.java

Example 4: read

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
@Override
public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
    throws IOException {
  ByteBuff dstBuffer = bufferArray.asSubByteBuff(offset, length);
  // The buffer created here refers directly to the buffers in the actual buckets. Any cell that
  // references a block built from these buckets is therefore pointing at shared memory, which
  // would be corrupted if the BucketCache evicted it. Hence we mark the buffer's memory type as
  // SHARED so that readers of this block know to prevent eviction until the results have been
  // consumed or copied.
  return deserializer.deserialize(dstBuffer, true, MemoryType.SHARED);
}
 
Author: apache, Project: hbase, Lines: 13, Source: ByteBufferIOEngine.java

Example 5: write

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
@Override
public void write(ByteBuffer buf, Cacheable obj) throws IOException {
  if (deserializer.get() == null) {
    CacheableDeserializer<Cacheable> des = obj.getDeserializer();
    deserializer.compareAndSet(null, des);
  }
  // The serializer does not honor the current buffer position
  int len = obj.getSerializedLength();
  int pos = buf.position();
  obj.serialize(buf);
  buf.limit(len + pos);
  buf.position(len + pos);
}
 
Author: VladRodionov, Project: bigbase, Lines: 14, Source: CacheableSerializer.java
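To see how the lazily captured deserializer is used afterwards, here is a hypothetical round trip that combines Example 5 with the static accessor from Example 11 below. It assumes the older deserialize(ByteBuffer, boolean) signature used in Examples 6 and 10; newer HBase versions take a ByteBuff and a MemoryType instead (see Examples 1 and 3).

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;

// Hypothetical usage sketch, not part of the bigbase sources.
static Cacheable roundTrip(CacheableSerializer serializer, Cacheable block) throws IOException {
  ByteBuffer buf = ByteBuffer.allocate(block.getSerializedLength());
  serializer.write(buf, block);  // Example 5: captures block.getDeserializer() on the first write
  buf.flip();                    // rewind so deserialization starts at position 0
  CacheableDeserializer<Cacheable> des = CacheableSerializer.getDeserializer(); // Example 11
  return des.deserialize(buf, true);  // older-style deserialize(ByteBuffer, boolean)
}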

Example 6: getBlock

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @param updateCacheMetrics Whether we should update cache metrics or not
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  if (!cacheEnabled) {
    return null;
  }
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    if (updateCacheMetrics) {
      cacheStats.hit(caching, key.isPrimary());
    }
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if (bucketEntry != null) {
    long start = System.nanoTime();
    ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
    try {
      lock.readLock().lock();
      // We cannot read here merely because backingMap contains the given key: its offset may
      // have changed. If we locked on the BlockCacheKey instead of the offset, we could only
      // check existence here.
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        int lenRead = ioEngine.read(bb, bucketEntry.offset());
        if (lenRead != len) {
          throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
        }
        CacheableDeserializer<Cacheable> deserializer =
          bucketEntry.deserializerReference(this.deserialiserMap);
        Cacheable cachedBlock = deserializer.deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        if (updateCacheMetrics) {
          cacheStats.hit(caching, key.isPrimary());
          cacheStats.ioHit(timeTaken);
        }
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      lock.readLock().unlock();
    }
  }
  if (!repeat && updateCacheMetrics) {
    cacheStats.miss(caching, key.isPrimary());
  }
  return null;
}
 
Author: fengchen8086, Project: ditb, Lines: 65, Source: BucketCache.java

Example 7: deserializerReference

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
protected CacheableDeserializer<Cacheable> deserializerReference(
    UniqueIndexMap<Integer> deserialiserMap) {
  return CacheableDeserializerIdManager.getDeserializer(deserialiserMap
      .unmap(deserialiserIndex));
}
 
Author: fengchen8086, Project: ditb, Lines: 6, Source: BucketCache.java

Example 8: setDeserialiserReference

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
protected void setDeserialiserReference(
    CacheableDeserializer<Cacheable> deserializer,
    UniqueIndexMap<Integer> deserialiserMap) {
  this.deserialiserIndex = ((byte) deserialiserMap.map(deserializer
      .getDeserialiserIdentifier()));
}
 
Author: fengchen8086, Project: ditb, Lines: 7, Source: BucketCache.java

Example 9: CacheablePair

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
private CacheablePair(CacheableDeserializer<Cacheable> deserializer,
    ByteBuffer serializedData) {
  this.recentlyAccessed = new AtomicLong();
  this.deserializer = deserializer;
  this.serializedData = serializedData;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 7, Source: SingleSizeCache.java

Example 10: getBlock

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
/**
 * Get the buffer of the block with the specified key.
 * @param key block's cache key
 * @param caching true if the caller caches blocks on cache misses
 * @param repeat Whether this is a repeat lookup for the same block
 * @param updateCacheMetrics Whether we should update cache metrics or not
 * @return buffer of specified cache key, or null if not in cache
 */
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    boolean updateCacheMetrics) {
  if (!cacheEnabled)
    return null;
  RAMQueueEntry re = ramCache.get(key);
  if (re != null) {
    if (updateCacheMetrics) cacheStats.hit(caching);
    re.access(accessCount.incrementAndGet());
    return re.getData();
  }
  BucketEntry bucketEntry = backingMap.get(key);
  if (bucketEntry != null) {
    long start = System.nanoTime();
    IdLock.Entry lockEntry = null;
    try {
      lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
      if (bucketEntry.equals(backingMap.get(key))) {
        int len = bucketEntry.getLength();
        ByteBuffer bb = ByteBuffer.allocate(len);
        int lenRead = ioEngine.read(bb, bucketEntry.offset());
        if (lenRead != len) {
          throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
        }
        CacheableDeserializer<Cacheable> deserializer =
          bucketEntry.deserializerReference(this.deserialiserMap);
        Cacheable cachedBlock = deserializer.deserialize(bb, true);
        long timeTaken = System.nanoTime() - start;
        if (updateCacheMetrics) {
          cacheStats.hit(caching);
          cacheStats.ioHit(timeTaken);
        }
        bucketEntry.access(accessCount.incrementAndGet());
        if (this.ioErrorStartTime > 0) {
          ioErrorStartTime = -1;
        }
        return cachedBlock;
      }
    } catch (IOException ioex) {
      LOG.error("Failed reading block " + key + " from bucket cache", ioex);
      checkIOErrorIsTolerated();
    } finally {
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }
  if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
  return null;
}
 
Author: grokcoder, Project: pbase, Lines: 59, Source: BucketCache.java

Example 11: getDeserializer

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
public static CacheableDeserializer<Cacheable> getDeserializer() {
  return deserializer.get();
}
 
Author: VladRodionov, Project: bigbase, Lines: 5, Source: CacheableSerializer.java

Example 12: setSerializer

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
public static void setSerializer(CacheableDeserializer<Cacheable> ser) {
  deserializer.set(ser);
}
 
Author: VladRodionov, Project: bigbase, Lines: 5, Source: CacheableSerializer.java

Example 13: getDeserializer

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
@Override
public CacheableDeserializer<Cacheable> getDeserializer() {
  return ByteArrayCacheable.deserializer;
}
 
Author: VladRodionov, Project: bigbase, Lines: 5, Source: ByteArrayCacheable.java

Example 14: read

import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; // import the required package/class
/**
 * Transfers data from the IOEngine to a Cacheable object.
 * @param offset The offset in the IO engine where the first byte is to be read
 * @param length How many bytes to read starting from the offset
 * @param deserializer The deserializer to be used to make a Cacheable from the data.
 * @return Cacheable
 * @throws IOException
 * @throws RuntimeException when the length of the ByteBuff read is less than 'length'
 */
Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
    throws IOException;
 
Author: apache, Project: hbase, Lines: 12, Source: IOEngine.java
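For completeness, a hypothetical caller that ties this read signature to the id-based lookup from Example 7. The deserializerId parameter is purely illustrative; real callers such as BucketCache resolve the id through a UniqueIndexMap (Examples 6, 7 and 8), and IOEngine itself lives in the bucket-cache package, so a snippet like this would have to sit alongside those classes.

import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;

// Hypothetical usage sketch: resolve the registered deserializer by its id, then let the
// IOEngine read the block bytes and hand them straight to deserialize().
static Cacheable readBlock(IOEngine engine, long offset, int length, int deserializerId)
    throws IOException {
  CacheableDeserializer<Cacheable> deserializer =
      CacheableDeserializerIdManager.getDeserializer(deserializerId);
  return engine.read(offset, length, deserializer);
}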


Note: The org.apache.hadoop.hbase.io.hfile.CacheableDeserializer class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective authors, who retain copyright in the source code; refer to each project's License before distributing or using the code. Do not reproduce this article without permission.