This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.hfile.Cacheable.serialize. If you are wondering what Cacheable.serialize does, how to call it, or what real usages look like, the selected examples below should help. You can also look further into the usage of the enclosing class, org.apache.hadoop.hbase.io.hfile.Cacheable.
The examples below demonstrate the Cacheable.serialize method, ordered by popularity.
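Before the examples, here is a minimal sketch of the calling pattern they all share: ask the block for its serialized length, allocate a buffer of at least that size, and let serialize write into it; the deserializer obtained from getDeserializer() turns the bytes back into a block. The helper class and method names below are illustrative, and the single-argument CacheableDeserializer.deserialize(ByteBuffer) call is assumed from the HBase version these examples target, so treat this as a sketch rather than canonical API usage.
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;

public final class CacheableSerializeSketch {

  // Serialize a block into a newly allocated heap buffer.
  static ByteBuffer toBuffer(Cacheable block) {
    int len = block.getSerializedLength();
    ByteBuffer dst = ByteBuffer.allocate(len);
    block.serialize(dst);
    // Some implementations do not advance the buffer position (see the
    // write() example below), so set position and limit explicitly.
    dst.position(0);
    dst.limit(len);
    return dst;
  }

  // Reconstruct a block from its serialized form using the block's own deserializer.
  static Cacheable fromBuffer(Cacheable original, ByteBuffer src) throws IOException {
    CacheableDeserializer<Cacheable> deserializer = original.getDeserializer();
    return deserializer.deserialize(src);
  }
}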
Example 1: storeExternal
import org.apache.hadoop.hbase.io.hfile.Cacheable; // the package/class this method depends on
/**
 * Store a block in external (overflow) storage.
 *
 * @param blockName the block name
 * @param buf the block to store
 * @param inMemory whether the block is flagged as in-memory
 * @throws IOException Signals that an I/O exception has occurred.
 */
@SuppressWarnings("unused")
private void storeExternal(String blockName, Cacheable buf, boolean inMemory) throws IOException {
  // If external storage is disabled, bail out
  if (!overflowExtEnabled) return;
  // Skip blocks that are already in the external storage cache
  if (extStorageCache.contains(blockName)) return;
  ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
  deserializer.set(buf.getDeserializer());
  buffer.clear();
  // Record layout: [0..3] length of what follows, [4] in-memory flag, [5..] serialized block
  buffer.position(4);
  buffer.put(inMemory ? (byte) 1 : (byte) 0);
  buf.serialize(buffer);
  buffer.putInt(0, buffer.position() - 4);
  StorageHandle handle = storage.storeData(buffer);
  try {
    extStorageCache.put(blockName, handle);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
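storeExternal writes a small record: a 4-byte length at offset 0, a 1-byte in-memory flag at offset 4, and the serialized block after that. A sketch of parsing that record back out of a buffer that the external storage layer has already filled might look like this (the class, method, and field names are illustrative; reading the bytes back from the StorageHandle is project-specific and not shown):
import java.nio.ByteBuffer;

// Illustrative sketch only: parse the record layout produced by storeExternal().
final class ExternalRecord {
  final boolean inMemory;
  final ByteBuffer payload; // the bytes written by Cacheable.serialize()

  private ExternalRecord(boolean inMemory, ByteBuffer payload) {
    this.inMemory = inMemory;
    this.payload = payload;
  }

  static ExternalRecord parse(ByteBuffer buffer) {
    buffer.position(0);
    int recordLen = buffer.getInt();      // bytes 0..3: flag byte + payload length
    boolean inMemory = buffer.get() == 1; // byte 4: in-memory flag
    ByteBuffer payload = buffer.slice();  // bytes 5 onward: serialized block
    payload.limit(recordLen - 1);         // drop the flag byte from the count
    return new ExternalRecord(inMemory, payload);
  }
}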
Example 2: cacheBlock
import org.apache.hadoop.hbase.io.hfile.Cacheable; // the package/class this method depends on
@Override
public void cacheBlock(BlockCacheKey blockName, Cacheable toBeCached) {
  ByteBuffer storedBlock;
  try {
    // Reserve a slab slot big enough for the serialized block.
    storedBlock = backingStore.alloc(toBeCached.getSerializedLength());
  } catch (InterruptedException e) {
    LOG.warn("SlabAllocator was interrupted while waiting for block to become available");
    LOG.warn(e);
    return;
  }
  CacheablePair newEntry = new CacheablePair(toBeCached.getDeserializer(),
      storedBlock);
  // Copy the block's bytes into the reserved slab slot.
  toBeCached.serialize(storedBlock);
  synchronized (this) {
    CacheablePair alreadyCached = backingMap.putIfAbsent(blockName, newEntry);
    if (alreadyCached != null) {
      backingStore.free(storedBlock);
      throw new RuntimeException("already cached " + blockName);
    }
    if (actionWatcher != null) {
      actionWatcher.onInsertion(blockName, this);
    }
  }
  newEntry.recentlyAccessed.set(System.nanoTime());
  this.size.addAndGet(newEntry.heapSize());
}
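The entry pairs the slab buffer with the block's CacheableDeserializer, which is what makes the block readable again on a cache hit. A minimal sketch of that reverse trip, assuming the single-argument deserialize(ByteBuffer) available in this HBase version and passing the two pieces in explicitly rather than reaching into CacheablePair's internals:
// Illustrative sketch only: rebuild a block from a cached entry's parts.
Cacheable readCachedBlock(CacheableDeserializer<Cacheable> deserializer,
    ByteBuffer storedBlock) throws IOException {
  // Deserialize from a read-only view so the shared slab slot cannot be modified.
  return deserializer.deserialize(storedBlock.asReadOnlyBuffer());
}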
Example 3: write
import org.apache.hadoop.hbase.io.hfile.Cacheable; // the package/class this method depends on
@Override
public void write(ByteBuffer buf, Cacheable obj) throws IOException {
  // Remember the block's deserializer the first time through, so the
  // matching read path can reconstruct blocks later.
  if (deserializer.get() == null) {
    CacheableDeserializer<Cacheable> des = obj.getDeserializer();
    deserializer.compareAndSet(null, des);
  }
  // The serializer does not honor the current buffer position, so record the
  // starting position and the serialized length and fix up limit/position by hand.
  int len = obj.getSerializedLength();
  int pos = buf.position();
  obj.serialize(buf);
  buf.limit(len + pos);
  buf.position(len + pos);
}
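A minimal usage sketch for a writer like this, assuming block is a Cacheable, writer is an instance of the class that declares write(), and the caller allocates the buffer (all of these names are illustrative):
ByteBuffer buf = ByteBuffer.allocate(block.getSerializedLength());
writer.write(buf, block); // copies the serialized bytes and advances position/limit
buf.flip();               // position back to 0, ready to read the serialized block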