This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.IdLock. If you are wondering what the IdLock class is for, how to use it, or what real code using it looks like, the curated examples below may help.
The IdLock class belongs to the org.apache.hadoop.hbase.util package. A total of 15 code examples of the IdLock class are shown below, sorted by popularity by default.
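All 15 examples share the same locking pattern: obtain a per-id lock entry with getLockEntry, do the critical work, and release the entry in a finally block. Below is a minimal, self-contained sketch of that pattern; the class name, the id value, and the commented-out work are placeholders, not code taken from the examples:

import java.io.IOException;
import org.apache.hadoop.hbase.util.IdLock;

public class IdLockSketch {
  private final IdLock idLock = new IdLock();

  public void doWithLock(long id) throws IOException {
    // Blocks until no other thread holds an entry for this id.
    IdLock.Entry lockEntry = idLock.getLockEntry(id);
    try {
      // Critical section: only one thread at a time runs here for a given id.
    } finally {
      // Always release the entry, or later callers for this id will block forever.
      idLock.releaseLockEntry(lockEntry);
    }
  }
}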
Example 1: evictFile
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Evicts the cached file by its name.
* @param fileName The name of a cached file.
*/
public void evictFile(String fileName) {
if (isCacheEnabled) {
IdLock.Entry lockEntry = null;
try {
// obtains the lock to close the cached file.
lockEntry = keyLock.getLockEntry(fileName.hashCode());
CachedMobFile evictedFile = map.remove(fileName);
if (evictedFile != null) {
evictedFile.close();
evictedFileCount.increment();
}
} catch (IOException e) {
LOG.error("Failed to evict the file " + fileName, e);
} finally {
if (lockEntry != null) {
keyLock.releaseLockEntry(lockEntry);
}
}
}
}
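Note that fileName.hashCode() is used as the lock id here: two distinct file names can hash to the same id and will then share a lock entry. A collision only causes some extra, harmless contention; it never affects correctness, since the id is used purely for mutual exclusion.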
Example 2: closeFile
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Closes a mob file.
* @param file The mob file that needs to be closed.
*/
public void closeFile(MobFile file) {
IdLock.Entry lockEntry = null;
try {
if (!isCacheEnabled) {
file.close();
} else {
lockEntry = keyLock.getLockEntry(file.getFileName().hashCode());
file.close();
}
} catch (IOException e) {
LOG.error("MobFileCache, Exception happen during close " + file.getFileName(), e);
} finally {
if (lockEntry != null) {
keyLock.releaseLockEntry(lockEntry);
}
}
}
Example 3: reportMobCompactionStart
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
public void reportMobCompactionStart(TableName tableName) throws IOException {
IdLock.Entry lockEntry = null;
try {
lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
if (compactionsCount == null) {
compactionsCount = new AtomicInteger(0);
mobCompactionStates.put(tableName, compactionsCount);
}
compactionsCount.incrementAndGet();
} finally {
if (lockEntry != null) {
mobCompactionLock.releaseLockEntry(lockEntry);
}
}
}
Example 4: reportMobCompactionEnd
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
public void reportMobCompactionEnd(TableName tableName) throws IOException {
IdLock.Entry lockEntry = null;
try {
lockEntry = mobCompactionLock.getLockEntry(tableName.hashCode());
AtomicInteger compactionsCount = mobCompactionStates.get(tableName);
if (compactionsCount != null) {
int count = compactionsCount.decrementAndGet();
// remove the entry if the count is 0.
if (count == 0) {
mobCompactionStates.remove(tableName);
}
}
} finally {
if (lockEntry != null) {
mobCompactionLock.releaseLockEntry(lockEntry);
}
}
}
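Examples 3 and 4 are meant to be used as a pair. A plausible calling pattern (hypothetical; the master handle and the surrounding compaction call are assumptions, not part of the source above) brackets the compaction so the per-table counter is always decremented, even on failure:

// Hypothetical caller sketch: pair the start/end reports in try/finally.
master.reportMobCompactionStart(tableName);
try {
  // run the mob compaction for tableName here
} finally {
  master.reportMobCompactionEnd(tableName);
}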
Example 5: evictFile
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Evicts the cached file by its name.
* @param fileName The name of a cached file.
*/
public void evictFile(String fileName) {
if (isCacheEnabled) {
IdLock.Entry lockEntry = null;
try {
// obtains the lock to close the cached file.
lockEntry = keyLock.getLockEntry(fileName.hashCode());
CachedMobFile evictedFile = map.remove(fileName);
if (evictedFile != null) {
evictedFile.close();
}
} catch (IOException e) {
LOG.error("Fail to evict the file " + fileName, e);
} finally {
if (lockEntry != null) {
keyLock.releaseLockEntry(lockEntry);
}
}
}
}
Example 6: evictBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
if (!cacheEnabled) return false;
RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
if (removedBlock != null) {
this.blockNumber.decrementAndGet();
this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
}
BucketEntry bucketEntry = backingMap.get(cacheKey);
if (bucketEntry != null) {
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
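// Re-check under the offset lock: free the bucket only if this entry is still
// the one mapped to the key; otherwise another thread already replaced or evicted it.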
if (bucketEntry.equals(backingMap.remove(cacheKey))) {
bucketAllocator.freeBlock(bucketEntry.offset());
realCacheSize.addAndGet(-1 * bucketEntry.getLength());
blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
if (removedBlock == null) {
this.blockNumber.decrementAndGet();
}
} else {
return false;
}
} catch (IOException ie) {
LOG.warn("Failed evicting block " + cacheKey);
return false;
} finally {
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
}
}
cacheStats.evicted(bucketEntry == null ? 0 : bucketEntry.getCachedTime());
return true;
}
Example 7: evictBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
if (!cacheEnabled) return false;
RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
if (removedBlock != null) {
this.blockNumber.decrementAndGet();
this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
}
BucketEntry bucketEntry = backingMap.get(cacheKey);
if (bucketEntry != null) {
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
if (bucketEntry.equals(backingMap.remove(cacheKey))) {
bucketAllocator.freeBlock(bucketEntry.offset());
realCacheSize.addAndGet(-1 * bucketEntry.getLength());
blocksByHFile.remove(cacheKey.getHfileName(), cacheKey);
if (removedBlock == null) {
this.blockNumber.decrementAndGet();
}
} else {
return false;
}
} catch (IOException ie) {
LOG.warn("Failed evicting block " + cacheKey);
return false;
} finally {
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
}
}
cacheStats.evicted();
return true;
}
Example 8: openFile
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Opens a mob file.
* @param fs The current file system.
* @param path The file path.
* @param cacheConf The current MobCacheConfig
* @return An opened mob file.
* @throws IOException
*/
public MobFile openFile(FileSystem fs, Path path, MobCacheConfig cacheConf) throws IOException {
if (!isCacheEnabled) {
MobFile mobFile = MobFile.create(fs, path, conf, cacheConf);
mobFile.open();
return mobFile;
} else {
String fileName = path.getName();
CachedMobFile cached = map.get(fileName);
IdLock.Entry lockEntry = keyLock.getLockEntry(fileName.hashCode());
try {
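// Double-checked locking: re-read the cache after taking the per-file-name lock,
// since another thread may have opened and cached the file first.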
if (cached == null) {
cached = map.get(fileName);
if (cached == null) {
if (map.size() > mobFileMaxCacheSize) {
evict();
}
cached = CachedMobFile.create(fs, path, conf, cacheConf);
cached.open();
map.put(fileName, cached);
miss.increment();
}
}
cached.open();
cached.access(count.incrementAndGet());
} finally {
keyLock.releaseLockEntry(lockEntry);
}
return cached;
}
}
Example 9: openFile
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Opens a mob file.
* @param fs The current file system.
* @param path The file path.
* @param cacheConf The current MobCacheConfig
* @return An opened mob file.
* @throws IOException
*/
public MobFile openFile(FileSystem fs, Path path, MobCacheConfig cacheConf) throws IOException {
if (!isCacheEnabled) {
return MobFile.create(fs, path, conf, cacheConf);
} else {
String fileName = path.getName();
CachedMobFile cached = map.get(fileName);
IdLock.Entry lockEntry = keyLock.getLockEntry(fileName.hashCode());
try {
if (cached == null) {
cached = map.get(fileName);
if (cached == null) {
if (map.size() > mobFileMaxCacheSize) {
evict();
}
cached = CachedMobFile.create(fs, path, conf, cacheConf);
cached.open();
map.put(fileName, cached);
miss.incrementAndGet();
}
}
cached.open();
cached.access(count.incrementAndGet());
} finally {
keyLock.releaseLockEntry(lockEntry);
}
return cached;
}
}
Example 10: evictBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
if (!cacheEnabled) return false;
RAMQueueEntry removedBlock = ramCache.remove(cacheKey);
if (removedBlock != null) {
this.blockNumber.decrementAndGet();
this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize());
}
BucketEntry bucketEntry = backingMap.get(cacheKey);
if (bucketEntry != null) {
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
if (bucketEntry.equals(backingMap.remove(cacheKey))) {
bucketAllocator.freeBlock(bucketEntry.offset());
realCacheSize.addAndGet(-1 * bucketEntry.getLength());
if (removedBlock == null) {
this.blockNumber.decrementAndGet();
}
} else {
return false;
}
} catch (IOException ie) {
LOG.warn("Failed evicting block " + cacheKey);
return false;
} finally {
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
}
}
cacheStats.evicted();
return true;
}
Example 11: getBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Get the buffer of the block with the specified key.
* @param key block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block
* @param updateCacheMetrics Whether we should update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
if (!cacheEnabled)
return null;
RAMQueueEntry re = ramCache.get(key);
if (re != null) {
if (updateCacheMetrics) cacheStats.hit(caching);
re.access(accessCount.incrementAndGet());
return re.getData();
}
BucketEntry bucketEntry = backingMap.get(key);
if (bucketEntry != null) {
long start = System.nanoTime();
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
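// Re-validate the entry under the offset lock before reading from the IOEngine;
// it may have been evicted while this thread waited for the lock.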
if (bucketEntry.equals(backingMap.get(key))) {
int len = bucketEntry.getLength();
ByteBuffer bb = ByteBuffer.allocate(len);
int lenRead = ioEngine.read(bb, bucketEntry.offset());
if (lenRead != len) {
throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
}
CacheableDeserializer<Cacheable> deserializer =
bucketEntry.deserializerReference(this.deserialiserMap);
Cacheable cachedBlock = deserializer.deserialize(bb, true);
long timeTaken = System.nanoTime() - start;
if (updateCacheMetrics) {
cacheStats.hit(caching);
cacheStats.ioHit(timeTaken);
}
bucketEntry.access(accessCount.incrementAndGet());
if (this.ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
return cachedBlock;
}
} catch (IOException ioex) {
LOG.error("Failed reading block " + key + " from bucket cache", ioex);
checkIOErrorIsTolerated();
} finally {
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
}
}
if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
return null;
}
Example 12: getBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Get the buffer of the block with the specified key.
* @param key block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block
* @param updateCacheMetrics Whether we should update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
if (!cacheEnabled)
return null;
RAMQueueEntry re = ramCache.get(key);
if (re != null) {
if (updateCacheMetrics) cacheStats.hit(caching);
re.access(accessCount.incrementAndGet());
return re.getData();
}
BucketEntry bucketEntry = backingMap.get(key);
if (bucketEntry != null) {
long start = System.nanoTime();
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
if (bucketEntry.equals(backingMap.get(key))) {
int len = bucketEntry.getLength();
ByteBuffer bb = ByteBuffer.allocate(len);
int lenRead = ioEngine.read(bb, bucketEntry.offset());
if (lenRead != len) {
throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
}
Cacheable cachedBlock = bucketEntry.deserializerReference(
deserialiserMap).deserialize(bb, true);
long timeTaken = System.nanoTime() - start;
if (updateCacheMetrics) {
cacheStats.hit(caching);
cacheStats.ioHit(timeTaken);
}
bucketEntry.access(accessCount.incrementAndGet());
if (this.ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
return cachedBlock;
}
} catch (IOException ioex) {
LOG.error("Failed reading block " + key + " from bucket cache", ioex);
checkIOErrorIsTolerated();
} finally {
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
}
}
if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
return null;
}
Example 13: resolve
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Reads the cell from the mob file.
* @param reference The cell found in HBase; its value is the path to a mob file.
* @param cacheBlocks Whether the scanner should cache blocks.
* @param readPt the read point.
* @param readEmptyValueOnMobCellMiss Whether to return a null value when the mob file is
* missing or corrupt.
* @return The cell found in the mob file.
* @throws IOException
*/
public Cell resolve(Cell reference, boolean cacheBlocks, long readPt,
boolean readEmptyValueOnMobCellMiss) throws IOException {
Cell result = null;
if (MobUtils.hasValidMobRefCellValue(reference)) {
String fileName = MobUtils.getMobFileName(reference);
Tag tableNameTag = MobUtils.getTableNameTag(reference);
if (tableNameTag != null) {
String tableNameString = Tag.getValueAsString(tableNameTag);
List<Path> locations = map.get(tableNameString);
if (locations == null) {
IdLock.Entry lockEntry = keyLock.getLockEntry(tableNameString.hashCode());
try {
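// Re-check after taking the lock: another thread may already have
// populated the locations for this table.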
locations = map.get(tableNameString);
if (locations == null) {
locations = new ArrayList<>(2);
TableName tn = TableName.valueOf(tableNameString);
locations.add(MobUtils.getMobFamilyPath(conf, tn, family.getNameAsString()));
locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils
.getMobRegionInfo(tn).getEncodedName(), family.getNameAsString()));
map.put(tableNameString, locations);
}
} finally {
keyLock.releaseLockEntry(lockEntry);
}
}
result = readCell(locations, fileName, reference, cacheBlocks, readPt,
readEmptyValueOnMobCellMiss);
}
}
if (result == null) {
LOG.warn("The Cell result is null, assemble a new Cell with the same row,family,"
+ "qualifier,timestamp,type and tags but with an empty value to return.");
result = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
.setRow(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength())
.setFamily(reference.getFamilyArray(), reference.getFamilyOffset(),
reference.getFamilyLength())
.setQualifier(reference.getQualifierArray(),
reference.getQualifierOffset(), reference.getQualifierLength())
.setTimestamp(reference.getTimestamp())
.setType(reference.getTypeByte())
.setValue(HConstants.EMPTY_BYTE_ARRAY)
.setTags(reference.getTagsArray(), reference.getTagsOffset(),
reference.getTagsLength())
.build();
}
return result;
}
Example 14: readBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Read in a file block.
*
* @param dataBlockOffset offset to read.
* @param onDiskBlockSize size of the block
* @param cacheBlock Whether the block should be cached after it is read.
* @param pread Use positional read instead of seek+read (positional is better for
* random reads, whereas seek+read is better for scanning).
* @param isCompaction is this block being read as part of a compaction
* @return Block wrapped in a ByteBuffer.
* @throws IOException
*/
@Override
public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize,
final boolean cacheBlock, boolean pread, final boolean isCompaction)
throws IOException {
if (dataBlockIndexReader == null) {
throw new IOException("Block index not loaded");
}
if (dataBlockOffset < 0
|| dataBlockOffset >= trailer.getLoadOnOpenDataOffset()) {
throw new IOException("Requested block is out of range: "
+ dataBlockOffset + ", lastDataBlockOffset: "
+ trailer.getLastDataBlockOffset());
}
// For any given block from any given file, synchronize reads for said
// block.
// Without a cache, this synchronizing is needless overhead, but really
// the other choice is to duplicate work (which the cache would prevent you
// from doing).
BlockCacheKey cacheKey = HFile.getBlockCacheKey(name, dataBlockOffset);
IdLock.Entry lockEntry = offsetLock.getLockEntry(dataBlockOffset);
try {
blockLoads.incrementAndGet();
if (cacheConf.isBlockCacheEnabled()) {
HFileBlock cachedBlock =
(HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey, cacheBlock);
if (cachedBlock != null) {
cacheHits.incrementAndGet();
return cachedBlock;
}
// Carry on, please load.
}
// Load block from filesystem.
long startTimeNs = System.nanoTime();
HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset,
onDiskBlockSize, -1, pread);
BlockCategory blockCategory = hfileBlock.getBlockType().getCategory();
HFile.readTimeNano.addAndGet(System.nanoTime() - startTimeNs);
HFile.readOps.incrementAndGet();
// Cache the block
if (cacheBlock && cacheConf.shouldCacheBlockOnRead(
hfileBlock.getBlockType().getCategory())) {
cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock,
cacheConf.isInMemory());
}
return hfileBlock;
} finally {
offsetLock.releaseLockEntry(lockEntry);
}
}
Example 15: getBlock
import org.apache.hadoop.hbase.util.IdLock; // import the required package/class
/**
* Get the buffer of the block with the specified key.
* @param key block's cache key
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block
* @param updateCacheMetrics Whether we should update cache metrics or not
* @return buffer of specified cache key, or null if not in cache
*/
@Override
public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
if (!cacheEnabled)
return null;
RAMQueueEntry re = ramCache.get(key);
if (re != null) {
if (updateCacheMetrics) cacheStats.hit(caching);
re.access(accessCount.incrementAndGet());
return re.getData();
}
BucketEntry bucketEntry = backingMap.get(key);
if (bucketEntry != null) {
long start = System.nanoTime();
IdLock.Entry lockEntry = null;
try {
lockEntry = offsetLock.getLockEntry(bucketEntry.offset());
if (bucketEntry.equals(backingMap.get(key))) {
int len = bucketEntry.getLength();
ByteBuffer bb = ByteBuffer.allocate(len);
int lenRead = ioEngine.read(bb, bucketEntry.offset());
if (lenRead != len) {
throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
}
Cacheable cachedBlock = bucketEntry.deserializerReference(
deserialiserMap).deserialize(bb, true);
long timeTaken = System.nanoTime() - start;
if (updateCacheMetrics) {
cacheStats.hit(caching);
cacheStats.ioHit(timeTaken);
}
bucketEntry.access(accessCount.incrementAndGet());
if (this.ioErrorStartTime > 0) {
ioErrorStartTime = -1;
}
return cachedBlock;
}
} catch (IOException ioex) {
LOG.error("Failed reading block " + key + " from bucket cache", ioex);
checkIOErrorIsTolerated();
} finally {
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
}
}
if (!repeat && updateCacheMetrics) cacheStats.miss(caching);
return null;
}