This article collects typical usage examples of the Java class org.apache.cassandra.cache.RowCacheKey. If you are wondering what RowCacheKey is for, how to use it, or what real-world usages look like, the curated examples here may help.
The RowCacheKey class belongs to the org.apache.cassandra.cache package. Fifteen code examples are shown below, sorted by popularity by default.
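All of the examples share one pattern: a RowCacheKey is built from a table identifier plus a DecoratedKey, and the global row cache is then probed or invalidated through CacheService.instance.rowCache. Before the numbered examples, here is a minimal illustrative sketch of that pattern. The RowCacheKeySketch class and its method names are hypothetical; the cfId-based constructor and the cache calls are the ones used in the examples below, and they assume an older Cassandra version where RowCacheKey is keyed by cfId.

import java.util.UUID;

import org.apache.cassandra.cache.RowCacheKey;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.service.CacheService;

public class RowCacheKeySketch
{
    /**
     * Illustrative only: check whether one partition of the given table is in the row cache.
     * Uses the cfId-based RowCacheKey constructor seen in most examples below.
     */
    public static boolean isCached(ColumnFamilyStore store, DecoratedKey key)
    {
        UUID cfId = store.metadata.cfId;
        RowCacheKey cacheKey = new RowCacheKey(cfId, key);
        return CacheService.instance.rowCache.containsKey(cacheKey);
    }

    /** Illustrative only: drop a single partition from the row cache. */
    public static void evict(ColumnFamilyStore store, DecoratedKey key)
    {
        CacheService.instance.rowCache.remove(new RowCacheKey(store.metadata.cfId, key));
    }
}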
Example 1: getBounds
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

private ArrayList<Bounds<Token>> getBounds(int nElements)
{
    ColumnFamilyStore store = Keyspace.open(KEYSPACE_CACHED).getColumnFamilyStore(CF_CACHED);
    TreeSet<DecoratedKey> orderedKeys = new TreeSet<>();

    // collect every key currently in the row cache, ordered by decorated key
    for (Iterator<RowCacheKey> it = CacheService.instance.rowCache.keyIterator(); it.hasNext();)
        orderedKeys.add(store.decorateKey(ByteBuffer.wrap(it.next().key)));

    ArrayList<Bounds<Token>> boundsToInvalidate = new ArrayList<>();
    Iterator<DecoratedKey> iterator = orderedKeys.iterator();

    // slice the ordered keys into token bounds of nElements keys each
    while (iterator.hasNext())
    {
        Token startRange = iterator.next().getToken();
        for (int i = 0; i < nElements - 2; i++)
            iterator.next();
        Token endRange = iterator.next().getToken();
        boundsToInvalidate.add(new Bounds<>(startRange, endRange));
    }
    return boundsToInvalidate;
}
Example 2: invalidate
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/** call when dropping or renaming a CF. Performs mbean housekeeping and invalidates CFS to other operations */
public void invalidate()
{
    valid = false;

    try
    {
        unregisterMBean();
    }
    catch (Exception e)
    {
        // this shouldn't block anything.
        logger.warn("Failed unregistering mbean: {}", mbeanName, e);
    }

    compactionStrategy.shutdown();

    SystemKeyspace.removeTruncationRecord(metadata.cfId);
    data.unreferenceSSTables();
    indexManager.invalidate();

    // drop every row cache entry that belongs to this column family
    for (RowCacheKey key : CacheService.instance.rowCache.getKeySet())
        if (key.cfId == metadata.cfId)
            invalidateCachedRow(key);
}
Example 3: invalidate
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/** call when dropping or renaming a CF. Performs mbean housekeeping and invalidates CFS to other operations */
public void invalidate()
{
    try
    {
        valid = false;
        unregisterMBean();

        SystemTable.removeTruncationRecord(metadata.cfId);
        data.unreferenceSSTables();
        indexManager.invalidate();

        // drop every row cache entry that belongs to this column family
        for (RowCacheKey key : CacheService.instance.rowCache.getKeySet())
        {
            if (key.cfId == metadata.cfId)
                invalidateCachedRow(key);
        }
    }
    catch (Exception e)
    {
        // this shouldn't block anything.
        logger.warn("Failed unregistering mbean: " + mbeanName, e);
    }
}
Example 4: invalidate
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/** call when dropping or renaming a CF. Performs mbean housekeeping and invalidates CFS to other operations */
public void invalidate()
{
    valid = false;

    try
    {
        unregisterMBean();
    }
    catch (Exception e)
    {
        // this shouldn't block anything.
        logger.warn("Failed unregistering mbean: " + mbeanName, e);
    }

    compactionStrategy.shutdown();

    SystemTable.removeTruncationRecord(metadata.cfId);
    data.unreferenceSSTables();
    indexManager.invalidate();

    // drop every row cache entry that belongs to this column family
    for (RowCacheKey key : CacheService.instance.rowCache.getKeySet())
        if (key.cfId == metadata.cfId)
            invalidateCachedRow(key);
}
Example 5: maybeUpdateRowCache
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

public void maybeUpdateRowCache(DecoratedKey key)
{
    if (!isRowCacheEnabled())
        return;

    // a write simply invalidates the cached partition; the next read will repopulate it
    RowCacheKey cacheKey = new RowCacheKey(metadata.cfId, key);
    invalidateCachedRow(cacheKey);
}
Example 6: cleanupCache
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

public void cleanupCache()
{
    Collection<Range<Token>> ranges = StorageService.instance.getLocalRanges(keyspace.getName());

    // evict any cached row of this table whose key no longer falls in a range owned by this node
    for (RowCacheKey key : CacheService.instance.rowCache.getKeySet())
    {
        DecoratedKey dk = partitioner.decorateKey(ByteBuffer.wrap(key.key));
        if (key.cfId == metadata.cfId && !Range.isInRanges(dk.token, ranges))
            invalidateCachedRow(dk);
    }
}
Example 7: getRawCachedRow
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/**
 * @return the cached row for @param key if it is already present in the cache.
 * That is, unlike getThroughCache, it will not readAndCache the row if it is not present, nor
 * are these calls counted in cache statistics.
 *
 * Note that this WILL cause deserialization of a SerializingCache row, so if all you
 * need to know is whether a row is present or not, use containsCachedRow instead.
 */
public ColumnFamily getRawCachedRow(DecoratedKey key)
{
    if (!isRowCacheEnabled())
        return null;

    IRowCacheEntry cached = CacheService.instance.rowCache.getInternal(new RowCacheKey(metadata.cfId, key));
    return cached == null || cached instanceof RowCacheSentinel ? null : (ColumnFamily) cached;
}
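The javadoc above implies a caller-side pattern: use containsCachedRow when only presence matters (to avoid deserializing a SerializingCache entry), and call getRawCachedRow only when the cached data itself is needed. A minimal sketch of that pattern; the cachedRowOrNull helper is hypothetical, while containsCachedRow and getRawCachedRow are the ColumnFamilyStore methods shown in this article.

// Hypothetical helper: only deserialize the cached row when it is actually present.
static ColumnFamily cachedRowOrNull(ColumnFamilyStore store, DecoratedKey dk)
{
    // containsCachedRow is a cheap presence check that does not deserialize the entry
    if (!store.containsCachedRow(dk))
        return null;

    // getRawCachedRow may deserialize a SerializingCache entry, so call it only when needed
    return store.getRawCachedRow(dk);
}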
Example 8: invalidateCachedRow
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

public void invalidateCachedRow(DecoratedKey key)
{
    UUID cfId = Schema.instance.getId(keyspace.getName(), this.name);
    if (cfId == null)
        return; // secondary index

    invalidateCachedRow(new RowCacheKey(cfId, key));
}
Example 9: submitTruncate
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

public Future<?> submitTruncate(final ColumnFamilyStore main, final long truncatedAt)
{
    Runnable runnable = new Runnable()
    {
        public void run()
        {
            compactionLock.writeLock().lock();
            try
            {
                ReplayPosition replayAfter = main.discardSSTables(truncatedAt);

                for (SecondaryIndex index : main.indexManager.getIndexes())
                    index.truncate(truncatedAt);

                SystemTable.saveTruncationRecord(main, truncatedAt, replayAfter);

                // purge every row cache entry that belongs to the truncated column family
                for (RowCacheKey key : CacheService.instance.rowCache.getKeySet())
                {
                    if (key.cfId == main.metadata.cfId)
                        CacheService.instance.rowCache.remove(key);
                }
            }
            finally
            {
                compactionLock.writeLock().unlock();
            }
        }
    };
    return executor.submit(runnable);
}
Example 10: maybeUpdateRowCache
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

public void maybeUpdateRowCache(DecoratedKey key, ColumnFamily columnFamily)
{
    if (!isRowCacheEnabled())
        return;

    RowCacheKey cacheKey = new RowCacheKey(metadata.cfId, key);

    // always invalidate a copying cache value
    if (CacheService.instance.rowCache.isPutCopying())
    {
        invalidateCachedRow(cacheKey);
        return;
    }

    // invalidate a normal cache value if it's a sentinel, so the read will retry (and include the new update)
    IRowCacheEntry cachedRow = CacheService.instance.rowCache.getInternal(cacheKey);
    if (cachedRow != null)
    {
        if (cachedRow instanceof RowCacheSentinel)
            invalidateCachedRow(cacheKey);
        else
            // columnFamily is what is written in the commit log. Because of the PeriodicCommitLog, that write can
            // happen concurrently with this update, so columnFamily must not be modified here, and if it contains
            // super columns, neither should they be. For super columns we therefore clone them when adding to the
            // cache; that's what addAllWithSCCopy does (see #3957)
            ((ColumnFamily) cachedRow).addAllWithSCCopy(columnFamily, HeapAllocator.instance);
    }
}
Example 11: getRawCachedRow
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/**
 * @return the cached row for @param key if it is already present in the cache.
 * That is, unlike getThroughCache, it will not readAndCache the row if it is not present, nor
 * are these calls counted in cache statistics.
 *
 * Note that this WILL cause deserialization of a SerializingCache row, so if all you
 * need to know is whether a row is present or not, use containsCachedRow instead.
 */
public ColumnFamily getRawCachedRow(DecoratedKey key)
{
    if (!isRowCacheEnabled() || metadata.cfId == null)
        return null; // secondary index

    IRowCacheEntry cached = CacheService.instance.rowCache.getInternal(new RowCacheKey(metadata.cfId, key));
    return cached == null || cached instanceof RowCacheSentinel ? null : (ColumnFamily) cached;
}
Example 12: invalidateCachedRow
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

public void invalidateCachedRow(DecoratedKey key)
{
    UUID cfId = Schema.instance.getId(table.name, this.columnFamily);
    if (cfId == null)
        return; // secondary index

    invalidateCachedRow(new RowCacheKey(cfId, key));
}
Example 13: containsCachedRow
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/**
 * @return true if @param key is contained in the row cache
 */
public boolean containsCachedRow(DecoratedKey key)
{
    return CacheService.instance.rowCache.getCapacity() != 0
           && CacheService.instance.rowCache.containsKey(new RowCacheKey(metadata.cfId, key));
}
Example 14: truncateBlocking
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

/**
 * Truncate deletes the entire column family's data with no expensive tombstone creation
 */
public void truncateBlocking()
{
    // We have two goals here:
    // - truncate should delete everything written before truncate was invoked
    // - but not delete anything that isn't part of the snapshot we create.
    // We accomplish this by first flushing manually, then snapshotting, and
    // recording the timestamp IN BETWEEN those actions. Any sstables created
    // at this timestamp or later will not be marked for delete.
    //
    // Bonus complication: since we store replay position in sstable metadata,
    // truncating those sstables means we will replay any CL segments from the
    // beginning if we restart before they [the CL segments] are discarded for
    // normal reasons post-truncate. To prevent this, we store truncation
    // position in the System keyspace.
    logger.debug("truncating {}", name);

    if (DatabaseDescriptor.isAutoSnapshot())
    {
        // flush the CF being truncated before forcing the new segment
        forceBlockingFlush();

        // sleep a little to make sure that our truncatedAt comes after any sstable
        // that was part of the flush we forced; otherwise on a tie, it won't get deleted.
        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.MILLISECONDS);
    }

    // nuke the memtable data w/o writing to disk first
    Keyspace.switchLock.writeLock().lock();
    try
    {
        for (ColumnFamilyStore cfs : concatWithIndexes())
        {
            Memtable mt = cfs.getMemtableThreadSafe();
            if (!mt.isClean())
                mt.cfs.data.renewMemtable();
        }
    }
    finally
    {
        Keyspace.switchLock.writeLock().unlock();
    }

    Runnable truncateRunnable = new Runnable()
    {
        public void run()
        {
            logger.debug("Discarding sstable data for truncated CF + indexes");

            final long truncatedAt = System.currentTimeMillis();
            if (DatabaseDescriptor.isAutoSnapshot())
                snapshot(Keyspace.getTimestampedSnapshotName(name));

            ReplayPosition replayAfter = discardSSTables(truncatedAt);

            for (SecondaryIndex index : indexManager.getIndexes())
                index.truncateBlocking(truncatedAt);

            SystemKeyspace.saveTruncationRecord(ColumnFamilyStore.this, truncatedAt, replayAfter);

            logger.debug("cleaning out row cache");
            // purge every row cache entry that belongs to the truncated column family
            for (RowCacheKey key : CacheService.instance.rowCache.getKeySet())
            {
                if (key.cfId == metadata.cfId)
                    CacheService.instance.rowCache.remove(key);
            }
        }
    };

    runWithCompactionsDisabled(Executors.callable(truncateRunnable), true);
    logger.debug("truncate complete");
}
Example 15: testRoundTrip
import org.apache.cassandra.cache.RowCacheKey; // import the RowCacheKey class

@Test
public void testRoundTrip() throws Exception
{
    CompactionManager.instance.disableAutoCompaction();

    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    String cf = "CachedIntCF";
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(cf);
    long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
    long startRowCacheOutOfRange = cachedStore.metric.rowCacheHitOutOfRange.getCount();

    // empty the row cache
    CacheService.instance.invalidateRowCache();

    // set global row cache size to 1 MB
    CacheService.instance.setRowCacheCapacityInMB(1);

    ByteBuffer key = ByteBufferUtil.bytes("rowcachekey");
    DecoratedKey dk = cachedStore.decorateKey(key);
    RowCacheKey rck = new RowCacheKey(cachedStore.metadata.ksAndCFName, dk);

    // write a single row so there is something to cache
    RowUpdateBuilder rub = new RowUpdateBuilder(cachedStore.metadata, System.currentTimeMillis(), key);
    rub.clustering(String.valueOf(0));
    rub.add("val", ByteBufferUtil.bytes("val" + 0));
    rub.build().applyUnsafe();

    // populate the row cache; the first read should not register a row cache hit
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
    assertEquals(startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());

    // do another query with the same limit; this time we should get a hit, and it should be in range
    Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
    assertEquals(++startRowCacheHits, cachedStore.metric.rowCacheHit.getCount());
    assertEquals(startRowCacheOutOfRange, cachedStore.metric.rowCacheHitOutOfRange.getCount());

    // the cached partition should contain exactly the row we wrote
    CachedPartition cachedCf = (CachedPartition) CacheService.instance.rowCache.get(rck);
    assertEquals(1, cachedCf.rowCount());
    for (Unfiltered unfiltered : Util.once(cachedCf.unfilteredIterator(ColumnFilter.selection(cachedCf.columns()), Slices.ALL, false)))
    {
        Row r = (Row) unfiltered;
        for (ColumnData c : r)
        {
            assertEquals(((Cell) c).value(), ByteBufferUtil.bytes("val" + 0));
        }
    }

    cachedStore.truncateBlocking();
}
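Note that the examples above span several Cassandra versions, and the RowCacheKey constructor differs between them: older code builds the key from the table's cfId, while the test in Example 15 uses the keyspace/table name pair. A short side-by-side sketch, assuming cfs is a ColumnFamilyStore and dk a DecoratedKey (both constructor forms are taken directly from the examples above):

// older versions (Examples 5, 7, 8, etc.): keyed by the table's cfId (a UUID)
RowCacheKey oldStyle = new RowCacheKey(cfs.metadata.cfId, dk);

// newer versions (Example 15): keyed by the keyspace/table name pair
RowCacheKey newStyle = new RowCacheKey(cfs.metadata.ksAndCFName, dk);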