This article collects typical usage examples of the Java class org.apache.cassandra.db.commitlog.ReplayPosition. If you have been wondering what ReplayPosition is, what it is used for, and how to use it, the curated class code examples below may help.
The ReplayPosition class belongs to the org.apache.cassandra.db.commitlog package. 15 code examples of the ReplayPosition class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: FlushRunnable
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
FlushRunnable(ReplayPosition context)
{
    this.context = context;

    long keySize = 0;
    for (RowPosition key : rows.keySet())
    {
        // make sure we don't write non-sensical keys
        assert key instanceof DecoratedKey;
        keySize += ((DecoratedKey) key).getKey().remaining();
    }
    estimatedSize = (long) ((keySize              // index entries
                             + keySize            // keys in data file
                             + liveDataSize.get()) // data
                            * 1.2);               // bloom filter and row index overhead
}
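For context, the 1.2 multiplier budgets roughly 20% extra on top of the key and data sizes for bloom-filter and row-index overhead. A minimal standalone sketch of the same estimation arithmetic; the helper name and parameters below are hypothetical, not part of the Cassandra API:

// Hypothetical helper reproducing the flush-size estimate above.
static long estimateFlushSize(long totalKeyBytes, long liveDataBytes)
{
    return (long) ((totalKeyBytes    // index entries
                    + totalKeyBytes  // keys in data file
                    + liveDataBytes) // data
                   * 1.2);           // bloom filter and row index overhead
}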
Example 2: discardSSTables
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
/**
 * Discard all SSTables that were created before the given timestamp.
 *
 * The caller should first ensure that compactions have quiesced.
 *
 * @param truncatedAt The timestamp of the truncation
 *                    (all SSTables before that timestamp are going to be marked as compacted)
 *
 * @return the most recent replay position of the truncated data
 */
public ReplayPosition discardSSTables(long truncatedAt)
{
    assert data.getCompacting().isEmpty() : data.getCompacting();

    List<SSTableReader> truncatedSSTables = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : getSSTables())
    {
        if (!sstable.newSince(truncatedAt))
            truncatedSSTables.add(sstable);
    }

    if (truncatedSSTables.isEmpty())
        return ReplayPosition.NONE;

    markObsolete(truncatedSSTables, OperationType.UNKNOWN);
    return ReplayPosition.getReplayPosition(truncatedSSTables);
}
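A hedged usage sketch: a truncation path might call discardSSTables once compactions have quiesced, treating ReplayPosition.NONE as "nothing predates the truncation point". The variable cfs and the surrounding coordination are assumptions:

// Sketch only: compactions are assumed to have quiesced on cfs before this call.
long truncatedAt = System.currentTimeMillis();
ReplayPosition replayAfter = cfs.discardSSTables(truncatedAt);
if (ReplayPosition.NONE.equals(replayAfter))
{
    // no sstable predates the truncation; nothing was discarded
}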
Example 3: getMemtableFor
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
public Memtable getMemtableFor(OpOrder.Group opGroup, ReplayPosition replayPosition)
{
    // since any new memtables appended to the list after we fetch it will be for operations started
    // after us, we can safely assume that we will always find the memtable that 'accepts' us;
    // if the barrier for any memtable is set whilst we are reading the list, it must accept us.
    // there may be multiple memtables in the list that would 'accept' us, however we only ever choose
    // the oldest such memtable, as accepts() only prevents us falling behind (i.e. ensures we don't
    // assign operations to a memtable that was retired/queued before we started)
    for (Memtable memtable : view.get().liveMemtables)
    {
        if (memtable.accepts(opGroup, replayPosition))
            return memtable;
    }
    throw new AssertionError(view.get().liveMemtables.toString());
}
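A hedged sketch of the caller's side of this lookup: the op group is started on the keyspace's write order and must stay open until the write lands in the returned memtable. The data tracker and the exact acquisition API are assumptions that vary across Cassandra versions:

// Sketch only: how a write might locate its memtable (version-dependent API).
OpOrder.Group opGroup = Keyspace.writeOrder.start();
try
{
    Memtable mt = data.getMemtableFor(opGroup, replayPosition);
    // ... apply the mutation to mt while the group is still open ...
}
finally
{
    opGroup.close();
}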
Example 4: defaultStatsMetadata
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
public static StatsMetadata defaultStatsMetadata()
{
    return new StatsMetadata(defaultRowSizeHistogram(),
                             defaultColumnCountHistogram(),
                             ReplayPosition.NONE,
                             Long.MIN_VALUE,
                             Long.MAX_VALUE,
                             Integer.MAX_VALUE,
                             NO_COMPRESSION_RATIO,
                             defaultTombstoneDropTimeHistogram(),
                             0,
                             Collections.<ByteBuffer>emptyList(),
                             Collections.<ByteBuffer>emptyList(),
                             true,
                             ActiveRepairService.UNREPAIRED_SSTABLE);
}
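These defaults act as safe placeholders, with ReplayPosition.NONE signalling that nothing is known about the sstable's position in the commit log. A hedged sketch of the fallback a reader might use when the stats component is missing; hasStats and readStats are hypothetical names, and the class hosting defaultStatsMetadata() is assumed to be MetadataCollector:

// Sketch: fall back to placeholder stats when none were persisted.
StatsMetadata stats = hasStats(descriptor)
                    ? readStats(descriptor)                      // hypothetical reader
                    : MetadataCollector.defaultStatsMetadata();  // safe placeholders
boolean unknownPosition = ReplayPosition.NONE.equals(stats.replayPosition);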
Example 5: MetadataCollector
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
public MetadataCollector(Collection<SSTableReader> sstables, CellNameType columnNameComparator, int level)
{
    this(columnNameComparator);
    replayPosition(ReplayPosition.getReplayPosition(sstables));
    sstableLevel(level);
    // Get the max timestamp of the precompacted sstables
    // and add the generations of live ancestors
    for (SSTableReader sstable : sstables)
    {
        addAncestor(sstable.descriptor.generation);
        for (Integer i : sstable.getAncestors())
            if (new File(sstable.descriptor.withGeneration(i).filenameFor(Component.DATA)).exists())
                addAncestor(i);
    }
}
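A hedged sketch of a compaction-side call, seeding the output sstable's metadata from its inputs; compactingSSTables, cfs and level are assumed context, not taken from the example:

// Sketch: metadata for a compacted sstable inherits the inputs' replay position,
// the target level, and the ancestry of the files being replaced.
MetadataCollector collector = new MetadataCollector(compactingSSTables,
                                                    cfs.metadata.comparator,
                                                    level);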
Example 6: FlushRunnable
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
FlushRunnable(CountDownLatch latch, Future<ReplayPosition> context)
{
    this.latch = latch;
    this.context = context;

    long keySize = 0;
    for (RowPosition key : rows.keySet())
    {
        // make sure we don't write non-sensical keys
        assert key instanceof DecoratedKey;
        keySize += ((DecoratedKey) key).key.remaining();
    }
    estimatedSize = (long) ((keySize             // index entries
                             + keySize           // keys in data file
                             + currentSize.get()) // data
                            * 1.2);              // bloom filter and row index overhead
}
Example 7: truncationAsMapEntry
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
private static String truncationAsMapEntry(ColumnFamilyStore cfs, long truncatedAt, ReplayPosition position)
{
    DataOutputBuffer out = new DataOutputBuffer();
    try
    {
        ReplayPosition.serializer.serialize(position, out);
        out.writeLong(truncatedAt);
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
    return String.format("{%s: 0x%s}",
                         cfs.metadata.cfId,
                         ByteBufferUtil.bytesToHex(ByteBuffer.wrap(out.getData(), 0, out.getLength())));
}
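The record written here is read back by truncationRecordFromBlob (used in Example 8). A hedged sketch of that inverse, assuming only the blob layout shown above (a ReplayPosition followed by the truncation timestamp); this is not guaranteed to match the actual implementation:

// Sketch: decode a record produced by truncationAsMapEntry.
private static Pair<ReplayPosition, Long> truncationRecordFromBlob(ByteBuffer bytes)
{
    try
    {
        DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(bytes));
        return Pair.create(ReplayPosition.serializer.deserialize(in), in.readLong());
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
}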
Example 8: getTruncationRecords
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
public static Map<UUID, Pair<ReplayPosition, Long>> getTruncationRecords()
{
    String req = "SELECT truncated_at FROM system.%s WHERE key = '%s'";
    UntypedResultSet rows = processInternal(String.format(req, LOCAL_CF, LOCAL_KEY));
    if (rows.isEmpty())
        return Collections.emptyMap();

    UntypedResultSet.Row row = rows.one();
    Map<UUID, ByteBuffer> rawMap = row.getMap("truncated_at", UUIDType.instance, BytesType.instance);
    if (rawMap == null)
        return Collections.emptyMap();

    Map<UUID, Pair<ReplayPosition, Long>> positions = new HashMap<UUID, Pair<ReplayPosition, Long>>();
    for (Map.Entry<UUID, ByteBuffer> entry : rawMap.entrySet())
        positions.put(entry.getKey(), truncationRecordFromBlob(entry.getValue()));
    return positions;
}
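A hedged sketch of a consumer, e.g. commit log replay deciding whether a mutation predates a table's truncation; cfId and mutationPosition are assumed context (ReplayPosition is comparable, so ordering checks like this are possible):

// Sketch: skip replaying a mutation recorded at or before the truncation point.
Pair<ReplayPosition, Long> record = getTruncationRecords().get(cfId);
if (record != null && mutationPosition.compareTo(record.left) <= 0)
    return; // the table was truncated after this mutation was written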
Example 9: SSTableMetadata
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
private SSTableMetadata()
{
    this(defaultRowSizeHistogram(),
         defaultColumnCountHistogram(),
         ReplayPosition.NONE,
         Long.MIN_VALUE,
         Long.MAX_VALUE,
         Integer.MAX_VALUE,
         NO_BLOOM_FLITER_FP_CHANCE,
         NO_COMPRESSION_RATIO,
         null,
         defaultTombstoneDropTimeHistogram(),
         0,
         Collections.<ByteBuffer>emptyList(),
         Collections.<ByteBuffer>emptyList());
}
Example 10: createCollector
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
public static Collector createCollector(Collection<SSTableReader> sstables, AbstractType<?> columnNameComparator, int level)
{
    Collector collector = new Collector(columnNameComparator);
    collector.replayPosition(ReplayPosition.getReplayPosition(sstables));
    collector.sstableLevel(level);
    // Get the max timestamp of the precompacted sstables
    // and add the generations of live ancestors
    for (SSTableReader sstable : sstables)
    {
        collector.addAncestor(sstable.descriptor.generation);
        for (Integer i : sstable.getAncestors())
        {
            if (new File(sstable.descriptor.withGeneration(i).filenameFor(Component.DATA)).exists())
                collector.addAncestor(i);
        }
    }
    return collector;
}
Example 11: legacySerialize
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
/**
 * Used to serialize to an old version - needed to be able to update the sstable level
 * without a full compaction.
 *
 * @deprecated will be removed once we can assume that the minimum upgrade-from version
 * is the version this patch first shipped in
 *
 * @param sstableStats the metadata to write
 * @param ancestors    generations of the sstable's ancestors
 * @param legacyDesc   descriptor in the on-disk (older) format version
 * @param out          destination for the serialized metadata
 * @throws IOException
 */
@Deprecated
public void legacySerialize(SSTableMetadata sstableStats, Set<Integer> ancestors, Descriptor legacyDesc, DataOutput out) throws IOException
{
    EstimatedHistogram.serializer.serialize(sstableStats.estimatedRowSize, out);
    EstimatedHistogram.serializer.serialize(sstableStats.estimatedColumnCount, out);
    ReplayPosition.serializer.serialize(sstableStats.replayPosition, out);
    out.writeLong(sstableStats.minTimestamp);
    out.writeLong(sstableStats.maxTimestamp);
    if (legacyDesc.version.tracksMaxLocalDeletionTime)
        out.writeInt(sstableStats.maxLocalDeletionTime);
    if (legacyDesc.version.hasBloomFilterFPChance)
        out.writeDouble(sstableStats.bloomFilterFPChance);
    out.writeDouble(sstableStats.compressionRatio);
    out.writeUTF(sstableStats.partitioner);
    out.writeInt(ancestors.size());
    for (Integer g : ancestors)
        out.writeInt(g);
    StreamingHistogram.serializer.serialize(sstableStats.estimatedTombstoneDropTime, out);
    out.writeInt(sstableStats.sstableLevel);
    if (legacyDesc.version.tracksMaxMinColumnNames)
        serializeMinMaxColumnNames(sstableStats.minColumnNames, sstableStats.maxColumnNames, out);
}
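A hedged sketch of a call site: when only the sstable level changes, the stats component can be rewritten in the file's own, possibly older, format version. The stream handling and file naming below are assumptions:

// Sketch: rewrite stats in the sstable's original format version.
File statsFile = new File(legacyDesc.filenameFor(Component.STATS));
try (DataOutputStream out = new DataOutputStream(new FileOutputStream(statsFile)))
{
    serializer.legacySerialize(sstableStats, ancestors, legacyDesc, out);
}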
Example 12: getMemtableFor
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
/**
 * Get the Memtable that the ordered writeOp should be directed to.
 */
public Memtable getMemtableFor(OpOrder.Group opGroup, ReplayPosition replayPosition)
{
    // since any new memtables appended to the list after we fetch it will be for operations started
    // after us, we can safely assume that we will always find the memtable that 'accepts' us;
    // if the barrier for any memtable is set whilst we are reading the list, it must accept us.
    // there may be multiple memtables in the list that would 'accept' us, however we only ever choose
    // the oldest such memtable, as accepts() only prevents us falling behind (i.e. ensures we don't
    // assign operations to a memtable that was retired/queued before we started)
    for (Memtable memtable : view.get().liveMemtables)
    {
        if (memtable.accepts(opGroup, replayPosition))
            return memtable;
    }
    throw new AssertionError(view.get().liveMemtables.toString());
}
Example 13: waitForFlushes
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
/**
 * @return a Future yielding the commit log position that can be guaranteed to have been
 * successfully written to sstables for this table once the future completes
 */
private ListenableFuture<ReplayPosition> waitForFlushes()
{
    // we grab the current memtable; once any preceding memtables have flushed, we know its
    // commitLogLowerBound has been set (as it is set with the upper bound of the preceding memtable)
    final Memtable current = data.getView().getCurrentMemtable();
    ListenableFutureTask<ReplayPosition> task = ListenableFutureTask.create(new Callable<ReplayPosition>()
    {
        public ReplayPosition call()
        {
            logger.debug("forceFlush requested but everything is clean in {}", name);
            return current.getCommitLogLowerBound();
        }
    });
    postFlushExecutor.execute(task);
    return task;
}
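A hedged sketch of consuming the returned future with a Guava callback, matching the ListenableFuture type above; the logger is assumed from the surrounding class:

// Sketch: react once all flushes up to the current memtable have completed.
ListenableFuture<ReplayPosition> flushed = waitForFlushes();
Futures.addCallback(flushed, new FutureCallback<ReplayPosition>()
{
    public void onSuccess(ReplayPosition position)
    {
        logger.debug("writes up to {} are durable in sstables", position);
    }

    public void onFailure(Throwable t)
    {
        logger.error("post-flush task failed", t);
    }
});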
Example 14: apply
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
/**
 * Insert/Update the column family for this key.
 * The caller is responsible for acquiring the Keyspace.switchLock.
 *
 * @param update         the partition-level changes to apply
 * @param indexer        transaction used to keep secondary indexes in sync with the write
 * @param opGroup        op group the write belongs to
 * @param replayPosition commit log position of the write
 */
public void apply(PartitionUpdate update, UpdateTransaction indexer, OpOrder.Group opGroup, ReplayPosition replayPosition)
{
    long start = System.nanoTime();
    Memtable mt = data.getMemtableFor(opGroup, replayPosition);
    try
    {
        long timeDelta = mt.put(update, indexer, opGroup);
        DecoratedKey key = update.partitionKey();
        maybeUpdateRowCache(key);
        metric.samplers.get(Sampler.WRITES).addSample(key.getKey(), key.hashCode(), 1);
        metric.writeLatency.addNano(System.nanoTime() - start);
        if (timeDelta < Long.MAX_VALUE)
            metric.colUpdateTimeDeltaHistogram.update(timeDelta);
    }
    catch (RuntimeException e)
    {
        throw new RuntimeException(e.getMessage()
                                   + " for ks: "
                                   + keyspace.getName() + ", table: " + name, e);
    }
}
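For context, a hedged outline of where the replayPosition argument comes from on the write path; the commit log call shown is version-dependent and assumed here, not taken from the example:

// Sketch: allocate a commit log position, then route and apply the update
// under an op group (API details vary across Cassandra versions).
ReplayPosition replayPosition = CommitLog.instance.add(mutation);
OpOrder.Group opGroup = Keyspace.writeOrder.start();
try
{
    cfs.apply(update, indexer, opGroup, replayPosition);
}
finally
{
    opGroup.close();
}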
Example 15: discardSSTables
import org.apache.cassandra.db.commitlog.ReplayPosition; // import the required package/class
/**
 * Discard all SSTables that were created before the given timestamp.
 * The caller is responsible for obtaining the compactionLock.
 *
 * @param truncatedAt The timestamp of the truncation
 *                    (all SSTables before that timestamp are going to be marked as compacted)
 *
 * @return the most recent replay position of the truncated data
 */
public ReplayPosition discardSSTables(long truncatedAt)
{
    List<SSTableReader> truncatedSSTables = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : getSSTables())
    {
        if (!sstable.newSince(truncatedAt))
            truncatedSSTables.add(sstable);
    }

    if (truncatedSSTables.isEmpty())
        return ReplayPosition.NONE;

    markCompacted(truncatedSSTables, OperationType.UNKNOWN);
    return ReplayPosition.getReplayPosition(truncatedSSTables);
}