This article collects typical usage examples of the Java property org.apache.cassandra.db.commitlog.ReplayPosition.NONE. If you are wondering what ReplayPosition.NONE does or how it is used, the curated examples below may help. For broader context, see the enclosing class, org.apache.cassandra.db.commitlog.ReplayPosition.
The following presents 8 code examples of the ReplayPosition.NONE property, ordered by popularity.
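Before the examples, a minimal sketch of the pattern they all share: ReplayPosition.NONE is a null-object sentinel meaning "no known commit log position". In the Cassandra versions these excerpts come from, ReplayPosition is a Comparable pair of (segment id, position) and NONE sorts before every real position; the class and helper names below are illustrative assumptions, not part of the Cassandra API.

import org.apache.cassandra.db.commitlog.ReplayPosition;

public class ReplayPositionNoneSketch
{
    // Hypothetical helper: advance a running upper bound, treating NONE as "unset".
    // Because NONE compares below every real position, it never wins here.
    static ReplayPosition max(ReplayPosition current, ReplayPosition candidate)
    {
        return candidate.compareTo(current) > 0 ? candidate : current;
    }

    public static void main(String[] args)
    {
        ReplayPosition bound = ReplayPosition.NONE; // nothing observed yet
        System.out.println(ReplayPosition.NONE.equals(bound)); // prints: true
    }
}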
Example 1: defaultStatsMetadata
public static StatsMetadata defaultStatsMetadata()
{
    // Defaults for brand-new metadata; ReplayPosition.NONE marks
    // "no commit log position associated yet".
    return new StatsMetadata(defaultRowSizeHistogram(),
                             defaultColumnCountHistogram(),
                             ReplayPosition.NONE,
                             Long.MIN_VALUE,
                             Long.MAX_VALUE,
                             Integer.MAX_VALUE,
                             NO_COMPRESSION_RATIO,
                             defaultTombstoneDropTimeHistogram(),
                             0,
                             Collections.<ByteBuffer>emptyList(),
                             Collections.<ByteBuffer>emptyList(),
                             true,
                             ActiveRepairService.UNREPAIRED_SSTABLE);
}
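Here ReplayPosition.NONE fills the replay-position slot of freshly created default metadata: a brand-new sstable is not yet tied to any commit log segment. Examples 2, 4, and 6 below show the same default in older metadata layouts.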
Example 2: SSTableMetadata
private SSTableMetadata()
{
    // Legacy metadata defaults: no replay position, unknown partitioner (null).
    // NO_BLOOM_FLITER_FP_CHANCE is spelled as in the Cassandra source.
    this(defaultRowSizeHistogram(),
         defaultColumnCountHistogram(),
         ReplayPosition.NONE,
         Long.MIN_VALUE,
         Long.MAX_VALUE,
         Integer.MAX_VALUE,
         NO_BLOOM_FLITER_FP_CHANCE,
         NO_COMPRESSION_RATIO,
         null,
         defaultTombstoneDropTimeHistogram(),
         0,
         Collections.<ByteBuffer>emptyList(),
         Collections.<ByteBuffer>emptyList());
}
Example 3: discardSSTables
/**
 * Discard all SSTables that were created before the given timestamp. The caller
 * is responsible for obtaining the compactionLock.
 *
 * @param truncatedAt the timestamp of the truncation
 *                    (all SSTables before that timestamp are going to be marked as compacted)
 *
 * @return the most recent replay position of the truncated data
 */
public ReplayPosition discardSSTables(long truncatedAt)
{
    List<SSTableReader> truncatedSSTables = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : getSSTables())
    {
        if (!sstable.newSince(truncatedAt))
            truncatedSSTables.add(sstable);
    }
    if (truncatedSSTables.isEmpty())
        return ReplayPosition.NONE;
    markCompacted(truncatedSSTables, OperationType.UNKNOWN);
    return ReplayPosition.getReplayPosition(truncatedSSTables);
}
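To show how the returned position is typically consumed, here is a hedged sketch of a hypothetical caller. The names around the call (cfs, SystemKeyspace.saveTruncationRecord) are modelled on the Cassandra API of this era but are assumptions as far as this excerpt is concerned.

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.SystemKeyspace;
import org.apache.cassandra.db.commitlog.ReplayPosition;

// Hedged sketch: record the truncation point so commit log replay can skip
// mutations that the discarded sstables already covered.
void truncateSketch(ColumnFamilyStore cfs)
{
    long truncatedAt = System.currentTimeMillis();
    ReplayPosition replayAfter = cfs.discardSSTables(truncatedAt);
    // ReplayPosition.NONE means nothing was old enough to discard.
    SystemKeyspace.saveTruncationRecord(cfs, truncatedAt, replayAfter);
}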
Example 4: SSTableMetadata
private SSTableMetadata()
{
    // Still-older layout with fewer fields; ReplayPosition.NONE remains
    // the "no position" default.
    this(defaultRowSizeHistogram(),
         defaultColumnCountHistogram(),
         ReplayPosition.NONE,
         Long.MIN_VALUE,
         Long.MAX_VALUE,
         NO_COMPRESSION_RATIO,
         null,
         defaultTombstoneDropTimeHistogram());
}
Example 5: deserialize
public Pair<SSTableMetadata, Set<Integer>> deserialize(DataInputStream dis, Descriptor desc) throws IOException
{
    EstimatedHistogram rowSizes = EstimatedHistogram.serializer.deserialize(dis);
    EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(dis);
    ReplayPosition replayPosition = desc.version.metadataIncludesReplayPosition
                                  ? ReplayPosition.serializer.deserialize(dis)
                                  : ReplayPosition.NONE;
    if (!desc.version.metadataIncludesModernReplayPosition)
    {
        // replay position may be "from the future" thanks to older versions generating them with nanotime.
        // make sure we don't omit replaying something that we should. see CASSANDRA-4782
        replayPosition = ReplayPosition.NONE;
    }
    long minTimestamp = desc.version.tracksMinTimestamp ? dis.readLong() : Long.MIN_VALUE;
    long maxTimestamp = desc.version.containsTimestamp() ? dis.readLong() : Long.MAX_VALUE;
    if (!desc.version.tracksMaxTimestamp) // see javadoc to Descriptor.containsTimestamp
        maxTimestamp = Long.MAX_VALUE;
    double compressionRatio = desc.version.hasCompressionRatio
                            ? dis.readDouble()
                            : NO_COMPRESSION_RATIO;
    String partitioner = desc.version.hasPartitioner ? dis.readUTF() : null;
    int nbAncestors = desc.version.hasAncestors ? dis.readInt() : 0;
    Set<Integer> ancestors = new HashSet<Integer>(nbAncestors);
    for (int i = 0; i < nbAncestors; i++)
        ancestors.add(dis.readInt());
    StreamingHistogram tombstoneHistogram = desc.version.tracksTombstones
                                          ? StreamingHistogram.serializer.deserialize(dis)
                                          : defaultTombstoneDropTimeHistogram();
    return Pair.create(new SSTableMetadata(rowSizes,
                                           columnCounts,
                                           replayPosition,
                                           minTimestamp,
                                           maxTimestamp,
                                           compressionRatio,
                                           partitioner,
                                           tombstoneHistogram),
                       ancestors);
}
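Note the defensive reset near the top: for versions that predate the "modern" replay position, the stored value cannot be trusted (older versions generated positions with nanotime, see CASSANDRA-4782), so it is replaced with ReplayPosition.NONE. NONE forces replay from the beginning rather than risking skipped mutations.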
Example 6: defaultStatsMetadata
public static StatsMetadata defaultStatsMetadata()
{
    // Earlier variant of Example 1, without the legacy-counter and repair fields.
    return new StatsMetadata(defaultRowSizeHistogram(),
                             defaultColumnCountHistogram(),
                             ReplayPosition.NONE,
                             Long.MIN_VALUE,
                             Long.MAX_VALUE,
                             Integer.MAX_VALUE,
                             NO_COMPRESSION_RATIO,
                             defaultTombstoneDropTimeHistogram(),
                             0,
                             Collections.<ByteBuffer>emptyList(),
                             Collections.<ByteBuffer>emptyList());
}
Example 7: call
public ReplayPosition call()
{
    writeBarrier.await();
    /**
     * we can flush 2is as soon as the barrier completes, as they will be consistent with (or ahead of) the
     * flushed memtables and CL position, which is as good as we can guarantee.
     * TODO: SecondaryIndex should support setBarrier(), so custom implementations can co-ordinate exactly
     * with CL as we do with memtables/CFS-backed SecondaryIndexes.
     */
    if (flushSecondaryIndexes)
        indexManager.flushAllNonCFSBackedIndexesBlocking();
    try
    {
        // we wait on the latch for the commitLogUpperBound to be set, and so that waiters
        // on this task can rely on all prior flushes being complete
        latch.await();
    }
    catch (InterruptedException e)
    {
        throw new IllegalStateException();
    }
    ReplayPosition commitLogUpperBound = ReplayPosition.NONE;
    // If a flush errored out but the error was ignored, make sure we don't discard the commit log.
    if (flushFailure == null && !memtables.isEmpty())
    {
        Memtable memtable = memtables.get(0);
        commitLogUpperBound = memtable.getCommitLogUpperBound();
        CommitLog.instance.discardCompletedSegments(metadata.cfId, memtable.getCommitLogLowerBound(), commitLogUpperBound);
    }
    metric.pendingFlushes.dec();
    if (flushFailure != null)
        throw flushFailure;
    return commitLogUpperBound;
}
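How a caller might consume this task's result; the executor field and the FBUtilities.waitOnFuture call below are assumptions modelled on surrounding Cassandra code, not shown in this excerpt.

import java.util.concurrent.Future;
import org.apache.cassandra.db.commitlog.ReplayPosition;
import org.apache.cassandra.utils.FBUtilities;

// Hedged sketch: waiters block on the post-flush task; a result of
// ReplayPosition.NONE tells them the flush discarded no commit log segments
// (either it failed, or there was nothing to flush).
Future<ReplayPosition> flushed = postFlushExecutor.submit(postFlushTask);
ReplayPosition bound = FBUtilities.waitOnFuture(flushed);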
Example 8: deserialize
public StatsMetadata deserialize(Version version, DataInputPlus in) throws IOException
{
    EstimatedHistogram partitionSizes = EstimatedHistogram.serializer.deserialize(in);
    EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
    ReplayPosition commitLogLowerBound = ReplayPosition.NONE, commitLogUpperBound;
    commitLogUpperBound = ReplayPosition.serializer.deserialize(in);
    long minTimestamp = in.readLong();
    long maxTimestamp = in.readLong();
    // We use MAX_VALUE as that's the default value for "no deletion time"
    int minLocalDeletionTime = version.storeRows() ? in.readInt() : Integer.MAX_VALUE;
    int maxLocalDeletionTime = in.readInt();
    int minTTL = version.storeRows() ? in.readInt() : 0;
    int maxTTL = version.storeRows() ? in.readInt() : Integer.MAX_VALUE;
    double compressionRatio = in.readDouble();
    StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
    int sstableLevel = in.readInt();
    long repairedAt = 0;
    if (version.hasRepairedAt())
        repairedAt = in.readLong();
    int colCount = in.readInt();
    List<ByteBuffer> minClusteringValues = new ArrayList<>(colCount);
    for (int i = 0; i < colCount; i++)
        minClusteringValues.add(ByteBufferUtil.readWithShortLength(in));
    colCount = in.readInt();
    List<ByteBuffer> maxClusteringValues = new ArrayList<>(colCount);
    for (int i = 0; i < colCount; i++)
        maxClusteringValues.add(ByteBufferUtil.readWithShortLength(in));
    boolean hasLegacyCounterShards = true;
    if (version.tracksLegacyCounterShards())
        hasLegacyCounterShards = in.readBoolean();
    long totalColumnsSet = version.storeRows() ? in.readLong() : -1L;
    long totalRows = version.storeRows() ? in.readLong() : -1L;
    if (version.hasCommitLogLowerBound())
        commitLogLowerBound = ReplayPosition.serializer.deserialize(in);
    IntervalSet<ReplayPosition> commitLogIntervals;
    if (version.hasCommitLogIntervals())
        commitLogIntervals = replayPositionSetSerializer.deserialize(in);
    else
        commitLogIntervals = new IntervalSet<ReplayPosition>(commitLogLowerBound, commitLogUpperBound);
    return new StatsMetadata(partitionSizes,
                             columnCounts,
                             commitLogIntervals,
                             minTimestamp,
                             maxTimestamp,
                             minLocalDeletionTime,
                             maxLocalDeletionTime,
                             minTTL,
                             maxTTL,
                             compressionRatio,
                             tombstoneHistogram,
                             sstableLevel,
                             minClusteringValues,
                             maxClusteringValues,
                             hasLegacyCounterShards,
                             repairedAt,
                             totalColumnsSet,
                             totalRows);
}
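Here ReplayPosition.NONE serves as the implicit lower bound for versions that persisted neither a lower bound nor an interval set: the reconstructed IntervalSet then covers everything from the very beginning of the commit log up to the stored upper bound, which is the safe, conservative choice.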