本文整理汇总了Java中org.apache.cassandra.utils.concurrent.OpOrder.Barrier方法的典型用法代码示例。如果您正苦于以下问题:Java OpOrder.Barrier方法的具体用法?Java OpOrder.Barrier怎么用?Java OpOrder.Barrier使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.utils.concurrent.OpOrder
的用法示例。
在下文中一共展示了OpOrder.Barrier方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: setDiscarding
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Transitions this memtable into the "discarding" state as part of a flush:
 * after this call, writes ordered after {@code writeBarrier} are directed to a
 * newer memtable, and this memtable's allocator stops accepting new allocations.
 *
 * NOTE(review): the assignment order appears deliberate — lastReplayPosition is
 * published before writeBarrier, and the barrier acts as the visibility gate for
 * concurrent readers of these fields. Do not reorder without confirming.
 *
 * @param writeBarrier       barrier separating writes destined for this memtable
 *                           from writes destined for its successor
 * @param lastReplayPosition shared upper bound on the commit-log positions this
 *                           memtable may contain (see accepts())
 */
void setDiscarding(OpOrder.Barrier writeBarrier, AtomicReference<ReplayPosition> lastReplayPosition)
{
    // this transition must happen at most once per memtable
    assert this.writeBarrier == null;
    this.lastReplayPosition = lastReplayPosition;
    this.writeBarrier = writeBarrier;
    allocator.setDiscarding();
}
示例2: accepts
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Decides whether a write operation belongs in this memtable or a later one.
 *
 * The decision uses two mechanisms: the write barrier (operation ordering) and,
 * for durable writes, a lock-free max-maintenance loop on the shared
 * lastReplayPosition boundary. The CAS loop below is order-sensitive; do not
 * restructure it.
 *
 * @param opGroup        the operation group of the incoming write
 * @param replayPosition the write's commit-log position, or null for non-durable writes
 * @return true if this memtable should receive the write
 */
public boolean accepts(OpOrder.Group opGroup, ReplayPosition replayPosition)
{
    // if the barrier hasn't been set yet, then this memtable is still taking ALL writes
    OpOrder.Barrier barrier = this.writeBarrier;
    if (barrier == null)
        return true;
    // if the barrier has been set, but is in the past, we are definitely destined for a future memtable
    if (!barrier.isAfter(opGroup))
        return false;
    // if we aren't durable we are directed only by the barrier
    if (replayPosition == null)
        return true;
    while (true)
    {
        // otherwise we check if we are in the past/future wrt the CL boundary;
        // if the boundary hasn't been finalised yet, we simply update it to the max of
        // its current value and ours; if it HAS been finalised, we simply accept its judgement
        // this permits us to coordinate a safe boundary, as the boundary choice is made
        // atomically wrt our max() maintenance, so an operation cannot sneak into the past
        ReplayPosition currentLast = lastReplayPosition.get();
        // LastReplayPosition marks a finalised boundary: accept its verdict unconditionally
        if (currentLast instanceof LastReplayPosition)
            return currentLast.compareTo(replayPosition) >= 0;
        // boundary not finalised but already covers us: no update needed
        if (currentLast != null && currentLast.compareTo(replayPosition) >= 0)
            return true;
        // try to raise the boundary to our position; retry on a concurrent update
        if (lastReplayPosition.compareAndSet(currentLast, replayPosition))
            return true;
    }
}
示例3: tidy
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Releases the resources held by this sstable reader (bloom filter, index
 * summary, data/index files) once any in-flight reads have completed.
 *
 * If the owning ColumnFamilyStore still exists, a read barrier is issued and
 * awaited on a background thread before closing, so that concurrent readers
 * cannot observe closed files. If the CFS has been dropped, no readers can
 * exist and no barrier is needed.
 */
public void tidy()
{
    // don't try to cleanup if the sstablereader was never fully constructed
    if (!setup)
        return;
    final ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.cfId);
    final OpOrder.Barrier barrier;
    if (cfs != null)
    {
        // barrier must be issued before we schedule the close, so the await
        // below covers every read that started before this tidy()
        barrier = cfs.readOrdering.newBarrier();
        barrier.issue();
    }
    else
        barrier = null;
    // offload the potentially-blocking await and the file closes to a background thread
    ScheduledExecutors.nonPeriodicTasks.execute(new Runnable()
    {
        public void run()
        {
            if (barrier != null)
                barrier.await();
            // null-guard each component, consistent with the other tidy()
            // implementation in this file: a component may be absent if the
            // reader was only partially initialised
            if (bf != null)
                bf.close();
            if (summary != null)
                summary.close();
            if (runOnClose != null)
                runOnClose.run();
            if (dfile != null)
                dfile.close();
            if (ifile != null)
                ifile.close();
            typeRef.release();
        }
    });
}
示例4: setDiscarding
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
@VisibleForTesting
public void setDiscarding(OpOrder.Barrier writeBarrier, AtomicReference<ReplayPosition> lastReplayPosition)
{
assert this.writeBarrier == null;
this.commitLogUpperBound = lastReplayPosition;
this.writeBarrier = writeBarrier;
allocator.setDiscarding();
}
示例5: accepts
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Decides whether a write operation belongs in this memtable or a later one.
 *
 * Uses the write barrier for operation ordering and, for durable writes, a
 * lock-free max-maintenance CAS loop on the shared commitLogUpperBound. The
 * loop is order-sensitive; do not restructure it.
 *
 * @param opGroup        the operation group of the incoming write
 * @param replayPosition the write's commit-log position, or null for non-durable writes
 * @return true if this memtable should receive the write
 */
public boolean accepts(OpOrder.Group opGroup, ReplayPosition replayPosition)
{
    // if the barrier hasn't been set yet, then this memtable is still taking ALL writes
    OpOrder.Barrier barrier = this.writeBarrier;
    if (barrier == null)
        return true;
    // if the barrier has been set, but is in the past, we are definitely destined for a future memtable
    if (!barrier.isAfter(opGroup))
        return false;
    // if we aren't durable we are directed only by the barrier
    if (replayPosition == null)
        return true;
    while (true)
    {
        // otherwise we check if we are in the past/future wrt the CL boundary;
        // if the boundary hasn't been finalised yet, we simply update it to the max of
        // its current value and ours; if it HAS been finalised, we simply accept its judgement
        // this permits us to coordinate a safe boundary, as the boundary choice is made
        // atomically wrt our max() maintenance, so an operation cannot sneak into the past
        ReplayPosition currentLast = commitLogUpperBound.get();
        // LastReplayPosition marks a finalised boundary: accept its verdict unconditionally
        if (currentLast instanceof LastReplayPosition)
            return currentLast.compareTo(replayPosition) >= 0;
        // boundary not finalised but already covers us: no update needed
        if (currentLast != null && currentLast.compareTo(replayPosition) >= 0)
            return true;
        // try to raise the boundary to our position; retry on a concurrent update
        if (commitLogUpperBound.compareAndSet(currentLast, replayPosition))
            return true;
    }
}
示例6: PostFlush
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Creates a post-flush task over the given memtables.
 *
 * @param flushSecondaryIndexes whether secondary indexes must also be flushed
 * @param writeBarrier          barrier whose completion means all writes destined
 *                              for the flushed memtables have finished
 * @param memtables             the memtables covered by this flush
 */
private PostFlush(boolean flushSecondaryIndexes, OpOrder.Barrier writeBarrier,
                  List<Memtable> memtables)
{
    this.writeBarrier = writeBarrier;
    this.flushSecondaryIndexes = flushSecondaryIndexes;
    this.memtables = memtables;
}
示例7: reclaim
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Reclaims the memory of a flushed memtable once no in-flight reads can still
 * reference it.
 *
 * @param memtable the memtable whose memory is to be reclaimed
 */
private void reclaim(final Memtable memtable)
{
    // issue a read barrier for reclaiming the memory, and offload the wait to another thread
    // (the barrier must be issued before the task is queued so it covers all reads
    // started prior to this call)
    final OpOrder.Barrier readBarrier = readOrdering.newBarrier();
    readBarrier.issue();
    reclaimExecutor.execute(new WrappedRunnable()
    {
        public void runMayThrow() throws InterruptedException, ExecutionException
        {
            // block until every read ordered before the barrier has completed,
            // then it is safe to discard the memtable's memory
            readBarrier.await();
            memtable.setDiscarded();
        }
    });
}
示例8: tidy
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Releases the resources held by this sstable reader (bloom filter, index
 * summary, data/index files) once any in-flight reads have completed.
 *
 * If the owning ColumnFamilyStore still exists, a read barrier is issued and
 * awaited on a background thread before closing, so concurrent readers cannot
 * observe closed files; if the CFS has been dropped, no readers can exist and
 * no barrier is needed.
 */
public void tidy()
{
    // don't try to cleanup if the sstablereader was never fully constructed
    if (!setup)
        return;
    final ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.cfId);
    final OpOrder.Barrier barrier;
    if (cfs != null)
    {
        // issue the barrier before scheduling the close so the await below
        // covers every read that started before this tidy()
        barrier = cfs.readOrdering.newBarrier();
        barrier.issue();
    }
    else
        barrier = null;
    // offload the potentially-blocking await and the file closes to a background thread
    ScheduledExecutors.nonPeriodicTasks.execute(new Runnable()
    {
        public void run()
        {
            if (barrier != null)
                barrier.await();
            // each component is null-guarded: it may be absent if the reader
            // was only partially initialised
            if (bf != null)
                bf.close();
            if (summary != null)
                summary.close();
            if (runOnClose != null)
                runOnClose.run();
            if (dfile != null)
                dfile.close();
            if (ifile != null)
                ifile.close();
            globalRef.release();
        }
    });
}
示例9: testMemtableReplacement
import org.apache.cassandra.utils.concurrent.OpOrder; //导入方法依赖的package包/类
/**
 * Exercises the memtable switch/flush lifecycle in the Tracker:
 *  1. switches memtables twice, marking each predecessor as discarding with a
 *     write barrier, and verifies getMemtableFor routes each write group to
 *     the memtable that was current when the group was opened;
 *  2. marks both predecessors as flushing and replaces them (one with no
 *     sstables, one with a mock sstable), checking view contents, listener
 *     notifications, key-cache setup, and disk-space accounting;
 *  3. repeats a flush against an invalidated CFS and verifies the sstable is
 *     immediately deleted (add + deleting + list-changed notifications) and
 *     no live state remains.
 */
@Test
public void testMemtableReplacement()
{
    // incremental backups would add side effects to replaceFlushed; disable for the test
    // and restore the original setting at the end
    boolean backups = DatabaseDescriptor.isIncrementalBackupsEnabled();
    DatabaseDescriptor.setIncrementalBackupsEnabled(false);
    ColumnFamilyStore cfs = MockSchema.newCFS();
    MockListener listener = new MockListener(false);
    Tracker tracker = cfs.getTracker();
    tracker.subscribe(listener);
    // first switch: capture a write group opened while prev1 was current,
    // then mark prev1 discarding behind barrier1
    Memtable prev1 = tracker.switchMemtable(true, new Memtable(new AtomicReference<>(CommitLog.instance.getContext()), cfs));
    OpOrder.Group write1 = cfs.keyspace.writeOrder.getCurrent();
    OpOrder.Barrier barrier1 = cfs.keyspace.writeOrder.newBarrier();
    prev1.setDiscarding(barrier1, new AtomicReference<>(CommitLog.instance.getContext()));
    barrier1.issue();
    // second switch: same pattern for prev2
    Memtable prev2 = tracker.switchMemtable(false, new Memtable(new AtomicReference<>(CommitLog.instance.getContext()), cfs));
    OpOrder.Group write2 = cfs.keyspace.writeOrder.getCurrent();
    OpOrder.Barrier barrier2 = cfs.keyspace.writeOrder.newBarrier();
    prev2.setDiscarding(barrier2, new AtomicReference<>(CommitLog.instance.getContext()));
    barrier2.issue();
    Memtable cur = tracker.getView().getCurrentMemtable();
    OpOrder.Group writecur = cfs.keyspace.writeOrder.getCurrent();
    // each write group must still be routed to the memtable that was current when it began
    Assert.assertEquals(prev1, tracker.getMemtableFor(write1, ReplayPosition.NONE));
    Assert.assertEquals(prev2, tracker.getMemtableFor(write2, ReplayPosition.NONE));
    Assert.assertEquals(cur, tracker.getMemtableFor(writecur, ReplayPosition.NONE));
    // only the "true" switch (renewal) should have produced a notification
    Assert.assertEquals(1, listener.received.size());
    Assert.assertTrue(listener.received.get(0) instanceof MemtableRenewedNotification);
    listener.received.clear();
    // flushing set accumulates both predecessors, in either order
    tracker.markFlushing(prev2);
    Assert.assertEquals(1, tracker.getView().flushingMemtables.size());
    Assert.assertTrue(tracker.getView().flushingMemtables.contains(prev2));
    tracker.markFlushing(prev1);
    Assert.assertTrue(tracker.getView().flushingMemtables.contains(prev1));
    Assert.assertEquals(2, tracker.getView().flushingMemtables.size());
    // replacing with no sstables just removes from the flushing set
    tracker.replaceFlushed(prev1, Collections.emptyList());
    Assert.assertEquals(1, tracker.getView().flushingMemtables.size());
    Assert.assertTrue(tracker.getView().flushingMemtables.contains(prev2));
    // replacing with a real sstable registers it, notifies, sets up the key
    // cache and accounts its disk space
    SSTableReader reader = MockSchema.sstable(0, 10, false, cfs);
    tracker.replaceFlushed(prev2, Collections.singleton(reader));
    Assert.assertEquals(1, tracker.getView().sstables.size());
    Assert.assertEquals(1, listener.received.size());
    Assert.assertEquals(singleton(reader), ((SSTableAddedNotification) listener.received.get(0)).added);
    listener.received.clear();
    Assert.assertTrue(reader.isKeyCacheSetup());
    Assert.assertEquals(10, cfs.metric.liveDiskSpaceUsed.getCount());
    // test invalidated CFS
    cfs = MockSchema.newCFS();
    tracker = cfs.getTracker();
    listener = new MockListener(false);
    tracker.subscribe(listener);
    prev1 = tracker.switchMemtable(false, new Memtable(new AtomicReference<>(CommitLog.instance.getContext()), cfs));
    tracker.markFlushing(prev1);
    reader = MockSchema.sstable(0, 10, true, cfs);
    cfs.invalidate(false);
    // flushing into an invalidated CFS: sstable must be added then immediately removed/deleted
    tracker.replaceFlushed(prev1, singleton(reader));
    Assert.assertEquals(0, tracker.getView().sstables.size());
    Assert.assertEquals(0, tracker.getView().flushingMemtables.size());
    Assert.assertEquals(0, cfs.metric.liveDiskSpaceUsed.getCount());
    Assert.assertEquals(3, listener.received.size());
    Assert.assertEquals(singleton(reader), ((SSTableAddedNotification) listener.received.get(0)).added);
    Assert.assertTrue(listener.received.get(1) instanceof SSTableDeletingNotification);
    Assert.assertEquals(1, ((SSTableListChangedNotification) listener.received.get(2)).removed.size());
    // restore the global setting changed at the top of the test
    DatabaseDescriptor.setIncrementalBackupsEnabled(backups);
}