This article collects typical usage examples of the Java class org.apache.cassandra.db.columniterator.OnDiskAtomIterator. If you are wondering what OnDiskAtomIterator is for, or how and where to use it, the curated examples below may help.
OnDiskAtomIterator belongs to the package org.apache.cassandra.db.columniterator. 15 code examples of the class are shown below, sorted by popularity by default.
Example 1: LazilyCompactedRow
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public LazilyCompactedRow(CompactionController controller, List<? extends OnDiskAtomIterator> rows)
{
    super(rows.get(0).getKey());
    this.rows = rows;
    this.controller = controller;
    indexer = controller.cfs.indexManager.gcUpdaterFor(key);
    // Combine top-level tombstones, keeping the one with the highest markedForDeleteAt timestamp. This may be
    // purged (depending on gcBefore), but we need to remember it to properly delete columns during the merge
    maxRowTombstone = DeletionTime.LIVE;
    for (OnDiskAtomIterator row : rows)
    {
        DeletionTime rowTombstone = row.getColumnFamily().deletionInfo().getTopLevelDeletion();
        if (maxRowTombstone.compareTo(rowTombstone) < 0)
            maxRowTombstone = rowTombstone;
    }
    emptyColumnFamily = ArrayBackedSortedColumns.factory.create(controller.cfs.metadata);
    emptyColumnFamily.delete(maxRowTombstone);
    if (!maxRowTombstone.isLive() && maxRowTombstone.markedForDeleteAt < getMaxPurgeableTimestamp())
        emptyColumnFamily.purgeTombstones(controller.gcBefore);
    reducer = new Reducer();
    merger = Iterators.filter(MergeIterator.get(rows, emptyColumnFamily.getComparator().onDiskAtomComparator(), reducer), Predicates.notNull());
}
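The last line builds the lazy merge: the per-sstable iterators are combined, and nulls (which the reducer appears to emit for atoms that end up purged) are then skipped by Iterators.filter. Below is a minimal, self-contained sketch of just that null-filtering idiom using plain Guava; the class name and list contents are made up for illustration and this is not Cassandra's MergeIterator.

import com.google.common.base.Predicates;
import com.google.common.collect.Iterators;

import java.util.Arrays;
import java.util.Iterator;

public class NotNullFilterSketch
{
    public static void main(String[] args)
    {
        // Stand-in for the merge output: nulls mark elements that were dropped.
        Iterator<String> merged = Arrays.asList("a", null, "b").iterator();

        // Same idiom as the `merger` field above: skip the nulls lazily, as the consumer advances.
        Iterator<String> nonNull = Iterators.filter(merged, Predicates.notNull());
        while (nonNull.hasNext())
            System.out.println(nonNull.next()); // prints "a" then "b"
    }
}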
Example 2: assertContentEquals
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public static void assertContentEquals(OnDiskAtomIterator lhs, OnDiskAtomIterator rhs)
{
    assertEquals(lhs.getKey(), rhs.getKey());
    // check metadata
    ColumnFamily lcf = lhs.getColumnFamily();
    ColumnFamily rcf = rhs.getColumnFamily();
    if (lcf == null)
    {
        if (rcf == null)
            return;
        throw new AssertionError("LHS had no content for " + rhs.getKey());
    }
    else if (rcf == null)
        throw new AssertionError("RHS had no content for " + lhs.getKey());
    assertEquals(lcf.deletionInfo(), rcf.deletionInfo());
    // iterate columns
    while (lhs.hasNext())
    {
        Cell clhs = (Cell)lhs.next();
        assert rhs.hasNext() : "LHS contained more columns than RHS for " + lhs.getKey();
        Cell crhs = (Cell)rhs.next();
        assertEquals("Mismatched columns for " + lhs.getKey(), clhs, crhs);
    }
    assert !rhs.hasNext() : "RHS contained more columns than LHS for " + lhs.getKey();
}
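The helper boils down to walking two iterators in lockstep and failing on the first divergence. A generic, self-contained sketch of that pattern is shown below; it deliberately skips the key and deletion-info checks that the Cassandra helper performs, and the class and method names are illustrative only.

import java.util.Iterator;
import java.util.Objects;

public class IteratorContentCheck
{
    // Walk both iterators in lockstep; fail on the first mismatch or length difference.
    static <T> void assertSameContents(Iterator<T> lhs, Iterator<T> rhs)
    {
        while (lhs.hasNext())
        {
            if (!rhs.hasNext())
                throw new AssertionError("LHS contained more elements than RHS");
            T l = lhs.next();
            T r = rhs.next();
            if (!Objects.equals(l, r))
                throw new AssertionError("Mismatched elements: " + l + " vs " + r);
        }
        if (rhs.hasNext())
            throw new AssertionError("RHS contained more elements than LHS");
    }
}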
Example 3: LazilyCompactedRow
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public LazilyCompactedRow(CompactionController controller, List<? extends OnDiskAtomIterator> rows)
{
    super(rows.get(0).getKey());
    this.rows = rows;
    this.controller = controller;
    indexer = controller.cfs.indexManager.updaterFor(key);
    maxDelTimestamp = Long.MIN_VALUE;
    for (OnDiskAtomIterator row : rows)
    {
        ColumnFamily cf = row.getColumnFamily();
        maxDelTimestamp = Math.max(maxDelTimestamp, cf.deletionInfo().maxTimestamp());
        if (emptyColumnFamily == null)
            emptyColumnFamily = cf;
        else
            emptyColumnFamily.delete(cf);
    }
    this.shouldPurge = controller.shouldPurge(key, maxDelTimestamp);
}
Example 4: assertContentEquals
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public static void assertContentEquals(OnDiskAtomIterator lhs, OnDiskAtomIterator rhs) throws IOException
{
    assertEquals(lhs.getKey(), rhs.getKey());
    // check metadata
    ColumnFamily lcf = lhs.getColumnFamily();
    ColumnFamily rcf = rhs.getColumnFamily();
    if (lcf == null)
    {
        if (rcf == null)
            return;
        throw new AssertionError("LHS had no content for " + rhs.getKey());
    }
    else if (rcf == null)
        throw new AssertionError("RHS had no content for " + lhs.getKey());
    assertEquals(lcf.deletionInfo(), rcf.deletionInfo());
    // iterate columns
    while (lhs.hasNext())
    {
        Column clhs = (Column)lhs.next();
        assert rhs.hasNext() : "LHS contained more columns than RHS for " + lhs.getKey();
        Column crhs = (Column)rhs.next();
        assertEquals("Mismatched columns for " + lhs.getKey(), clhs, crhs);
    }
    assert !rhs.hasNext() : "RHS contained more columns than LHS for " + lhs.getKey();
}
Example 5: assertContentEquals
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public static void assertContentEquals(OnDiskAtomIterator lhs, OnDiskAtomIterator rhs) throws IOException
{
    assertEquals(lhs.getKey(), rhs.getKey());
    // check metadata
    ColumnFamily lcf = lhs.getColumnFamily();
    ColumnFamily rcf = rhs.getColumnFamily();
    if (lcf == null)
    {
        if (rcf == null)
            return;
        throw new AssertionError("LHS had no content for " + rhs.getKey());
    }
    else if (rcf == null)
        throw new AssertionError("RHS had no content for " + lhs.getKey());
    assertEquals(lcf.deletionInfo(), rcf.deletionInfo());
    // iterate columns
    while (lhs.hasNext())
    {
        IColumn clhs = (IColumn)lhs.next();
        assert rhs.hasNext() : "LHS contained more columns than RHS for " + lhs.getKey();
        IColumn crhs = (IColumn)rhs.next();
        assertEquals("Mismatched columns for " + lhs.getKey(), clhs, crhs);
    }
    assert !rhs.hasNext() : "RHS contained more columns than LHS for " + lhs.getKey();
}
Example 6: assertContentEquals
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public static void assertContentEquals(OnDiskAtomIterator lhs, OnDiskAtomIterator rhs) throws IOException
{
    assertEquals(lhs.getKey(), rhs.getKey());
    // check metadata
    ColumnFamily lcf = lhs.getColumnFamily();
    ColumnFamily rcf = rhs.getColumnFamily();
    if (lcf == null)
    {
        if (rcf == null)
            return;
        throw new AssertionError("LHS had no content for " + rhs.getKey());
    }
    else if (rcf == null)
        throw new AssertionError("RHS had no content for " + lhs.getKey());
    assertEquals(lcf.deletionInfo(), rcf.deletionInfo());
    // iterate columns
    while (lhs.hasNext())
    {
        Cell clhs = (Cell)lhs.next();
        assert rhs.hasNext() : "LHS contained more columns than RHS for " + lhs.getKey();
        Cell crhs = (Cell)rhs.next();
        assertEquals("Mismatched columns for " + lhs.getKey(), clhs, crhs);
    }
    assert !rhs.hasNext() : "RHS contained more columns than LHS for " + lhs.getKey();
}
Example 7: next
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public OnDiskAtomIterator next()
{
    try
    {
        if (row != null)
            dfile.seek(finishedAt);
        assert !dfile.isEOF();
        // Read data header
        DecoratedKey key = sstable.decodeKey(ByteBufferUtil.readWithShortLength(dfile));
        long dataSize = SSTableReader.readRowSize(dfile, sstable.descriptor);
        long dataStart = dfile.getFilePointer();
        finishedAt = dataStart + dataSize;
        row = new SSTableIdentityIterator(sstable, dfile, key, dataStart, dataSize);
        return row;
    }
    catch (IOException e)
    {
        sstable.markSuspect();
        throw new CorruptSSTableException(e, dfile.getPath());
    }
}
Example 8: next
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public OnDiskAtomIterator next()
{
    final Map.Entry<DecoratedKey, ColumnFamily> entry = iter.next();
    return new LazyColumnIterator(entry.getKey(), new IColumnIteratorFactory()
    {
        public OnDiskAtomIterator create()
        {
            return range.columnFilter(entry.getKey().getKey()).getColumnIterator(entry.getKey(), entry.getValue());
        }
    });
}
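The factory passed to LazyColumnIterator defers building the real column iterator until a consumer actually asks for columns, so rows that are later filtered out never pay that cost. Guava's Suppliers.memoize captures the same defer-and-cache idea; the sketch below illustrates only that pattern (buildExpensiveResult is a hypothetical placeholder, not a Cassandra method).

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;

public class LazyBuildSketch
{
    static String buildExpensiveResult()
    {
        return "built"; // stands in for constructing the real column iterator
    }

    public static void main(String[] args)
    {
        // Nothing is built yet; the factory runs only on the first get(),
        // and the result is cached for subsequent calls.
        Supplier<String> lazy = Suppliers.memoize(new Supplier<String>()
        {
            public String get()
            {
                return buildExpensiveResult();
            }
        });
        System.out.println(lazy.get());
    }
}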
Example 9: close
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public void close()
{
    for (OnDiskAtomIterator row : rows)
    {
        try
        {
            row.close();
        }
        catch (IOException e)
        {
            throw new RuntimeException(e);
        }
    }
    closed = true;
}
Example 10: computeNext
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
protected OnDiskAtomIterator computeNext()
{
    if (currentScanner == null)
        return endOfData();
    try
    {
        while (true)
        {
            if (currentScanner.hasNext())
                return currentScanner.next();
            positionOffset += currentScanner.getLengthInBytes();
            currentScanner.close();
            if (!sstableIterator.hasNext())
            {
                // reset to null so getCurrentPosition does not return wrong value
                currentScanner = null;
                return endOfData();
            }
            currentScanner = sstableIterator.next().getScanner(range, CompactionManager.instance.getRateLimiter());
        }
    }
    catch (IOException e)
    {
        throw new RuntimeException(e);
    }
}
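The computeNext/endOfData protocol matches Guava's AbstractIterator: the loop simply advances to the next non-empty scanner before giving up. Below is a minimal, self-contained sketch of that chaining pattern over generic iterators instead of SSTable scanners; the class and field names are illustrative only.

import com.google.common.collect.AbstractIterator;

import java.util.Iterator;
import java.util.List;

public class ChainedIterator<T> extends AbstractIterator<T>
{
    private final Iterator<Iterator<T>> sources;
    private Iterator<T> current;

    public ChainedIterator(List<Iterator<T>> sources)
    {
        this.sources = sources.iterator();
        this.current = this.sources.hasNext() ? this.sources.next() : null;
    }

    @Override
    protected T computeNext()
    {
        // Advance through sub-iterators until one yields an element or all are exhausted.
        while (current != null)
        {
            if (current.hasNext())
                return current.next();
            current = sources.hasNext() ? sources.next() : null;
        }
        return endOfData();
    }
}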
Example 11: getColumnIterator
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public OnDiskAtomIterator getColumnIterator(final DecoratedKey key, final ColumnFamily cf)
{
    assert cf != null;
    final Iterator<Cell> iter = getColumnIterator(cf);
    return new OnDiskAtomIterator()
    {
        public ColumnFamily getColumnFamily()
        {
            return cf;
        }

        public DecoratedKey getKey()
        {
            return key;
        }

        public boolean hasNext()
        {
            return iter.hasNext();
        }

        public OnDiskAtom next()
        {
            return iter.next();
        }

        public void close() throws IOException { }

        public void remove()
        {
            throw new UnsupportedOperationException();
        }
    };
}
Example 12: next
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public OnDiskAtomIterator next()
{
    final Map.Entry<DecoratedKey, T> entry = iter.next();
    return new LazyColumnIterator(entry.getKey(), new IColumnIteratorFactory()
    {
        public OnDiskAtomIterator create()
        {
            return range.columnFilter(entry.getKey().key).getColumnFamilyIterator(entry.getKey(), entry.getValue());
        }
    });
}
Example 13: filterColumnFamily
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
/**
 * Filter a cached row, which will not be modified by the filter, but may be modified by throwing out
 * tombstones that are no longer relevant.
 * The returned column family won't be thread safe.
 */
ColumnFamily filterColumnFamily(ColumnFamily cached, QueryFilter filter)
{
    ColumnFamily cf = cached.cloneMeShallow(ArrayBackedSortedColumns.factory, filter.filter.isReversed());
    OnDiskAtomIterator ci = filter.getColumnFamilyIterator(cached);
    int gcBefore = gcBefore(filter.timestamp);
    filter.collateOnDiskAtom(cf, ci, gcBefore);
    return removeDeletedCF(cf, gcBefore);
}
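The gcBefore cutoff is what decides which tombstones are old enough to be thrown out of the shallow clone. As a purely conceptual sketch of that cutoff, assuming a plain map of hypothetical deletion timestamps rather than Cassandra's DeletionInfo:

import java.util.Iterator;
import java.util.Map;

public class PurgeBeforeSketch
{
    // Drop entries whose deletion timestamp falls before the gcBefore cutoff,
    // analogous to discarding tombstones that are no longer relevant.
    static void purgeBefore(Map<String, Long> deletionTimes, long gcBefore)
    {
        Iterator<Map.Entry<String, Long>> it = deletionTimes.entrySet().iterator();
        while (it.hasNext())
        {
            if (it.next().getValue() < gcBefore)
                it.remove();
        }
    }
}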
Example 14: getMemtableColumnIterator
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public OnDiskAtomIterator getMemtableColumnIterator(Memtable memtable)
{
    ColumnFamily cf = memtable.getColumnFamily(key);
    if (cf == null)
        return null;
    return getColumnFamilyIterator(cf);
}
Example 15: prune
import org.apache.cassandra.db.columniterator.OnDiskAtomIterator; // import the required package/class
public ColumnFamily prune(DecoratedKey rowKey, ColumnFamily data)
{
    if (optimizedFilter == null)
        return data;
    ColumnFamily pruned = data.cloneMeShallow();
    IDiskAtomFilter filter = dataRange.columnFilter(rowKey.key);
    OnDiskAtomIterator iter = filter.getColumnFamilyIterator(rowKey, data);
    filter.collectReducedColumns(pruned, QueryFilter.gatherTombstones(pruned, iter), cfs.gcBefore(timestamp), timestamp);
    return pruned;
}