This article collects typical usage examples of the Java method org.apache.cassandra.db.filter.QueryFilter.getIdentityFilter: what the method does, how to call it, and how it is used in real code. You can also explore further usage examples of its enclosing class, org.apache.cassandra.db.filter.QueryFilter.
Below are 15 code examples of the QueryFilter.getIdentityFilter method, ordered roughly by popularity.
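Before the individual examples, here is a minimal sketch of the pattern most of them share. This sketch is not taken from any of the listed projects: the keyspace, column family, and key names are placeholders, and it assumes the newer overload that takes a key, a column family name, and a read timestamp (Examples 2, 8, and 14 below use the older QueryPath-based overload instead).

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.utils.ByteBufferUtil;

// Reads an entire row ("identity" = no column restriction) for the given key.
public static ColumnFamily readWholeRow(String keyspaceName, String cfName, String key)
{
    ColumnFamilyStore cfs = Keyspace.open(keyspaceName).getColumnFamilyStore(cfName);
    // Turn the raw key bytes into a DecoratedKey using the configured partitioner.
    DecoratedKey dk = DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(key));
    // The timestamp decides which deletions (tombstones) are visible to this read.
    QueryFilter filter = QueryFilter.getIdentityFilter(dk, cfName, System.currentTimeMillis());
    return cfs.getColumnFamily(filter); // may be null if the row does not exist
}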
Example 1: getOldLocalCounterIds
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
public static List<CounterId.CounterIdRecord> getOldLocalCounterIds()
{
    List<CounterId.CounterIdRecord> l = new ArrayList<CounterId.CounterIdRecord>();
    Keyspace keyspace = Keyspace.open(Keyspace.SYSTEM_KS);
    QueryFilter filter = QueryFilter.getIdentityFilter(decorate(ALL_LOCAL_NODE_ID_KEY), COUNTER_ID_CF, System.currentTimeMillis());
    ColumnFamily cf = keyspace.getColumnFamilyStore(COUNTER_ID_CF).getColumnFamily(filter);
    CounterId previous = null;
    for (Column c : cf)
    {
        if (previous != null)
            l.add(new CounterId.CounterIdRecord(previous, c.timestamp()));

        // this will ignore the last column on purpose since it is the
        // current local node id
        previous = CounterId.wrap(c.name());
    }
    return l;
}
Example 2: getOldLocalCounterIds
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
public static List<CounterId.CounterIdRecord> getOldLocalCounterIds()
{
    List<CounterId.CounterIdRecord> l = new ArrayList<CounterId.CounterIdRecord>();
    Table table = Table.open(Table.SYSTEM_KS);
    QueryFilter filter = QueryFilter.getIdentityFilter(decorate(ALL_LOCAL_NODE_ID_KEY), new QueryPath(COUNTER_ID_CF));
    ColumnFamily cf = table.getColumnFamilyStore(COUNTER_ID_CF).getColumnFamily(filter);
    CounterId previous = null;
    for (IColumn c : cf)
    {
        if (previous != null)
            l.add(new CounterId.CounterIdRecord(previous, c.timestamp()));

        // this will ignore the last column on purpose since it is the
        // current local node id
        previous = CounterId.wrap(c.name());
    }
    return l;
}
Example 3: testRemoveSubColumnAndContainer
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void testRemoveSubColumnAndContainer()
{
    Keyspace keyspace = Keyspace.open("Keyspace1");
    ColumnFamilyStore store = keyspace.getColumnFamilyStore("Super1");
    Mutation rm;
    DecoratedKey dk = Util.dk("key2");

    // add data
    rm = new Mutation("Keyspace1", dk.getKey());
    Util.addMutation(rm, "Super1", "SC1", 1, "asdf", 0);
    rm.apply();
    store.forceBlockingFlush();

    // remove the SC
    ByteBuffer scName = ByteBufferUtil.bytes("SC1");
    CellName cname = CellNames.compositeDense(scName, getBytes(1L));
    rm = new Mutation("Keyspace1", dk.getKey());
    rm.deleteRange("Super1", SuperColumns.startOf(scName), SuperColumns.endOf(scName), 1);
    rm.apply();

    // Mark current time and make sure the next insert happens at least
    // one second after the previous one (since gc resolution is the second)
    QueryFilter filter = QueryFilter.getIdentityFilter(dk, "Super1", System.currentTimeMillis());
    Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);

    // remove the column itself
    rm = new Mutation("Keyspace1", dk.getKey());
    rm.delete("Super1", cname, 2);
    rm.apply();

    ColumnFamily retrieved = store.getColumnFamily(filter);
    assertFalse(retrieved.getColumn(cname).isLive());
    assertNull(Util.cloneAndRemoveDeleted(retrieved, Integer.MAX_VALUE));
}
Example 4: index
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Override
public void index(ByteBuffer rowKey, ColumnFamily cf)
{
    QueryFilter filter = QueryFilter.getIdentityFilter(DatabaseDescriptor.getPartitioner().decorateKey(rowKey),
                                                       baseCfs.getColumnFamilyName(),
                                                       System.currentTimeMillis());
    LAST_INDEXED_ROW = cf;
    LAST_INDEXED_KEY = rowKey;
}
Example 5: validateData
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
private void validateData(ColumnFamilyStore cfs, int numRows)
{
    for (int i = 0; i < numRows; i++)
    {
        DecoratedKey key = Util.dk(String.format("%3d", i));
        QueryFilter filter = QueryFilter.getIdentityFilter(key, cfs.getColumnFamilyName(), System.currentTimeMillis());
        ColumnFamily row = cfs.getColumnFamily(filter);
        assertNotNull(row);
        Cell cell = row.getColumn(Util.cellname("column"));
        assertNotNull(cell);
        assertEquals(100, cell.value().array().length);
    }
}
Example 6: index
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Override
public void index(ByteBuffer rowKey, ColumnFamily cf)
{
    QueryFilter filter = QueryFilter.getIdentityFilter(DatabaseDescriptor.getPartitioner().decorateKey(rowKey),
                                                       baseCfs.getColumnFamilyName(),
                                                       System.currentTimeMillis());
    LAST_INDEXED_ROW = baseCfs.getColumnFamily(filter);
    LAST_INDEXED_KEY = rowKey;
}
Example 7: row
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
/**
 * Returns the CQL3 {@link Row} identified by the specified key pair, using the specified time stamp to ignore
 * deleted columns. The {@link Row} is retrieved from the storage engine, so it involves IO operations.
 *
 * @param partitionKey The partition key.
 * @param timestamp    The time stamp to ignore deleted columns.
 * @return The CQL3 {@link Row} identified by the specified key pair.
 */
private Row row(DecoratedKey partitionKey, long timestamp) {
    QueryFilter queryFilter = QueryFilter.getIdentityFilter(partitionKey, metadata.cfName, timestamp);
    ColumnFamily columnFamily = baseCfs.getColumnFamily(queryFilter);
    if (columnFamily != null) {
        ColumnFamily cleanColumnFamily = cleanExpired(columnFamily, timestamp);
        return new Row(partitionKey, cleanColumnFamily);
    }
    return null;
}
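A hypothetical call site for this helper might look as follows; the key literal is an assumption for illustration, not part of the original example, and the current time is passed so that anything deleted before "now" is ignored.

// Assumed usage sketch only: look up the live state of one partition.
DecoratedKey partitionKey = DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes("some_key"));
Row cqlRow = row(partitionKey, System.currentTimeMillis());
if (cqlRow != null)
{
    ColumnFamily liveColumns = cqlRow.cf; // columns that survived the expiry cleanup
}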
Example 8: index
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Override
public void index(ByteBuffer rowKey, ColumnFamily cf)
{
    QueryFilter filter = QueryFilter.getIdentityFilter(DatabaseDescriptor.getPartitioner().decorateKey(rowKey),
                                                       new QueryPath(baseCfs.getColumnFamilyName()));
    LAST_INDEXED_ROW = baseCfs.getColumnFamily(filter);
    LAST_INDEXED_KEY = rowKey;
}
Example 9: testDontPurgeAccidentaly
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
private void testDontPurgeAccidentaly(String k, String cfname) throws InterruptedException
{
    // This test catches the regression of CASSANDRA-2786
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfname);

    // disable compaction while flushing
    cfs.clearUnsafe();
    cfs.disableAutoCompaction();

    // Add test row
    DecoratedKey key = Util.dk(k);
    Mutation rm = new Mutation(KEYSPACE1, key.getKey());
    rm.add(cfname, Util.cellname(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes("c")), ByteBufferUtil.EMPTY_BYTE_BUFFER, 0);
    rm.apply();
    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesBefore = cfs.getSSTables();

    QueryFilter filter = QueryFilter.getIdentityFilter(key, cfname, System.currentTimeMillis());
    assertTrue(cfs.getColumnFamily(filter).hasColumns());

    // Remove key
    rm = new Mutation(KEYSPACE1, key.getKey());
    rm.delete(cfname, 2);
    rm.apply();

    ColumnFamily cf = cfs.getColumnFamily(filter);
    assertTrue("should be empty: " + cf, cf == null || !cf.hasColumns());

    // Sleep one second so that the removal is indeed purgeable even with gcgrace == 0
    Thread.sleep(1000);

    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstablesAfter = cfs.getSSTables();
    Collection<SSTableReader> toCompact = new ArrayList<SSTableReader>();
    for (SSTableReader sstable : sstablesAfter)
        if (!sstablesBefore.contains(sstable))
            toCompact.add(sstable);

    Util.compact(cfs, toCompact);

    cf = cfs.getColumnFamily(filter);
    assertTrue("should be empty: " + cf, cf == null || !cf.hasColumns());
}
Example 10: getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete()
{
    Keyspace keyspace = Keyspace.open("Keyspace1");
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
    Mutation rm;
    DecoratedKey dk = Util.dk("key1");

    // add data
    rm = new Mutation(keyspace.getName(), dk.getKey());
    rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
    rm.apply();
    cfs.forceBlockingFlush();

    // remove
    rm = new Mutation(keyspace.getName(), dk.getKey());
    rm.delete(cfs.name, 10);
    rm.apply();

    // add another mutation because sstable maxtimestamp isn't set
    // correctly during flush if the most recent mutation is a row delete
    rm = new Mutation(keyspace.getName(), Util.dk("key2").getKey());
    rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("zxcv"), 20);
    rm.apply();
    cfs.forceBlockingFlush();

    // add yet one more mutation
    rm = new Mutation(keyspace.getName(), dk.getKey());
    rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("foobar"), 30);
    rm.apply();
    cfs.forceBlockingFlush();

    // A NamesQueryFilter goes down one code path (through collectTimeOrderedData())
    // It should only iterate the last flushed sstable, since it probably contains the most recent value for Column1
    QueryFilter filter = Util.namesQueryFilter(cfs, dk, "Column1");
    CollationController controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns(true);
    assertEquals(1, controller.getSstablesIterated());

    // SliceQueryFilter goes down another path (through collectAllData())
    // We will read "only" the last sstable in that case, but because the 2nd sstable has a tombstone that is more
    // recent than the maxTimestamp of the very first sstable we flushed, we should only read the 2 first sstables.
    filter = QueryFilter.getIdentityFilter(dk, cfs.name, System.currentTimeMillis());
    controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns(true);
    assertEquals(2, controller.getSstablesIterated());
}
Example 11: getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete()
    throws IOException, ExecutionException, InterruptedException
{
    Keyspace keyspace = Keyspace.open("Keyspace1");
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
    RowMutation rm;
    DecoratedKey dk = Util.dk("key1");

    // add data
    rm = new RowMutation(keyspace.getName(), dk.key);
    rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
    rm.apply();
    cfs.forceBlockingFlush();

    // remove
    rm = new RowMutation(keyspace.getName(), dk.key);
    rm.delete(cfs.name, 10);
    rm.apply();

    // add another mutation because sstable maxtimestamp isn't set
    // correctly during flush if the most recent mutation is a row delete
    rm = new RowMutation(keyspace.getName(), Util.dk("key2").key);
    rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("zxcv"), 20);
    rm.apply();
    cfs.forceBlockingFlush();

    // add yet one more mutation
    rm = new RowMutation(keyspace.getName(), dk.key);
    rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("foobar"), 30);
    rm.apply();
    cfs.forceBlockingFlush();

    // A NamesQueryFilter goes down one code path (through collectTimeOrderedData())
    // It should only iterate the last flushed sstable, since it probably contains the most recent value for Column1
    QueryFilter filter = QueryFilter.getNamesFilter(dk, cfs.name, ByteBufferUtil.bytes("Column1"), System.currentTimeMillis());
    CollationController controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns();
    assertEquals(1, controller.getSstablesIterated());

    // SliceQueryFilter goes down another path (through collectAllData())
    // We will read "only" the last sstable in that case, but because the 2nd sstable has a tombstone that is more
    // recent than the maxTimestamp of the very first sstable we flushed, we should only read the 2 first sstables.
    filter = QueryFilter.getIdentityFilter(dk, cfs.name, System.currentTimeMillis());
    controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns();
    assertEquals(2, controller.getSstablesIterated());
}
Example 12: testCompactionPurgeTombstonedRow
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void testCompactionPurgeTombstonedRow() throws ExecutionException, InterruptedException
{
    CompactionManager.instance.disableAutoCompaction();

    String keyspaceName = "Keyspace1";
    String cfName = "Standard1";
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");
    Mutation rm;
    QueryFilter filter = QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis());

    // inserts
    rm = new Mutation(keyspaceName, key.key);
    for (int i = 0; i < 10; i++)
        rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    rm.apply();

    // deletes row with timestamp such that not all columns are deleted
    rm = new Mutation(keyspaceName, key.key);
    rm.delete(cfName, 4);
    rm.apply();

    ColumnFamily cf = cfs.getColumnFamily(filter);
    assertTrue(cf.isMarkedForDelete());

    // flush and major compact (with tombstone purging)
    cfs.forceBlockingFlush();
    Util.compactAll(cfs, Integer.MAX_VALUE).get();
    assertFalse(cfs.getColumnFamily(filter).isMarkedForDelete());

    // re-inserts with timestamp lower than delete
    rm = new Mutation(keyspaceName, key.key);
    for (int i = 0; i < 5; i++)
        rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    rm.apply();

    // Check that the second insert went in
    cf = cfs.getColumnFamily(filter);
    assertEquals(10, cf.getColumnCount());
    for (Cell c : cf)
        assertFalse(c.isMarkedForDelete(System.currentTimeMillis()));
}
Example 13: testCompactionPurgeTombstonedRow
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void testCompactionPurgeTombstonedRow() throws ExecutionException, InterruptedException
{
    CompactionManager.instance.disableAutoCompaction();

    String keyspaceName = KEYSPACE1;
    String cfName = "Standard1";
    Keyspace keyspace = Keyspace.open(keyspaceName);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
    DecoratedKey key = Util.dk("key3");
    Mutation rm;
    QueryFilter filter = QueryFilter.getIdentityFilter(key, cfName, System.currentTimeMillis());

    // inserts
    rm = new Mutation(keyspaceName, key.getKey());
    for (int i = 0; i < 10; i++)
        rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    rm.applyUnsafe();

    // deletes row with timestamp such that not all columns are deleted
    rm = new Mutation(keyspaceName, key.getKey());
    rm.delete(cfName, 4);
    rm.applyUnsafe();

    ColumnFamily cf = cfs.getColumnFamily(filter);
    assertTrue(cf.isMarkedForDelete());

    // flush and major compact (with tombstone purging)
    cfs.forceBlockingFlush();
    Util.compactAll(cfs, Integer.MAX_VALUE).get();
    assertFalse(cfs.getColumnFamily(filter).isMarkedForDelete());

    // re-inserts with timestamp lower than delete
    rm = new Mutation(keyspaceName, key.getKey());
    for (int i = 0; i < 5; i++)
        rm.add(cfName, cellname(String.valueOf(i)), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
    rm.applyUnsafe();

    // Check that the second insert went in
    cf = cfs.getColumnFamily(filter);
    assertEquals(10, cf.getColumnCount());
    for (Cell c : cf)
        assertTrue(c.isLive());
}
Example 14: getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete()
    throws IOException, ExecutionException, InterruptedException
{
    Table table = Table.open("Keyspace1");
    ColumnFamilyStore store = table.getColumnFamilyStore("Standard1");
    RowMutation rm;
    DecoratedKey dk = Util.dk("key1");
    QueryPath path = new QueryPath("Standard1", null, ByteBufferUtil.bytes("Column1"));

    // add data
    rm = new RowMutation("Keyspace1", dk.key);
    rm.add(path, ByteBufferUtil.bytes("asdf"), 0);
    rm.apply();
    store.forceBlockingFlush();

    // remove
    rm = new RowMutation("Keyspace1", dk.key);
    rm.delete(new QueryPath("Standard1"), 10);
    rm.apply();

    // add another mutation because sstable maxtimestamp isn't set
    // correctly during flush if the most recent mutation is a row delete
    rm = new RowMutation("Keyspace1", Util.dk("key2").key);
    rm.add(path, ByteBufferUtil.bytes("zxcv"), 20);
    rm.apply();
    store.forceBlockingFlush();

    // add yet one more mutation
    rm = new RowMutation("Keyspace1", dk.key);
    rm.add(path, ByteBufferUtil.bytes("foobar"), 30);
    rm.apply();
    store.forceBlockingFlush();

    // A NamesQueryFilter goes down one code path (through collectTimeOrderedData())
    // It should only iterate the last flushed sstable, since it probably contains the most recent value for Column1
    QueryFilter filter = QueryFilter.getNamesFilter(dk, path, ByteBufferUtil.bytes("Column1"));
    CollationController controller = new CollationController(store, false, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns();
    assertEquals(1, controller.getSstablesIterated());

    // SliceQueryFilter goes down another path (through collectAllData())
    // We will read "only" the last sstable in that case, but because the 2nd sstable has a tombstone that is more
    // recent than the maxTimestamp of the very first sstable we flushed, we should only read the 2 first sstables.
    filter = QueryFilter.getIdentityFilter(dk, path);
    controller = new CollationController(store, false, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns();
    assertEquals(2, controller.getSstablesIterated());
}
Example 15: getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class this method depends on
@Test
public void getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete()
{
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    Mutation rm;
    DecoratedKey dk = Util.dk("key1");

    // add data
    rm = new Mutation(keyspace.getName(), dk.getKey());
    rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("asdf"), 0);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    // remove
    rm = new Mutation(keyspace.getName(), dk.getKey());
    rm.delete(cfs.name, 10);
    rm.applyUnsafe();

    // add another mutation because sstable maxtimestamp isn't set
    // correctly during flush if the most recent mutation is a row delete
    rm = new Mutation(keyspace.getName(), Util.dk("key2").getKey());
    rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("zxcv"), 20);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    // add yet one more mutation
    rm = new Mutation(keyspace.getName(), dk.getKey());
    rm.add(cfs.name, Util.cellname("Column1"), ByteBufferUtil.bytes("foobar"), 30);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    // A NamesQueryFilter goes down one code path (through collectTimeOrderedData())
    // It should only iterate the last flushed sstable, since it probably contains the most recent value for Column1
    QueryFilter filter = Util.namesQueryFilter(cfs, dk, "Column1");
    CollationController controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns(true);
    assertEquals(1, controller.getSstablesIterated());

    // SliceQueryFilter goes down another path (through collectAllData())
    // We will read "only" the last sstable in that case, but because the 2nd sstable has a tombstone that is more
    // recent than the maxTimestamp of the very first sstable we flushed, we should only read the 2 first sstables.
    filter = QueryFilter.getIdentityFilter(dk, cfs.name, System.currentTimeMillis());
    controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns(true);
    assertEquals(2, controller.getSstablesIterated());
}