This article collects typical usage examples of the Java method org.apache.cassandra.db.filter.QueryFilter.getNamesFilter. If you are wondering what QueryFilter.getNamesFilter does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore the enclosing class, org.apache.cassandra.db.filter.QueryFilter, for more context.
The following presents 9 code examples of QueryFilter.getNamesFilter, sorted by popularity by default. You can upvote examples you find useful; ratings help surface better Java code samples.
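Across the examples below the call pattern is broadly the same: decorate a row key, describe the cells to fetch (a SortedSet of cell names in newer versions, or a QueryPath plus column name in older ones), and pass them to QueryFilter.getNamesFilter, together with a read timestamp where the overload requires one. The sketch below is a minimal illustration of that pattern against a 2.1-style API; it is an assumption-laden example, and the keyspace "MyKeyspace", table "MyCF", cell "col", and row key "row1" are placeholder names, not taken from the examples.
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.db.composites.CellName;
import org.apache.cassandra.db.filter.QueryFilter;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
public class NamesFilterSketch
{
    // Fetch a single named cell for one row key; keyspace/table/cell names are placeholders.
    public static ColumnFamily readNamedCell()
    {
        ColumnFamilyStore cfs = Keyspace.open("MyKeyspace").getColumnFamilyStore("MyCF");
        DecoratedKey key = StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes("row1"));
        // Cell names must be supplied in comparator order, hence the TreeSet over cfs.getComparator().
        SortedSet<CellName> names = new TreeSet<CellName>(cfs.getComparator());
        names.add(cfs.getComparator().makeCellName("col"));
        QueryFilter filter = QueryFilter.getNamesFilter(key, cfs.name, names, System.currentTimeMillis());
        return cfs.getColumnFamily(filter);
    }
}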
Example 1: isIndexBuilt
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
public static boolean isIndexBuilt(String keyspaceName, String indexName)
{
    ColumnFamilyStore cfs = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(INDEX_CF);
    QueryFilter filter = QueryFilter.getNamesFilter(decorate(ByteBufferUtil.bytes(keyspaceName)),
                                                    INDEX_CF,
                                                    FBUtilities.singleton(cfs.getComparator().makeCellName(indexName), cfs.getComparator()),
                                                    System.currentTimeMillis());
    return ColumnFamilyStore.removeDeleted(cfs.getColumnFamily(filter), Integer.MAX_VALUE) != null;
}
Example 2: namesQueryFilter
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
public static QueryFilter namesQueryFilter(ColumnFamilyStore cfs, DecoratedKey key, String... names)
{
    SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
    for (String str : names)
        s.add(cellname(str));
    return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
}
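A hypothetical call to this helper might look like the following; it assumes the helper and the cellname(...)/Util.dk(...) test utilities are in scope, and the "Keyspace1"/"Standard1" and column names are placeholders reused from the test example further down.
// Hypothetical usage of the helper above.
ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
DecoratedKey key = Util.dk("key1");
QueryFilter filter = namesQueryFilter(cfs, key, "Column1", "Column2");
ColumnFamily cf = cfs.getColumnFamily(filter);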
Example 3: isIndexBuilt
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
public static boolean isIndexBuilt(String keyspaceName, String indexName)
{
    ColumnFamilyStore cfs = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(INDEX_CF);
    QueryFilter filter = QueryFilter.getNamesFilter(decorate(ByteBufferUtil.bytes(keyspaceName)),
                                                    INDEX_CF,
                                                    ByteBufferUtil.bytes(indexName),
                                                    System.currentTimeMillis());
    return ColumnFamilyStore.removeDeleted(cfs.getColumnFamily(filter), Integer.MAX_VALUE) != null;
}
Example 4: isIndexBuilt
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
public static boolean isIndexBuilt(String table, String indexName)
{
    ColumnFamilyStore cfs = Table.open(Table.SYSTEM_KS).getColumnFamilyStore(INDEX_CF);
    QueryFilter filter = QueryFilter.getNamesFilter(decorate(ByteBufferUtil.bytes(table)),
                                                    new QueryPath(INDEX_CF),
                                                    ByteBufferUtil.bytes(indexName));
    return ColumnFamilyStore.removeDeleted(cfs.getColumnFamily(filter), Integer.MAX_VALUE) != null;
}
Example 5: getLastMigrationId
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
/**
 * Used only in case node has old style migration schema (newly updated)
 * @return the UUID identifying version of the last applied migration
 */
@Deprecated
public static UUID getLastMigrationId()
{
    DecoratedKey dkey = StorageService.getPartitioner().decorateKey(LAST_MIGRATION_KEY);
    Table defs = Table.open(Table.SYSTEM_KS);
    ColumnFamilyStore cfStore = defs.getColumnFamilyStore(DefsTable.OLD_SCHEMA_CF);
    QueryFilter filter = QueryFilter.getNamesFilter(dkey, new QueryPath(DefsTable.OLD_SCHEMA_CF), LAST_MIGRATION_KEY);
    ColumnFamily cf = cfStore.getColumnFamily(filter);
    if (cf == null || cf.getColumnNames().size() == 0)
        return null;
    else
        return UUIDGen.getUUID(cf.getColumn(LAST_MIGRATION_KEY).value());
}
Example 6: namesQueryFilter
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
public static QueryFilter namesQueryFilter(ColumnFamilyStore cfs, DecoratedKey key, CellName... names)
{
    SortedSet<CellName> s = new TreeSet<CellName>(cfs.getComparator());
    for (CellName n : names)
        s.add(n);
    return QueryFilter.getNamesFilter(key, cfs.name, s, System.currentTimeMillis());
}
Example 7: getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
@Test
public void getTopLevelColumnsSkipsSSTablesModifiedBeforeRowDelete()
        throws IOException, ExecutionException, InterruptedException
{
    Keyspace keyspace = Keyspace.open("Keyspace1");
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
    RowMutation rm;
    DecoratedKey dk = Util.dk("key1");

    // add data
    rm = new RowMutation(keyspace.getName(), dk.key);
    rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("asdf"), 0);
    rm.apply();
    cfs.forceBlockingFlush();

    // remove
    rm = new RowMutation(keyspace.getName(), dk.key);
    rm.delete(cfs.name, 10);
    rm.apply();

    // add another mutation because sstable maxtimestamp isn't set
    // correctly during flush if the most recent mutation is a row delete
    rm = new RowMutation(keyspace.getName(), Util.dk("key2").key);
    rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("zxcv"), 20);
    rm.apply();
    cfs.forceBlockingFlush();

    // add yet one more mutation
    rm = new RowMutation(keyspace.getName(), dk.key);
    rm.add(cfs.name, ByteBufferUtil.bytes("Column1"), ByteBufferUtil.bytes("foobar"), 30);
    rm.apply();
    cfs.forceBlockingFlush();

    // A NamesQueryFilter goes down one code path (through collectTimeOrderedData())
    // It should only iterate the last flushed sstable, since it probably contains the most recent value for Column1
    QueryFilter filter = QueryFilter.getNamesFilter(dk, cfs.name, ByteBufferUtil.bytes("Column1"), System.currentTimeMillis());
    CollationController controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns();
    assertEquals(1, controller.getSstablesIterated());

    // SliceQueryFilter goes down another path (through collectAllData())
    // We will read "only" the last sstable in that case, but because the 2nd sstable has a tombstone that is more
    // recent than the maxTimestamp of the very first sstable we flushed, we should only read the 2 first sstables.
    filter = QueryFilter.getIdentityFilter(dk, cfs.name, System.currentTimeMillis());
    controller = new CollationController(cfs, filter, Integer.MIN_VALUE);
    controller.getTopLevelColumns();
    assertEquals(2, controller.getSstablesIterated());
}
Example 8: upgradeSystemData
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
/** if system data becomes incompatible across versions of cassandra, that logic (and associated purging) is managed here */
private static void upgradeSystemData() throws ExecutionException, InterruptedException
{
    Table table = Table.open(Table.SYSTEM_KS);
    ColumnFamilyStore oldStatusCfs = table.getColumnFamilyStore(OLD_STATUS_CF);
    if (oldStatusCfs.getSSTables().size() > 0)
    {
        logger.info("Old system data found in {}.{}; migrating to new format in {}.{}", Table.SYSTEM_KS, OLD_STATUS_CF, Table.SYSTEM_KS, LOCAL_CF);

        SortedSet<ByteBuffer> cols = new TreeSet<ByteBuffer>(BytesType.instance);
        cols.add(ByteBufferUtil.bytes("ClusterName"));
        cols.add(ByteBufferUtil.bytes("Token"));
        QueryFilter filter = QueryFilter.getNamesFilter(decorate(ByteBufferUtil.bytes("L")), new QueryPath(OLD_STATUS_CF), cols);
        ColumnFamily oldCf = oldStatusCfs.getColumnFamily(filter);
        Iterator<IColumn> oldColumns = oldCf.columns.iterator();

        String clusterName = null;
        try
        {
            clusterName = ByteBufferUtil.string(oldColumns.next().value());
        }
        catch (CharacterCodingException e)
        {
            throw new RuntimeException(e);
        }

        // serialize the old token as a collection of (one) tokens.
        Token token = StorageService.getPartitioner().getTokenFactory().fromByteArray(oldColumns.next().value());
        String tokenBytes = tokensAsSet(Collections.singleton(token));

        // (assume that any node getting upgraded was bootstrapped, since that was stored in a separate row for no particular reason)
        String req = "INSERT INTO system.%s (key, cluster_name, tokens, bootstrapped) VALUES ('%s', '%s', %s, '%s')";
        processInternal(String.format(req, LOCAL_CF, LOCAL_KEY, clusterName, tokenBytes, BootstrapState.COMPLETED.name()));

        oldStatusCfs.truncate();
    }

    ColumnFamilyStore oldHintsCfs = table.getColumnFamilyStore(OLD_HINTS_CF);
    if (oldHintsCfs.getSSTables().size() > 0)
    {
        logger.info("Possible old-format hints found. Truncating");
        oldHintsCfs.truncate();
    }

    migrateKeyAlias();
}
Example 9: checkHealth
import org.apache.cassandra.db.filter.QueryFilter; // import the package/class the method depends on
/**
 * One of three things will happen if you try to read the system table:
 * 1. files are present and you can read them: great
 * 2. no files are there: great (new node is assumed)
 * 3. files are present but you can't read them: bad
 * @throws ConfigurationException
 */
public static void checkHealth() throws ConfigurationException
{
    Table table;
    try
    {
        table = Table.open(Table.SYSTEM_KS);
    }
    catch (AssertionError err)
    {
        // this happens when a user switches from OPP to RP.
        ConfigurationException ex = new ConfigurationException("Could not read system table!");
        ex.initCause(err);
        throw ex;
    }

    String savedClusterName;

    // See if there is still data in System.LocationInfo, indicating that the system data has not yet been
    // upgraded by SystemTable.upgradeSystemData()
    ColumnFamilyStore oldStatusCfs = table.getColumnFamilyStore(OLD_STATUS_CF);
    if (oldStatusCfs.getSSTables().size() > 0)
    {
        logger.debug("Detected system data in {}.{}, checking saved cluster name", Table.SYSTEM_KS, OLD_STATUS_CF);
        SortedSet<ByteBuffer> cols = new TreeSet<ByteBuffer>(BytesType.instance);
        cols.add(ByteBufferUtil.bytes("ClusterName"));
        QueryFilter filter = QueryFilter.getNamesFilter(decorate(ByteBufferUtil.bytes("L")), new QueryPath(OLD_STATUS_CF), cols);
        ColumnFamily oldCf = oldStatusCfs.getColumnFamily(filter);
        try
        {
            savedClusterName = ByteBufferUtil.string(oldCf.getColumn(ByteBufferUtil.bytes("ClusterName")).value());
        }
        catch (CharacterCodingException e)
        {
            throw new RuntimeException(e);
        }
    }
    else
    {
        ColumnFamilyStore cfs = table.getColumnFamilyStore(LOCAL_CF);

        String req = "SELECT cluster_name FROM system.%s WHERE key='%s'";
        UntypedResultSet result = processInternal(String.format(req, LOCAL_CF, LOCAL_KEY));

        if (result.isEmpty() || !result.one().has("cluster_name"))
        {
            // this is a brand new node
            if (!cfs.getSSTables().isEmpty())
                throw new ConfigurationException("Found system table files, but they couldn't be loaded!");

            // no system files. this is a new node.
            req = "INSERT INTO system.%s (key, cluster_name) VALUES ('%s', '%s')";
            processInternal(String.format(req, LOCAL_CF, LOCAL_KEY, DatabaseDescriptor.getClusterName()));
            return;
        }

        savedClusterName = result.one().getString("cluster_name");
    }

    if (!DatabaseDescriptor.getClusterName().equals(savedClusterName))
        throw new ConfigurationException("Saved cluster name " + savedClusterName + " != configured name " + DatabaseDescriptor.getClusterName());
}