This page collects typical usage examples of the Java class org.apache.cassandra.config.KSMetaData. If you are unsure what KSMetaData is for, or how it is used in practice, the curated class code examples below should help.
The KSMetaData class belongs to the org.apache.cassandra.config package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
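Before the examples, here is a minimal sketch (not taken from any of the projects below) of the most common lookup pattern: fetching a keyspace definition by name through Schema.instance and inspecting it. The keyspace name "my_keyspace" and the wrapping helper method are illustrative assumptions; the accessors used (name, strategyClass, strategyOptions, cfMetaData()) all appear in the examples on this page.

import org.apache.cassandra.config.KSMetaData;
import org.apache.cassandra.config.Schema;

// Hypothetical helper: print the replication settings of a keyspace, if it exists.
// "my_keyspace" is a placeholder name; an undefined keyspace simply yields null.
static void describeKeyspace()
{
    KSMetaData ksm = Schema.instance.getKSMetaData("my_keyspace");
    if (ksm == null)
    {
        System.out.println("my_keyspace is not defined");
        return;
    }
    System.out.println(ksm.name + " uses " + ksm.strategyClass.getSimpleName()
                       + " with options " + ksm.strategyOptions
                       + " and " + ksm.cfMetaData().size() + " column families");
}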
Example 1: clearStorage
import org.apache.cassandra.config.KSMetaData; // import the required package/class
@Override
public void clearStorage() throws BackendException {
    openStores.clear();
    try {
        KSMetaData ksMetaData = Schema.instance.getKSMetaData(keySpaceName);

        // Not a big deal if the keyspace doesn't exist (dropped manually by user or tests).
        // This is called on a per-test-setup basis to make sure that the previous test cleaned
        // everything up, so the first invocation would always fail as the keyspace doesn't exist yet.
        if (ksMetaData == null)
            return;

        for (String cfName : ksMetaData.cfMetaData().keySet())
            StorageService.instance.truncate(keySpaceName, cfName);
    } catch (Exception e) {
        throw new PermanentBackendException(e);
    }
}
Example 2: validate
import org.apache.cassandra.config.KSMetaData; // import the required package/class
public void validate(ClientState state) throws RequestValidationException
{
    KSMetaData ksm = Schema.instance.getKSMetaData(name);
    if (ksm == null)
        throw new InvalidRequestException("Unknown keyspace " + name);
    if (ksm.name.equalsIgnoreCase(Keyspace.SYSTEM_KS))
        throw new InvalidRequestException("Cannot alter system keyspace");

    attrs.validate();

    if (attrs.getReplicationStrategyClass() == null && !attrs.getReplicationOptions().isEmpty())
    {
        throw new ConfigurationException("Missing replication strategy class");
    }
    else if (attrs.getReplicationStrategyClass() != null)
    {
        // The strategy is validated through KSMetaData.validate() in announceKeyspaceUpdate below.
        // However, for backward compatibility with thrift, this doesn't validate unexpected options yet,
        // so doing proper validation here.
        AbstractReplicationStrategy.validateReplicationStrategy(name,
                                                                AbstractReplicationStrategy.getClass(attrs.getReplicationStrategyClass()),
                                                                StorageService.instance.getTokenMetadata(),
                                                                DatabaseDescriptor.getEndpointSnitch(),
                                                                attrs.getReplicationOptions());
    }
}
Example 3: findIndexedCF
import org.apache.cassandra.config.KSMetaData; // import the required package/class
private CFMetaData findIndexedCF() throws InvalidRequestException
{
    KSMetaData ksm = Schema.instance.getKSMetaData(keyspace());
    if (ksm == null)
        throw new KeyspaceNotDefinedException("Keyspace " + keyspace() + " does not exist");

    for (CFMetaData cfm : ksm.cfMetaData().values())
    {
        if (findIndexedColumn(cfm) != null)
            return cfm;
    }

    if (ifExists)
        return null;
    else
        throw new InvalidRequestException("Index '" + indexName + "' could not be found in any of the tables of keyspace '" + keyspace() + '\'');
}
Example 4: hasSameReplication
import org.apache.cassandra.config.KSMetaData; // import the required package/class
private boolean hasSameReplication(List<String> list)
{
    if (list.isEmpty())
        return false;

    for (int i = 0; i < list.size() - 1; i++)
    {
        KSMetaData ksm1 = Schema.instance.getKSMetaData(list.get(i));
        KSMetaData ksm2 = Schema.instance.getKSMetaData(list.get(i + 1));
        if (!ksm1.strategyClass.equals(ksm2.strategyClass) ||
            !Iterators.elementsEqual(ksm1.strategyOptions.entrySet().iterator(),
                                     ksm2.strategyOptions.entrySet().iterator()))
            return false;
    }
    return true;
}
Example 5: finishStartup
import org.apache.cassandra.config.KSMetaData; // import the required package/class
public static void finishStartup()
{
    setupVersion();

    migrateIndexInterval();
    migrateCachingOption();

    // add entries to system schema columnfamilies for the hardcoded system definitions
    KSMetaData ksmd = Schema.instance.getKSMetaData(Keyspace.SYSTEM_KS);

    // delete old, possibly obsolete entries in schema columnfamilies
    for (String cfname : Arrays.asList(SystemKeyspace.SCHEMA_KEYSPACES_CF,
                                       SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF,
                                       SystemKeyspace.SCHEMA_COLUMNS_CF,
                                       SystemKeyspace.SCHEMA_TRIGGERS_CF,
                                       SystemKeyspace.SCHEMA_USER_TYPES_CF))
        executeOnceInternal(String.format("DELETE FROM system.%s WHERE keyspace_name = ?", cfname), ksmd.name);

    // (+1 to timestamp to make sure we don't get shadowed by the tombstones we just added)
    ksmd.toSchema(FBUtilities.timestampMicros() + 1).apply();
}
Example 6: loadFromKeyspace
import org.apache.cassandra.config.KSMetaData; // import the required package/class
/**
 * Load keyspace definitions from the system keyspace (system.SCHEMA_KEYSPACES_CF)
 *
 * @return Collection of found keyspace definitions
 */
public static Collection<KSMetaData> loadFromKeyspace()
{
    List<Row> serializedSchema = SystemKeyspace.serializedSchema(SystemKeyspace.SCHEMA_KEYSPACES_CF);

    List<KSMetaData> keyspaces = new ArrayList<>(serializedSchema.size());

    for (Row row : serializedSchema)
    {
        if (Schema.invalidSchemaRow(row) || Schema.ignoredSchemaRow(row))
            continue;

        keyspaces.add(KSMetaData.fromSchema(row, serializedColumnFamilies(row.key), serializedUserTypes(row.key)));
    }

    return keyspaces;
}
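As a hedged follow-up to Example 6: the returned collection can be inspected like any other KSMetaData instances. The sketch below assumes some caller already holds the result (the class enclosing loadFromKeyspace is not shown in the snippet), and only uses accessors that appear elsewhere on this page; the helper name dumpDefinitions is made up for illustration.

import java.util.Collection;
import org.apache.cassandra.config.KSMetaData;

// Hypothetical consumer of loadFromKeyspace(): print each keyspace definition
// together with the names of its column families.
static void dumpDefinitions(Collection<KSMetaData> keyspaces)
{
    for (KSMetaData ksm : keyspaces)
        System.out.println(ksm.name + " -> " + ksm.cfMetaData().keySet());
}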
Example 7: system_update_keyspace
import org.apache.cassandra.config.KSMetaData; // import the required package/class
/**
 * Update an existing keyspace, but do not allow column family modifications.
 * @throws SchemaDisagreementException
 */
public String system_update_keyspace(KsDef ks_def)
throws InvalidRequestException, SchemaDisagreementException, TException
{
    logger.debug("update_keyspace");

    try
    {
        ThriftValidation.validateKeyspaceNotSystem(ks_def.name);
        state().hasKeyspaceAccess(ks_def.name, Permission.ALTER);
        ThriftValidation.validateKeyspace(ks_def.name);
        if (ks_def.getCf_defs() != null && ks_def.getCf_defs().size() > 0)
            throw new InvalidRequestException("Keyspace update must not contain any column family definitions.");

        MigrationManager.announceKeyspaceUpdate(KSMetaData.fromThrift(ks_def));
        return Schema.instance.getVersion().toString();
    }
    catch (RequestValidationException e)
    {
        throw ThriftConversion.toThrift(e);
    }
}
Example 8: testBigIntegerEndpointsA
import org.apache.cassandra.config.KSMetaData; // import the required package/class
/**
 * 4 same rack endpoints
 *
 * @throws UnknownHostException
 */
@Test
public void testBigIntegerEndpointsA() throws UnknownHostException
{
    RackInferringSnitch endpointSnitch = new RackInferringSnitch();
    AbstractReplicationStrategy strategy = new OldNetworkTopologyStrategy("Keyspace1", tmd, endpointSnitch, KSMetaData.optsWithRF(1));

    addEndpoint("0", "5", "254.0.0.1");
    addEndpoint("10", "15", "254.0.0.2");
    addEndpoint("20", "25", "254.0.0.3");
    addEndpoint("30", "35", "254.0.0.4");

    expectedResults.put("5", buildResult("254.0.0.2", "254.0.0.3", "254.0.0.4"));
    expectedResults.put("15", buildResult("254.0.0.3", "254.0.0.4", "254.0.0.1"));
    expectedResults.put("25", buildResult("254.0.0.4", "254.0.0.1", "254.0.0.2"));
    expectedResults.put("35", buildResult("254.0.0.1", "254.0.0.2", "254.0.0.3"));

    testGetEndpoints(strategy, keyTokens.toArray(new Token[0]));
}
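A short aside on the KSMetaData.optsWithRF(1) call used in Examples 8-10: it builds the replication-options map for strategies that are configured with a plain replication factor. The exact contents shown below are an assumption (a single "replication_factor" entry), not something demonstrated in these snippets.

import java.util.Map;
import org.apache.cassandra.config.KSMetaData;

// Assumption: optsWithRF(n) is equivalent to a single-entry options map,
// i.e. {"replication_factor" -> "n"}, which SimpleStrategy and
// OldNetworkTopologyStrategy read their replication factor from.
Map<String, String> opts = KSMetaData.optsWithRF(3);
System.out.println(opts); // expected: {replication_factor=3}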
Example 9: testBigIntegerEndpointsB
import org.apache.cassandra.config.KSMetaData; // import the required package/class
/**
 * 3 same rack endpoints
 * 1 external datacenter
 *
 * @throws UnknownHostException
 */
@Test
public void testBigIntegerEndpointsB() throws UnknownHostException
{
    RackInferringSnitch endpointSnitch = new RackInferringSnitch();
    AbstractReplicationStrategy strategy = new OldNetworkTopologyStrategy("Keyspace1", tmd, endpointSnitch, KSMetaData.optsWithRF(1));

    addEndpoint("0", "5", "254.0.0.1");
    addEndpoint("10", "15", "254.0.0.2");
    addEndpoint("20", "25", "254.1.0.3");
    addEndpoint("30", "35", "254.0.0.4");

    expectedResults.put("5", buildResult("254.0.0.2", "254.1.0.3", "254.0.0.4"));
    expectedResults.put("15", buildResult("254.1.0.3", "254.0.0.4", "254.0.0.1"));
    expectedResults.put("25", buildResult("254.0.0.4", "254.1.0.3", "254.0.0.1"));
    expectedResults.put("35", buildResult("254.0.0.1", "254.1.0.3", "254.0.0.2"));

    testGetEndpoints(strategy, keyTokens.toArray(new Token[0]));
}
Example 10: testBigIntegerEndpointsC
import org.apache.cassandra.config.KSMetaData; // import the required package/class
/**
 * 2 same rack endpoints
 * 1 same datacenter, different rack endpoints
 * 1 external datacenter
 *
 * @throws UnknownHostException
 */
@Test
public void testBigIntegerEndpointsC() throws UnknownHostException
{
    RackInferringSnitch endpointSnitch = new RackInferringSnitch();
    AbstractReplicationStrategy strategy = new OldNetworkTopologyStrategy("Keyspace1", tmd, endpointSnitch, KSMetaData.optsWithRF(1));

    addEndpoint("0", "5", "254.0.0.1");
    addEndpoint("10", "15", "254.0.0.2");
    addEndpoint("20", "25", "254.0.1.3");
    addEndpoint("30", "35", "254.1.0.4");

    expectedResults.put("5", buildResult("254.0.0.2", "254.0.1.3", "254.1.0.4"));
    expectedResults.put("15", buildResult("254.0.1.3", "254.1.0.4", "254.0.0.1"));
    expectedResults.put("25", buildResult("254.1.0.4", "254.0.0.1", "254.0.0.2"));
    expectedResults.put("35", buildResult("254.0.0.1", "254.0.1.3", "254.1.0.4"));

    testGetEndpoints(strategy, keyTokens.toArray(new Token[0]));
}
Example 11: newKsContainsCfWithTrigger
import org.apache.cassandra.config.KSMetaData; // import the required package/class
@Test
public void newKsContainsCfWithTrigger() throws Exception
{
    TriggerDefinition td = TriggerDefinition.create(triggerName, triggerClass);
    CFMetaData cfm1 = CFMetaData.compile(String.format("CREATE TABLE %s (k int PRIMARY KEY, v int)", cfName), ksName);
    cfm1.addTriggerDefinition(td);
    KSMetaData ksm = KSMetaData.newKeyspace(ksName,
                                            SimpleStrategy.class,
                                            Collections.singletonMap("replication_factor", "1"),
                                            true,
                                            Collections.singletonList(cfm1));
    MigrationManager.announceNewKeyspace(ksm);

    CFMetaData cfm2 = Schema.instance.getCFMetaData(ksName, cfName);
    assertFalse(cfm2.getTriggers().isEmpty());
    assertEquals(1, cfm2.getTriggers().size());
    assertEquals(td, cfm2.getTriggers().get(triggerName));
}
Example 12: addNewCfWithTriggerToKs
import org.apache.cassandra.config.KSMetaData; // import the required package/class
@Test
public void addNewCfWithTriggerToKs() throws Exception
{
    KSMetaData ksm = KSMetaData.newKeyspace(ksName,
                                            SimpleStrategy.class,
                                            Collections.singletonMap("replication_factor", "1"),
                                            true,
                                            Collections.EMPTY_LIST);
    MigrationManager.announceNewKeyspace(ksm);

    CFMetaData cfm1 = CFMetaData.compile(String.format("CREATE TABLE %s (k int PRIMARY KEY, v int)", cfName), ksName);
    TriggerDefinition td = TriggerDefinition.create(triggerName, triggerClass);
    cfm1.addTriggerDefinition(td);
    MigrationManager.announceNewColumnFamily(cfm1);

    CFMetaData cfm2 = Schema.instance.getCFMetaData(ksName, cfName);
    assertFalse(cfm2.getTriggers().isEmpty());
    assertEquals(1, cfm2.getTriggers().size());
    assertEquals(td, cfm2.getTriggers().get(triggerName));
}
Example 13: addTriggerToCf
import org.apache.cassandra.config.KSMetaData; // import the required package/class
@Test
public void addTriggerToCf() throws Exception
{
    CFMetaData cfm1 = CFMetaData.compile(String.format("CREATE TABLE %s (k int PRIMARY KEY, v int)", cfName), ksName);
    KSMetaData ksm = KSMetaData.newKeyspace(ksName,
                                            SimpleStrategy.class,
                                            Collections.singletonMap("replication_factor", "1"),
                                            true,
                                            Collections.singletonList(cfm1));
    MigrationManager.announceNewKeyspace(ksm);

    CFMetaData cfm2 = Schema.instance.getCFMetaData(ksName, cfName).copy();
    TriggerDefinition td = TriggerDefinition.create(triggerName, triggerClass);
    cfm2.addTriggerDefinition(td);
    MigrationManager.announceColumnFamilyUpdate(cfm2, false);

    CFMetaData cfm3 = Schema.instance.getCFMetaData(ksName, cfName);
    assertFalse(cfm3.getTriggers().isEmpty());
    assertEquals(1, cfm3.getTriggers().size());
    assertEquals(td, cfm3.getTriggers().get(triggerName));
}
Example 14: removeTriggerFromCf
import org.apache.cassandra.config.KSMetaData; // import the required package/class
@Test
public void removeTriggerFromCf() throws Exception
{
    TriggerDefinition td = TriggerDefinition.create(triggerName, triggerClass);
    CFMetaData cfm1 = CFMetaData.compile(String.format("CREATE TABLE %s (k int PRIMARY KEY, v int)", cfName), ksName);
    cfm1.addTriggerDefinition(td);
    KSMetaData ksm = KSMetaData.newKeyspace(ksName,
                                            SimpleStrategy.class,
                                            Collections.singletonMap("replication_factor", "1"),
                                            true,
                                            Collections.singletonList(cfm1));
    MigrationManager.announceNewKeyspace(ksm);

    CFMetaData cfm2 = Schema.instance.getCFMetaData(ksName, cfName).copy();
    cfm2.removeTrigger(triggerName);
    MigrationManager.announceColumnFamilyUpdate(cfm2, false);

    CFMetaData cfm3 = Schema.instance.getCFMetaData(ksName, cfName).copy();
    assertTrue(cfm3.getTriggers().isEmpty());
}
Example 15: finishStartup
import org.apache.cassandra.config.KSMetaData; // import the required package/class
public static void finishStartup()
{
    setupVersion();

    // add entries to system schema columnfamilies for the hardcoded system definitions
    for (String ksname : Schema.systemKeyspaceNames)
    {
        KSMetaData ksmd = Schema.instance.getKSMetaData(ksname);

        // delete old, possibly obsolete entries in schema columnfamilies
        for (String cfname : Arrays.asList(SystemKeyspace.SCHEMA_KEYSPACES_CF, SystemKeyspace.SCHEMA_COLUMNFAMILIES_CF, SystemKeyspace.SCHEMA_COLUMNS_CF))
        {
            String req = String.format("DELETE FROM system.%s WHERE keyspace_name = '%s'", cfname, ksmd.name);
            processInternal(req);
        }

        // (+1 to timestamp to make sure we don't get shadowed by the tombstones we just added)
        ksmd.toSchema(FBUtilities.timestampMicros() + 1).apply();
    }
}