This article collects typical usage examples of the Java class org.apache.cassandra.service.StorageService. If you are unsure what StorageService does or how to use it, the curated code examples below may help.
The StorageService class belongs to the org.apache.cassandra.service package. A total of 15 code examples are shown below, sorted by popularity by default.
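Before the individual examples, here is a minimal sketch of the common access pattern: StorageService is a singleton reached through StorageService.instance. The sketch reuses only calls that appear in the examples below (daemonInitialization(), setInitialized(), initServer(), isBootstrapMode()); the class name QuickStartSketch is hypothetical, and the surrounding setup is assumed to match the embedded/test environment of Example 1 rather than a production node.
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.Keyspace;
import org.apache.cassandra.service.StorageService;

public class QuickStartSketch
{
    public static void startLocalNode() throws Exception
    {
        // initialize static daemon state (loads the configuration), as in Example 1
        DatabaseDescriptor.daemonInitialization();
        Keyspace.setInitialized();

        // StorageService is a singleton: all calls go through StorageService.instance
        StorageService.instance.initServer();

        // reads should not be served while the node is still bootstrapping (see Example 5)
        if (StorageService.instance.isBootstrapMode())
            throw new IllegalStateException("node is still bootstrapping");
    }
}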
Example 1: before
import org.apache.cassandra.service.StorageService; // import the required package/class
@Override protected void before() throws Throwable {
    if (server != null) return;

    DatabaseDescriptor.daemonInitialization();

    // Cleanup first
    try {
        cleanupAndLeaveDirs();
    } catch (IOException e) {
        throw new RuntimeException("Failed to cleanup and recreate directories.", e);
    }

    Keyspace.setInitialized();
    SystemKeyspace.persistLocalMetadata();
    SystemKeyspace.finishStartup();
    StorageService.instance.initServer();

    server = new Server.Builder().withHost(nativeAddr).withPort(nativePort).build();
    server.start();
}
Example 2: testRac
import org.apache.cassandra.service.StorageService; // import the required package/class
@Test
public void testRac() throws IOException, ConfigurationException
{
    az = "us-east-1d";
    Ec2Snitch snitch = new TestEC2Snitch();
    InetAddress local = InetAddress.getByName("127.0.0.1");
    InetAddress nonlocal = InetAddress.getByName("127.0.0.7");

    Gossiper.instance.addSavedEndpoint(nonlocal);
    Map<ApplicationState, VersionedValue> stateMap = Gossiper.instance.getEndpointStateForEndpoint(nonlocal).getApplicationStateMap();
    stateMap.put(ApplicationState.DC, StorageService.instance.valueFactory.datacenter("us-west"));
    stateMap.put(ApplicationState.RACK, StorageService.instance.valueFactory.datacenter("1a"));

    assertEquals("us-west", snitch.getDatacenter(nonlocal));
    assertEquals("1a", snitch.getRack(nonlocal));

    assertEquals("us-east", snitch.getDatacenter(local));
    assertEquals("1d", snitch.getRack(local));
}
Example 3: defineSchema
import org.apache.cassandra.service.StorageService; // import the required package/class
@BeforeClass
public static void defineSchema() throws Exception
{
    SchemaLoader.prepareServer();
    StorageService.instance.initServer();

    SchemaLoader.createKeyspace(KEYSPACE1,
                                KeyspaceParams.simple(1),
                                SchemaLoader.standardCFMD(KEYSPACE1, CF_STANDARD),
                                CFMetaData.Builder.create(KEYSPACE1, CF_COUNTER, false, true, true)
                                                  .addPartitionKey("key", BytesType.instance)
                                                  .build(),
                                CFMetaData.Builder.create(KEYSPACE1, CF_STANDARDINT)
                                                  .addPartitionKey("key", AsciiType.instance)
                                                  .addClusteringColumn("cols", Int32Type.instance)
                                                  .addRegularColumn("val", BytesType.instance)
                                                  .build(),
                                SchemaLoader.compositeIndexCFMD(KEYSPACE1, CF_INDEX, true));
    SchemaLoader.createKeyspace(KEYSPACE2,
                                KeyspaceParams.simple(1));
    SchemaLoader.createKeyspace(KEYSPACE_CACHEKEY,
                                KeyspaceParams.simple(1),
                                SchemaLoader.standardCFMD(KEYSPACE_CACHEKEY, CF_STANDARD),
                                SchemaLoader.standardCFMD(KEYSPACE_CACHEKEY, CF_STANDARD2),
                                SchemaLoader.standardCFMD(KEYSPACE_CACHEKEY, CF_STANDARD3));
}
Example 4: testSatisfiedByWithMultipleTerms
import org.apache.cassandra.service.StorageService; // import the required package/class
@Test
public void testSatisfiedByWithMultipleTerms()
{
    final ByteBuffer comment = UTF8Type.instance.decompose("comment");
    final ColumnFamilyStore store = Keyspace.open("sasecondaryindex").getColumnFamilyStore("saindexed1");
    final IPartitioner<?> partitioner = StorageService.getPartitioner();

    ColumnFamily cf = ArrayBackedSortedColumns.factory.create(store.metadata);
    cf.addColumn(new Column(comment, UTF8Type.instance.decompose("software engineer is working on a project"), System.currentTimeMillis()));

    Operation.Builder builder = new Operation.Builder(OperationType.AND, UTF8Type.instance, controller,
                                    new IndexExpression(comment, IndexOperator.EQ, UTF8Type.instance.decompose("eng is a work")));
    Operation op = builder.complete();

    Assert.assertTrue(op.satisfiedBy(new Row(partitioner.decorateKey(UTF8Type.instance.decompose("key1")), cf), null, false));

    builder = new Operation.Builder(OperationType.AND, UTF8Type.instance, controller,
                                    new IndexExpression(comment, IndexOperator.EQ, UTF8Type.instance.decompose("soft works fine")));
    op = builder.complete();

    Assert.assertTrue(op.satisfiedBy(new Row(partitioner.decorateKey(UTF8Type.instance.decompose("key1")), cf), null, false));
}
Example 5: doVerb
import org.apache.cassandra.service.StorageService; // import the required package/class
public void doVerb(MessageIn<ReadCommand> message, int id)
{
    if (StorageService.instance.isBootstrapMode())
    {
        throw new RuntimeException("Cannot service reads while bootstrapping!");
    }

    ReadCommand command = message.payload;
    Keyspace keyspace = Keyspace.open(command.ksName);
    Row row;
    try
    {
        row = command.getRow(keyspace);
    }
    catch (TombstoneOverwhelmingException e)
    {
        // error already logged. Drop the request
        return;
    }

    MessageOut<ReadResponse> reply = new MessageOut<ReadResponse>(MessagingService.Verb.REQUEST_RESPONSE,
                                                                  getResponse(command, row),
                                                                  ReadResponse.serializer);
    Tracing.trace("Enqueuing response to {}", message.from);
    MessagingService.instance().sendReply(reply, id, message.from);
}
Example 6: fetchAsync
import org.apache.cassandra.service.StorageService; // import the required package/class
public StreamResultFuture fetchAsync()
{
    for (Map.Entry<String, Map.Entry<InetAddress, Collection<Range<Token>>>> entry : toFetch.entries())
    {
        String keyspace = entry.getKey();
        InetAddress source = entry.getValue().getKey();
        InetAddress preferred = SystemKeyspace.getPreferredIP(source);
        Collection<Range<Token>> ranges = entry.getValue().getValue();

        // filter out already streamed ranges
        Set<Range<Token>> availableRanges = stateStore.getAvailableRanges(keyspace, StorageService.instance.getTokenMetadata().partitioner);
        if (ranges.removeAll(availableRanges))
        {
            logger.info("Some ranges of {} are already available. Skipping streaming those ranges.", availableRanges);
        }

        if (logger.isTraceEnabled())
            logger.trace("{}ing from {} ranges {}", description, source, StringUtils.join(ranges, ", "));

        /* Send messages to respective folks to stream data over to me */
        streamPlan.requestRanges(source, preferred, keyspace, ranges);
    }

    return streamPlan.execute();
}
Example 7: scheduleSaving
import org.apache.cassandra.service.StorageService; // import the required package/class
public void scheduleSaving(int savePeriodInSeconds, final int keysToSave)
{
    if (saveTask != null)
    {
        saveTask.cancel(false); // Do not interrupt an in-progress save
        saveTask = null;
    }

    if (savePeriodInSeconds > 0)
    {
        Runnable runnable = new Runnable()
        {
            public void run()
            {
                submitWrite(keysToSave);
            }
        };
        saveTask = StorageService.optionalTasks.scheduleWithFixedDelay(runnable,
                                                                       savePeriodInSeconds,
                                                                       savePeriodInSeconds,
                                                                       TimeUnit.SECONDS);
    }
}
Example 8: writeFile
import org.apache.cassandra.service.StorageService; // import the required package/class
private SSTableReader writeFile(ColumnFamilyStore cfs, int count)
{
    ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfs.metadata);
    for (int i = 0; i < count / 100; i++)
        cf.addColumn(Util.cellname(i), random(0, 1000), 1);

    File dir = cfs.directories.getDirectoryForNewSSTables();
    String filename = cfs.getTempSSTablePath(dir);

    SSTableWriter writer = new SSTableWriter(filename,
                                             0,
                                             0,
                                             cfs.metadata,
                                             StorageService.getPartitioner(),
                                             new MetadataCollector(cfs.metadata.comparator));

    for (int i = 0; i < count * 5; i++)
        writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
    return writer.closeAndOpenReader();
}
Example 9: testCommitFailurePolicy_ignore_afterStartup
import org.apache.cassandra.service.StorageService; // import the required package/class
@Test
public void testCommitFailurePolicy_ignore_afterStartup() throws Exception
{
    CassandraDaemon daemon = new CassandraDaemon();
    daemon.completeSetup(); // startup must be completed, otherwise commit log failure must kill JVM regardless of failure policy
    StorageService.instance.registerDaemon(daemon);

    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);
    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.ignore);
        CommitLog.handleCommitError("Testing ignore policy", new Throwable());
        // error policy is set to IGNORE, so the JVM must not be killed if an error occurs after startup
        Assert.assertFalse(killerForTests.wasKilled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
        JVMStabilityInspector.replaceKiller(originalKiller);
    }
}
Example 10: testCommitFailurePolicy_die
import org.apache.cassandra.service.StorageService; // import the required package/class
@Test
public void testCommitFailurePolicy_die()
{
    CassandraDaemon daemon = new CassandraDaemon();
    daemon.completeSetup(); // startup must be completed, otherwise commit log failure must kill JVM regardless of failure policy
    StorageService.instance.registerDaemon(daemon);

    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);
    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
        CommitLog.handleCommitError("Testing die policy", new Throwable());
        Assert.assertTrue(killerForTests.wasKilled());
        Assert.assertFalse(killerForTests.wasKilledQuietly()); // only killed quietly on startup failure
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
        JVMStabilityInspector.replaceKiller(originalKiller);
    }
}
Example 11: testRac
import org.apache.cassandra.service.StorageService; // import the required package/class
@Test
public void testRac() throws IOException, ConfigurationException
{
    az = "us-east-1d";
    Ec2Snitch snitch = new TestEC2Snitch();
    InetAddress local = InetAddress.getByName("127.0.0.1");
    InetAddress nonlocal = InetAddress.getByName("127.0.0.7");

    Gossiper.instance.addSavedEndpoint(nonlocal);
    Map<ApplicationState, VersionedValue> stateMap = new EnumMap<>(ApplicationState.class);
    stateMap.put(ApplicationState.DC, StorageService.instance.valueFactory.datacenter("us-west"));
    stateMap.put(ApplicationState.RACK, StorageService.instance.valueFactory.datacenter("1a"));
    Gossiper.instance.getEndpointStateForEndpoint(nonlocal).addApplicationStates(stateMap);

    assertEquals("us-west", snitch.getDatacenter(nonlocal));
    assertEquals("1a", snitch.getRack(nonlocal));

    assertEquals("us-east", snitch.getDatacenter(local));
    assertEquals("1d", snitch.getRack(local));
}
Example 12: start
import org.apache.cassandra.service.StorageService; // import the required package/class
public void start()
{
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try
    {
        mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }

    // delay the first replay by RING_DELAY so the ring can settle, then repeat at REPLAY_INTERVAL
    batchlogTasks.scheduleWithFixedDelay(this::replayFailedBatches,
                                         StorageService.RING_DELAY,
                                         REPLAY_INTERVAL,
                                         TimeUnit.MILLISECONDS);
}
Example 13: testRac
import org.apache.cassandra.service.StorageService; // import the required package/class
@Test
public void testRac() throws IOException, ConfigurationException
{
    az = "us-central1-a";
    GoogleCloudSnitch snitch = new TestGoogleCloudSnitch();
    InetAddress local = InetAddress.getByName("127.0.0.1");
    InetAddress nonlocal = InetAddress.getByName("127.0.0.7");

    Gossiper.instance.addSavedEndpoint(nonlocal);
    Map<ApplicationState, VersionedValue> stateMap = Gossiper.instance.getEndpointStateForEndpoint(nonlocal).getApplicationStateMap();
    stateMap.put(ApplicationState.DC, StorageService.instance.valueFactory.datacenter("europe-west1"));
    stateMap.put(ApplicationState.RACK, StorageService.instance.valueFactory.datacenter("a"));

    assertEquals("europe-west1", snitch.getDatacenter(nonlocal));
    assertEquals("a", snitch.getRack(nonlocal));

    assertEquals("us-central1", snitch.getDatacenter(local));
    assertEquals("a", snitch.getRack(local));
}
Example 14: describe_splits_ex
import org.apache.cassandra.service.StorageService; // import the required package/class
public List<CfSplit> describe_splits_ex(String cfName, String start_token, String end_token, int keys_per_split)
        throws InvalidRequestException, TException
{
    try
    {
        Token.TokenFactory tf = StorageService.getPartitioner().getTokenFactory();
        Range<Token> tr = new Range<Token>(tf.fromString(start_token), tf.fromString(end_token));
        List<Pair<Range<Token>, Long>> splits =
                StorageService.instance.getSplits(state().getKeyspace(), cfName, tr, keys_per_split);
        List<CfSplit> result = new ArrayList<CfSplit>(splits.size());
        for (Pair<Range<Token>, Long> split : splits)
            result.add(new CfSplit(split.left.left.toString(), split.left.right.toString(), split.right));
        return result;
    }
    catch (RequestValidationException e)
    {
        throw ThriftConversion.toThrift(e);
    }
}
Example 15: run
import org.apache.cassandra.service.StorageService; // import the required package/class
public void run()
{
    double report = -1;
    try
    {
        report = getIOWait();
    }
    catch (IOException e)
    {
        // ignore;
        if (FBUtilities.hasProcFS())
            logger.warn("Couldn't read /proc/stats");
    }
    if (report == -1d)
        report = compaction_severity.get();

    if (!Gossiper.instance.isEnabled())
        return;
    report += manual_severity.get(); // add manual severity setting.

    // publish the computed severity to the rest of the cluster via gossip
    VersionedValue updated = StorageService.instance.valueFactory.severity(report);
    Gossiper.instance.addLocalApplicationState(ApplicationState.SEVERITY, updated);
}