This article collects typical usage examples of the Java method org.apache.cassandra.db.Keyspace.open. If you have been wondering what exactly Keyspace.open does, how to use it, or where to find usage samples, the curated method code examples below should help. You can also explore further usage examples of the containing class, org.apache.cassandra.db.Keyspace.
Fifteen code examples of the Keyspace.open method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
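Before diving into the individual examples, a minimal sketch of the usual call pattern may help. The keyspace and table names below are placeholders, and the sketch assumes the schema has already been loaded (for example inside a running Cassandra daemon or an embedded test harness):
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;

public final class KeyspaceOpenSketch
{
    // Opens (or returns the already-initialized) keyspace and fetches one of
    // its column family stores; throws if the keyspace or table is unknown.
    static ColumnFamilyStore openStore(String ksName, String cfName)
    {
        Keyspace keyspace = Keyspace.open(ksName); // both names are placeholders supplied by the caller
        return keyspace.getColumnFamilyStore(cfName);
    }
}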
Example 1: getStore
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
private ColumnFamilyStore getStore(String ksName, String cfName) {
// Start by validating keyspace name
if (Schema.instance.getKSMetaData(ksName) == null) {
System.err.println(String.format("Reference to nonexistent keyspace: %s!", ksName));
System.exit(1);
}
Keyspace keyspace = Keyspace.open(ksName);
// Make it work for indexes too - find the parent cf if necessary
String baseName = cfName;
if (cfName.contains(".")) {
String[] parts = cfName.split("\\.", 2);
baseName = parts[0];
}
// IllegalArgumentException will be thrown here if ks/cf pair does not exist
try {
return keyspace.getColumnFamilyStore(baseName);
} catch (Throwable t) {
System.err.println(String.format(
"The provided column family is not part of this cassandra keyspace: keyspace = %s, column family = %s",
ksName, cfName));
System.exit(1);
}
return null;
}
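The index handling above amounts to splitting the supplied store name on the first dot and looking up the parent table's store. A standalone hedged sketch of that resolution, with hypothetical names:
public final class IndexNameSketch
{
    public static void main(String[] args)
    {
        // Hypothetical "table.index" style store name; getStore above would resolve it to "users".
        String cfName = "users.by_email_idx";
        String baseName = cfName.contains(".") ? cfName.split("\\.", 2)[0] : cfName;
        System.out.println(baseName); // prints "users"
    }
}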
Example 2: antiCompactionSizeTest
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
@Test
public void antiCompactionSizeTest() throws ExecutionException, InterruptedException, IOException
{
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
cfs.disableAutoCompaction();
SSTableReader s = writeFile(cfs, 1000);
cfs.addSSTable(s);
long origSize = s.bytesOnDisk();
Range<Token> range = new Range<Token>(new BytesToken(ByteBufferUtil.bytes(0)), new BytesToken(ByteBufferUtil.bytes(500)));
Collection<SSTableReader> sstables = cfs.getSSTables();
CompactionManager.instance.performAnticompaction(cfs, Arrays.asList(range), Refs.tryRef(sstables), 12345);
long sum = 0;
for (SSTableReader x : cfs.getSSTables())
sum += x.bytesOnDisk();
assertEquals(sum, cfs.metric.liveDiskSpaceUsed.count());
assertEquals(origSize, cfs.metric.liveDiskSpaceUsed.count(), 100000);
}
Example 3: testGetScannerForNoIntersectingRanges
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
/** see CASSANDRA-5407 */
@Test
public void testGetScannerForNoIntersectingRanges()
{
Keyspace keyspace = Keyspace.open("Keyspace1");
ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
ByteBuffer key = ByteBufferUtil.bytes(String.valueOf("k1"));
Mutation rm = new Mutation("Keyspace1", key);
rm.add("Standard1", cellname("xyz"), ByteBufferUtil.bytes("abc"), 0);
rm.apply();
store.forceBlockingFlush();
boolean foundScanner = false;
for (SSTableReader s : store.getSSTables())
{
ISSTableScanner scanner = s.getScanner(new Range<Token>(t(0), t(1), s.partitioner), null);
scanner.next(); // throws exception pre 5407
foundScanner = true;
}
assertTrue(foundScanner);
}
Example 4: findSuitableEndpoint
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
/**
* Find a suitable replica as leader for counter update.
* For now, we pick a random replica in the local DC (or ask the snitch if
* there is no replica alive in the local DC).
* TODO: if we track the latency of the counter writes (which makes sense
* unlike standard writes, since there is a read involved), we could
* trust the dynamic snitch entirely, which may be a better solution. It
* is unclear whether we want to mix those latencies with read latencies, so this
* may be a bit involved.
*/
private static InetAddress findSuitableEndpoint(String keyspaceName, ByteBuffer key, String localDataCenter, ConsistencyLevel cl) throws UnavailableException
{
Keyspace keyspace = Keyspace.open(keyspaceName);
IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
List<InetAddress> endpoints = StorageService.instance.getLiveNaturalEndpoints(keyspace, key);
if (endpoints.isEmpty())
// TODO have a way to compute the consistency level
throw new UnavailableException(cl, cl.blockFor(keyspace), 0);
List<InetAddress> localEndpoints = new ArrayList<InetAddress>();
for (InetAddress endpoint : endpoints)
{
if (snitch.getDatacenter(endpoint).equals(localDataCenter))
localEndpoints.add(endpoint);
}
if (localEndpoints.isEmpty())
{
// No endpoint in local DC, pick the closest endpoint according to the snitch
snitch.sortByProximity(FBUtilities.getBroadcastAddress(), endpoints);
return endpoints.get(0);
}
else
{
return localEndpoints.get(FBUtilities.threadLocalRandom().nextInt(localEndpoints.size()));
}
}
Example 5: testDifference
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
@Test
public void testDifference() throws Throwable
{
Range<Token> range = new Range<>(partirioner.getMinimumToken(), partirioner.getRandomToken());
UUID parentRepairSession = UUID.randomUUID();
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("Standard1");
ActiveRepairService.instance.registerParentRepairSession(parentRepairSession, FBUtilities.getBroadcastAddress(), Arrays.asList(cfs), Arrays.asList(range), false, System.currentTimeMillis(), false);
RepairJobDesc desc = new RepairJobDesc(parentRepairSession, UUID.randomUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));
MerkleTrees tree1 = createInitialTree(desc);
MerkleTrees tree2 = createInitialTree(desc);
// change a range in one of the trees
Token token = partirioner.midpoint(range.left, range.right);
tree1.invalidate(token);
MerkleTree.TreeRange changed = tree1.get(token);
changed.hash("non-empty hash!".getBytes());
Set<Range<Token>> interesting = new HashSet<>();
interesting.add(changed);
// difference the trees
// note: we reuse the same endpoint which is bogus in theory but fine here
TreeResponse r1 = new TreeResponse(InetAddress.getByName("127.0.0.1"), tree1);
TreeResponse r2 = new TreeResponse(InetAddress.getByName("127.0.0.2"), tree2);
LocalSyncTask task = new LocalSyncTask(desc, r1, r2, ActiveRepairService.UNREPAIRED_SSTABLE);
task.run();
// ensure that the changed range was recorded
assertEquals("Wrong differing ranges", interesting.size(), task.getCurrentStat().numberOfDifferences);
}
Example 6: runMayThrow
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
protected void runMayThrow()
{
Keyspace keyspace = Keyspace.open(command.ksName);
Row r = command.getRow(keyspace);
ReadResponse result = ReadVerbHandler.getResponse(command, r);
MessagingService.instance().addLatency(FBUtilities.getBroadcastAddress(), TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
handler.response(result);
}
Example 7: syncWriteToBatchlog
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
private static void syncWriteToBatchlog(Collection<Mutation> mutations, Collection<InetAddress> endpoints, UUID uuid)
throws WriteTimeoutException
{
AbstractWriteResponseHandler handler = new WriteResponseHandler(endpoints,
Collections.<InetAddress>emptyList(),
ConsistencyLevel.ONE,
Keyspace.open(Keyspace.SYSTEM_KS),
null,
WriteType.BATCH_LOG);
MessageOut<Mutation> message = BatchlogManager.getBatchlogMutationFor(mutations, uuid, MessagingService.current_version)
.createMessage();
for (InetAddress target : endpoints)
{
int targetVersion = MessagingService.instance().getVersion(target);
if (target.equals(FBUtilities.getBroadcastAddress()) && OPTIMIZE_LOCAL_REQUESTS)
{
insertLocal(message.payload, handler);
}
else if (targetVersion == MessagingService.current_version)
{
MessagingService.instance().sendRR(message, target, handler, false);
}
else
{
MessagingService.instance().sendRR(BatchlogManager.getBatchlogMutationFor(mutations, uuid, targetVersion)
.createMessage(),
target,
handler,
false);
}
}
handler.get();
}
Example 8: testPrepBucket
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
@Test
public void testPrepBucket()
{
Keyspace keyspace = Keyspace.open(KEYSPACE1);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF_STANDARD1);
cfs.disableAutoCompaction();
ByteBuffer value = ByteBuffer.wrap(new byte[100]);
// create 3 sstables
int numSSTables = 3;
for (int r = 0; r < numSSTables; r++)
{
DecoratedKey key = Util.dk(String.valueOf(r));
new RowUpdateBuilder(cfs.metadata, r, key.getKey())
.clustering("column")
.add("val", value).build().applyUnsafe();
cfs.forceBlockingFlush();
}
cfs.forceBlockingFlush();
List<SSTableReader> sstrs = new ArrayList<>(cfs.getLiveSSTables());
List<SSTableReader> newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 9, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
assertTrue("incoming bucket should not be accepted when it has below the min threshold SSTables", newBucket.isEmpty());
newBucket = newestBucket(Collections.singletonList(sstrs.subList(0, 2)), 4, 32, 10, 10, Long.MAX_VALUE, new SizeTieredCompactionStrategyOptions());
assertFalse("non-incoming bucket should be accepted when it has at least 2 SSTables", newBucket.isEmpty());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(0).getMinTimestamp(), sstrs.get(0).getMaxTimestamp());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(1).getMinTimestamp(), sstrs.get(1).getMaxTimestamp());
assertEquals("an sstable with a single value should have equal min/max timestamps", sstrs.get(2).getMinTimestamp(), sstrs.get(2).getMaxTimestamp());
cfs.truncateBlocking();
}
Example 9: testGetFullyExpiredSSTables
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
@Test
public void testGetFullyExpiredSSTables()
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF2);
cfs.truncateBlocking();
DecoratedKey key = Util.dk("k1");
long timestamp1 = FBUtilities.timestampMicros(); // latest timestamp
long timestamp2 = timestamp1 - 5;
long timestamp3 = timestamp2 - 5; // oldest timestamp
// create an sstable with a tombstone that should be fully expired, as no older timestamps exist yet
applyDeleteMutation(cfs.metadata, key, timestamp2);
cfs.forceBlockingFlush();
// first sstable with tombstone is compacting
Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
// create another sstable with more recent timestamp
applyMutation(cfs.metadata, key, timestamp1);
cfs.forceBlockingFlush();
// second sstable is overlapping
Set<SSTableReader> overlapping = Sets.difference(Sets.newHashSet(cfs.getLiveSSTables()), compacting);
// the first sstable should be expired because the overlapping sstable is newer and the gc period is later
int gcBefore = (int) (System.currentTimeMillis() / 1000) + 5;
Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
assertNotNull(expired);
assertEquals(1, expired.size());
assertEquals(compacting.iterator().next(), expired.iterator().next());
// however if we add an older mutation to the memtable then the sstable should not be expired
applyMutation(cfs.metadata, key, timestamp3);
expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
assertNotNull(expired);
assertEquals(0, expired.size());
}
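The gcBefore cutoff used above is a plain seconds-since-epoch value; a small hedged sketch of the arithmetic, mirroring the test's "now + 5 seconds" cutoff:
public final class GcBeforeSketch
{
    // Hedged sketch: compute the same purge cutoff the test passes to
    // CompactionController.getFullyExpiredSSTables. Tombstones older than this
    // cutoff are purgeable once no newer overlapping data remains for the key.
    static int gcBefore()
    {
        int nowInSeconds = (int) (System.currentTimeMillis() / 1000);
        return nowInSeconds + 5; // 5 seconds in the future, past the tombstone's write time
    }
}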
Example 10: syncWriteToBatchlog
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
private static void syncWriteToBatchlog(Collection<RowMutation> mutations, Collection<InetAddress> endpoints, UUID uuid)
throws WriteTimeoutException
{
RowMutation rm = BatchlogManager.getBatchlogMutationFor(mutations, uuid);
AbstractWriteResponseHandler handler = new WriteResponseHandler(endpoints,
Collections.<InetAddress>emptyList(),
ConsistencyLevel.ONE,
Keyspace.open(Keyspace.SYSTEM_KS),
null,
WriteType.BATCH_LOG);
updateBatchlog(rm, endpoints, handler);
handler.get();
}
Example 11: asyncRemoveFromBatchlog
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
private static void asyncRemoveFromBatchlog(Collection<InetAddress> endpoints, UUID uuid)
{
ColumnFamily cf = EmptyColumns.factory.create(Schema.instance.getCFMetaData(Keyspace.SYSTEM_KS, SystemKeyspace.BATCHLOG_CF));
cf.delete(new DeletionInfo(FBUtilities.timestampMicros(), (int) (System.currentTimeMillis() / 1000)));
AbstractWriteResponseHandler handler = new WriteResponseHandler(endpoints,
Collections.<InetAddress>emptyList(),
ConsistencyLevel.ANY,
Keyspace.open(Keyspace.SYSTEM_KS),
null,
WriteType.SIMPLE);
RowMutation rm = new RowMutation(Keyspace.SYSTEM_KS, UUIDType.instance.decompose(uuid), cf);
updateBatchlog(rm, endpoints, handler);
}
Example 12: ReadCallback
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
/**
* Constructor when response count has to be calculated and blocked for.
*/
public ReadCallback(IResponseResolver<TMessage, TResolved> resolver, ConsistencyLevel consistencyLevel, IReadCommand command, List<InetAddress> filteredEndpoints)
{
this(resolver, consistencyLevel, consistencyLevel.blockFor(Keyspace.open(command.getKeyspace())), command, Keyspace.open(command.getKeyspace()), filteredEndpoints);
if (logger.isTraceEnabled())
logger.trace(String.format("Blockfor is %s; setting up requests to %s", blockfor, StringUtils.join(this.endpoints, ",")));
}
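The blockfor value logged above comes from ConsistencyLevel.blockFor(Keyspace), which consults the keyspace's replication settings. A hedged sketch, where the keyspace name is a placeholder:
import org.apache.cassandra.db.ConsistencyLevel;
import org.apache.cassandra.db.Keyspace;

public final class BlockForSketch
{
    // Hedged sketch: the number of replica responses a QUORUM read in the
    // (placeholder) keyspace "ks1" would block for - roughly RF / 2 + 1
    // under a simple replication strategy.
    static int quorumBlockFor()
    {
        Keyspace keyspace = Keyspace.open("ks1"); // "ks1" is hypothetical
        return ConsistencyLevel.QUORUM.blockFor(keyspace);
    }
}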
Example 13: testSmallFiles
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
@Test
public void testSmallFiles() throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
truncate(cfs);
cfs.disableAutoCompaction();
SSTableReader s = writeFile(cfs, 400);
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
List<SSTableReader> sstables;
int files = 1;
try (ISSTableScanner scanner = s.getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0);
LifecycleTransaction txn = cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, false, 1000000, false);
CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID()))
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
while(ci.hasNext())
{
rewriter.append(ci.next());
if (rewriter.currentWriter().getOnDiskFilePointer() > 2500000)
{
assertEquals(files, cfs.getLiveSSTables().size()); // all files are now opened early
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
files++;
}
}
sstables = rewriter.finish();
}
assertEquals(files, sstables.size());
assertEquals(files, cfs.getLiveSSTables().size());
LifecycleTransaction.waitForDeletions();
assertFileCounts(s.descriptor.directory.list());
validateCFS(cfs);
}
Example 14: getReadExecutor
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
/**
* @return an executor appropriate for the configured speculative read policy
*/
public static AbstractReadExecutor getReadExecutor(ReadCommand command, ConsistencyLevel consistencyLevel) throws UnavailableException
{
Keyspace keyspace = Keyspace.open(command.ksName);
List<InetAddress> allReplicas = StorageProxy.getLiveSortedEndpoints(keyspace, command.key);
ReadRepairDecision repairDecision = Schema.instance.getCFMetaData(command.ksName, command.cfName).newReadRepairDecision();
List<InetAddress> targetReplicas = consistencyLevel.filterForQuery(keyspace, allReplicas, repairDecision);
// Throw UAE early if we don't have enough replicas.
consistencyLevel.assureSufficientLiveNodes(keyspace, targetReplicas);
// Fat client. Speculating read executors need access to cfs metrics and sampled latency, and fat clients
// can't provide that. So, for now, fat clients will always use NeverSpeculatingReadExecutor.
if (StorageService.instance.isClientMode())
return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);
if (repairDecision != ReadRepairDecision.NONE)
ReadRepairMetrics.attempted.mark();
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(command.cfName);
RetryType retryType = cfs.metadata.getSpeculativeRetry().type;
// Speculative retry is disabled *OR* there are simply no extra replicas to speculate.
if (retryType == RetryType.NONE || consistencyLevel.blockFor(keyspace) == allReplicas.size())
return new NeverSpeculatingReadExecutor(command, consistencyLevel, targetReplicas);
if (targetReplicas.size() == allReplicas.size())
{
// CL.ALL, RRD.GLOBAL or RRD.DC_LOCAL and a single-DC.
// We are going to contact every node anyway, so ask for 2 full data requests instead of 1, for redundancy
// (same amount of requests in total, but we turn 1 digest request into a full blown data request).
return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
}
// RRD.NONE or RRD.DC_LOCAL w/ multiple DCs.
InetAddress extraReplica = allReplicas.get(targetReplicas.size());
// With repair decision DC_LOCAL all replicas/target replicas may be in different order, so
// we might have to find a replacement that's not already in targetReplicas.
if (repairDecision == ReadRepairDecision.DC_LOCAL && targetReplicas.contains(extraReplica))
{
for (InetAddress address : allReplicas)
{
if (!targetReplicas.contains(address))
{
extraReplica = address;
break;
}
}
}
targetReplicas.add(extraReplica);
if (retryType == RetryType.ALWAYS)
return new AlwaysSpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
else // PERCENTILE or CUSTOM.
return new SpeculatingReadExecutor(cfs, command, consistencyLevel, targetReplicas);
}
Example 15: testAbortHelper
import org.apache.cassandra.db.Keyspace; // import the package/class this method depends on
private void testAbortHelper(boolean earlyException, boolean offline) throws Exception
{
Keyspace keyspace = Keyspace.open(KEYSPACE);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
truncate(cfs);
SSTableReader s = writeFile(cfs, 1000);
if (!offline)
cfs.addSSTable(s);
Set<SSTableReader> compacting = Sets.newHashSet(s);
try (ISSTableScanner scanner = compacting.iterator().next().getScanner();
CompactionController controller = new CompactionController(cfs, compacting, 0);
LifecycleTransaction txn = offline ? LifecycleTransaction.offline(OperationType.UNKNOWN, compacting)
: cfs.getTracker().tryModify(compacting, OperationType.UNKNOWN);
SSTableRewriter rewriter = new SSTableRewriter(txn, 1000, offline, 10000000, false);
CompactionIterator ci = new CompactionIterator(OperationType.COMPACTION, Collections.singletonList(scanner), controller, FBUtilities.nowInSeconds(), UUIDGen.getTimeUUID())
)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
while (ci.hasNext())
{
rewriter.append(ci.next());
if (rewriter.currentWriter().getOnDiskFilePointer() > 25000000)
{
rewriter.switchWriter(getWriter(cfs, s.descriptor.directory, txn));
}
}
try
{
rewriter.throwDuringPrepare(earlyException);
rewriter.prepareToCommit();
}
catch (Throwable t)
{
rewriter.abort();
}
}
finally
{
if (offline)
s.selfRef().release();
}
LifecycleTransaction.waitForDeletions();
int filecount = assertFileCounts(s.descriptor.directory.list());
assertEquals(filecount, 1);
if (!offline)
{
assertEquals(1, cfs.getLiveSSTables().size());
validateCFS(cfs);
truncate(cfs);
}
else
{
assertEquals(0, cfs.getLiveSSTables().size());
cfs.truncateBlocking();
}
filecount = assertFileCounts(s.descriptor.directory.list());
if (offline)
{
// the file is not added to the CFS, therefore not truncated away above
assertEquals(1, filecount);
for (File f : s.descriptor.directory.listFiles())
{
FileUtils.deleteRecursive(f);
}
filecount = assertFileCounts(s.descriptor.directory.list());
}
assertEquals(0, filecount);
truncate(cfs);
}