This article collects typical usage examples of the Java class org.apache.cassandra.utils.Pair. If you have been wondering what the Pair class does, how to use it, or where to find usage examples, the curated class code examples below may help.
The Pair class belongs to the org.apache.cassandra.utils package. Fifteen code examples of the Pair class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
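Before the per-method examples, here is a minimal sketch of the Pair API itself as it is used throughout the snippets below: an immutable two-element tuple with a static factory and public final fields (the variable names here are illustrative, not from the Cassandra source):

import java.util.HashMap;
import java.util.Map;
import org.apache.cassandra.utils.Pair;

// Minimal usage sketch: Pair.create builds the tuple, and the two
// elements are read back through the public final fields left/right.
Pair<String, Integer> p = Pair.create("token", 42);
String left = p.left;   // "token"
int right = p.right;    // 42

// Pair implements equals()/hashCode() over both elements, which is
// why it can serve as a composite map key (see Example 6 below).
Map<Pair<String, Integer>, String> byKey = new HashMap<>();
byKey.put(p, "value");
assert byKey.containsKey(Pair.create("token", 42));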
Example 1: prepare
import org.apache.cassandra.utils.Pair; // import the required package/class
public Term prepare(String keyspace, ColumnSpecification receiver) throws InvalidRequestException
{
    validateAssignableTo(keyspace, receiver);

    ColumnSpecification keySpec = Maps.keySpecOf(receiver);
    ColumnSpecification valueSpec = Maps.valueSpecOf(receiver);
    Map<Term, Term> values = new HashMap<>(entries.size());
    boolean allTerminal = true;
    for (Pair<Term.Raw, Term.Raw> entry : entries)
    {
        Term k = entry.left.prepare(keyspace, keySpec);
        Term v = entry.right.prepare(keyspace, valueSpec);

        if (k.containsBindMarker() || v.containsBindMarker())
            throw new InvalidRequestException(String.format("Invalid map literal for %s: bind variables are not supported inside collection literals", receiver.name));

        if (k instanceof Term.NonTerminal || v instanceof Term.NonTerminal)
            allTerminal = false;

        values.put(k, v);
    }
    DelayedValue value = new DelayedValue(((MapType)receiver.type).getKeysType(), values);
    return allTerminal ? value.bind(QueryOptions.DEFAULT) : value;
}
Example 2: testAssignment
import org.apache.cassandra.utils.Pair; // import the required package/class
public AssignmentTestable.TestResult testAssignment(String keyspace, ColumnSpecification receiver)
{
    if (!(receiver.type instanceof MapType))
        return AssignmentTestable.TestResult.NOT_ASSIGNABLE;

    // If there are no elements, we can't say it's an exact match (an empty map is fundamentally polymorphic).
    if (entries.isEmpty())
        return AssignmentTestable.TestResult.WEAKLY_ASSIGNABLE;

    ColumnSpecification keySpec = Maps.keySpecOf(receiver);
    ColumnSpecification valueSpec = Maps.valueSpecOf(receiver);
    // It's an exact match if all entries are exact matches, but it is not assignable as soon as any entry is non-assignable.
    AssignmentTestable.TestResult res = AssignmentTestable.TestResult.EXACT_MATCH;
    for (Pair<Term.Raw, Term.Raw> entry : entries)
    {
        AssignmentTestable.TestResult t1 = entry.left.testAssignment(keyspace, keySpec);
        AssignmentTestable.TestResult t2 = entry.right.testAssignment(keyspace, valueSpec);
        if (t1 == AssignmentTestable.TestResult.NOT_ASSIGNABLE || t2 == AssignmentTestable.TestResult.NOT_ASSIGNABLE)
            return AssignmentTestable.TestResult.NOT_ASSIGNABLE;
        if (t1 != AssignmentTestable.TestResult.EXACT_MATCH || t2 != AssignmentTestable.TestResult.EXACT_MATCH)
            res = AssignmentTestable.TestResult.WEAKLY_ASSIGNABLE;
    }
    return res;
}
Example 3: getExactTypeIfKnown
import org.apache.cassandra.utils.Pair; // import the required package/class
@Override
public AbstractType<?> getExactTypeIfKnown(String keyspace)
{
    AbstractType<?> keyType = null;
    AbstractType<?> valueType = null;
    for (Pair<Term.Raw, Term.Raw> entry : entries)
    {
        if (keyType == null)
            keyType = entry.left.getExactTypeIfKnown(keyspace);
        if (valueType == null)
            valueType = entry.right.getExactTypeIfKnown(keyspace);
        if (keyType != null && valueType != null)
            return MapType.getInstance(keyType, valueType, false);
    }
    return null;
}
Example 4: deserializeFirstLastKey
import org.apache.cassandra.utils.Pair; // import the required package/class
/**
 * Deserializes the first and last key stored in the summary.
 *
 * Only for use by offline tools like SSTableMetadataViewer; otherwise SSTable.first/last should be used.
 */
public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException
{
    in.skipBytes(4); // minIndexInterval
    int offsetCount = in.readInt();
    long offheapSize = in.readLong();
    if (haveSamplingLevel)
        in.skipBytes(8); // samplingLevel, fullSamplingSummarySize

    in.skip(offsetCount * 4);
    in.skip(offheapSize - offsetCount * 4);

    DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    return Pair.create(first, last);
}
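As a reading aid, the on-disk layout that deserializeFirstLastKey walks can be inferred from the reads and skips above (this is reconstructed from the code, not quoted from a format specification):

    minIndexInterval                          4 bytes  (skipped)
    offsetCount                               4 bytes  (read)
    offheapSize                               8 bytes  (read)
    samplingLevel, fullSamplingSummarySize    8 bytes  (skipped, only if haveSamplingLevel)
    offsets                                   offsetCount * 4 bytes             (skipped)
    entries                                   offheapSize - offsetCount * 4 bytes (skipped)
    first key, then last key                  each length-prefixed              (read and decorated)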
Example 5: getTotalSizeForSections
import org.apache.cassandra.utils.Pair; // import the required package/class
/**
 * @param sections Collection of sections in uncompressed file. Should not contain sections that overlap each other.
 * @return Total chunk size in bytes for given sections including checksum.
 */
public long getTotalSizeForSections(Collection<Pair<Long, Long>> sections)
{
    long size = 0;
    long lastOffset = -1;
    for (Pair<Long, Long> section : sections)
    {
        int startIndex = (int) (section.left / parameters.chunkLength());
        int endIndex = (int) (section.right / parameters.chunkLength());
        endIndex = section.right % parameters.chunkLength() == 0 ? endIndex - 1 : endIndex;
        for (int i = startIndex; i <= endIndex; i++)
        {
            long offset = i * 8L;
            long chunkOffset = chunkOffsets.getLong(offset);
            if (chunkOffset > lastOffset)
            {
                lastOffset = chunkOffset;
                long nextChunkOffset = offset + 8 == chunkOffsetsSize
                                     ? compressedFileLength
                                     : chunkOffsets.getLong(offset + 8);
                size += (nextChunkOffset - chunkOffset);
            }
        }
    }
    return size;
}
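A worked example may help with the chunk-index arithmetic above. Assuming an illustrative chunkLength() of 65536, a section (left=0, right=131072) yields startIndex = 0 and a provisional endIndex = 131072 / 65536 = 2, which is then decremented to 1 because 131072 is an exact multiple of the chunk length; so exactly chunks 0 and 1 are counted. Each chunk's compressed size is the distance between adjacent 8-byte entries in chunkOffsets, with the final chunk bounded by compressedFileLength, and the lastOffset guard ensures a chunk straddling two adjacent sections is counted only once.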
Example 6: create
import org.apache.cassandra.utils.Pair; // import the required package/class
public static LZ4Compressor create(Map<String, String> args) throws ConfigurationException
{
    String compressorType = validateCompressorType(args.get(LZ4_COMPRESSOR_TYPE));
    Integer compressionLevel = validateCompressionLevel(args.get(LZ4_HIGH_COMPRESSION_LEVEL));

    Pair<String, Integer> compressorTypeAndLevel = Pair.create(compressorType, compressionLevel);
    LZ4Compressor instance = instances.get(compressorTypeAndLevel);
    if (instance == null)
    {
        if (compressorType.equals(LZ4_FAST_COMPRESSOR) && args.get(LZ4_HIGH_COMPRESSION_LEVEL) != null)
            logger.warn("'{}' parameter is ignored when '{}' is '{}'", LZ4_HIGH_COMPRESSION_LEVEL, LZ4_COMPRESSOR_TYPE, LZ4_FAST_COMPRESSOR);

        instance = new LZ4Compressor(compressorType, compressionLevel);
        LZ4Compressor instanceFromMap = instances.putIfAbsent(compressorTypeAndLevel, instance);
        if (instanceFromMap != null)
            instance = instanceFromMap;
    }
    return instance;
}
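Example 6 shows Pair in its role as a composite cache key: because Pair's equals()/hashCode() compare both elements, a (type, level) pair can key a concurrent map directly. Here is a stand-alone sketch of the same check-then-putIfAbsent memoization pattern (the names PairKeyedCache, Resource, and getOrCreate are illustrative, not from the Cassandra source):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.cassandra.utils.Pair;

public class PairKeyedCache
{
    static class Resource {} // stand-in for an expensive object such as LZ4Compressor

    private final ConcurrentMap<Pair<String, Integer>, Resource> instances = new ConcurrentHashMap<>();

    public Resource getOrCreate(String type, Integer level)
    {
        Pair<String, Integer> key = Pair.create(type, level);
        Resource instance = instances.get(key);
        if (instance == null)
        {
            // Two threads may race to construct; only the putIfAbsent winner
            // is kept, so callers always share a single instance per key.
            Resource created = new Resource();
            Resource raced = instances.putIfAbsent(key, created);
            instance = (raced != null) ? raced : created;
        }
        return instance;
    }
}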
Example 7: prepare
import org.apache.cassandra.utils.Pair; // import the required package/class
public Term prepare(String keyspace, ColumnSpecification receiver) throws InvalidRequestException
{
    validateAssignableTo(keyspace, receiver);

    ColumnSpecification keySpec = Maps.keySpecOf(receiver);
    ColumnSpecification valueSpec = Maps.valueSpecOf(receiver);
    Map<Term, Term> values = new HashMap<Term, Term>(entries.size());
    boolean allTerminal = true;
    for (Pair<Term.Raw, Term.Raw> entry : entries)
    {
        Term k = entry.left.prepare(keyspace, keySpec);
        Term v = entry.right.prepare(keyspace, valueSpec);

        if (k.containsBindMarker() || v.containsBindMarker())
            throw new InvalidRequestException(String.format("Invalid map literal for %s: bind variables are not supported inside collection literals", receiver.name));

        if (k instanceof Term.NonTerminal || v instanceof Term.NonTerminal)
            allTerminal = false;

        values.put(k, v);
    }
    DelayedValue value = new DelayedValue(((MapType)receiver.type).getKeysType(), values);
    return allTerminal ? value.bind(QueryOptions.DEFAULT) : value;
}
Example 8: addEndpoint
import org.apache.cassandra.utils.Pair; // import the required package/class
/**
 * Stores current DC/rack assignment for ep.
 */
protected void addEndpoint(InetAddress ep)
{
    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
    String dc = snitch.getDatacenter(ep);
    String rack = snitch.getRack(ep);
    Pair<String, String> current = currentLocations.get(ep);
    if (current != null)
    {
        if (current.left.equals(dc) && current.right.equals(rack))
            return;
        dcRacks.get(current.left).remove(current.right, ep);
        dcEndpoints.remove(current.left, ep);
    }

    dcEndpoints.put(dc, ep);

    if (!dcRacks.containsKey(dc))
        dcRacks.put(dc, HashMultimap.<String, InetAddress>create());
    dcRacks.get(dc).put(rack, ep);

    currentLocations.put(ep, Pair.create(dc, rack));
}
Example 9: whereClause
import org.apache.cassandra.utils.Pair; // import the required package/class
/** Serialize the where clause. */
private Pair<Integer, String> whereClause()
{
    if (partitionKeyString == null)
        partitionKeyString = keyString(partitionBoundColumns);

    if (partitionKeyMarkers == null)
        partitionKeyMarkers = partitionKeyMarkers();

    // initial query: token(k) > start_token and token(k) <= end_token
    if (emptyPartitionKeyValues())
        return Pair.create(0, String.format(" WHERE token(%s) > ? AND token(%s) <= ?", partitionKeyString, partitionKeyString));

    // query: token(k) > token(pre_partition_key) and token(k) <= end_token
    if (clusterColumns.size() == 0 || clusterColumns.get(0).value == null)
        return Pair.create(1,
                           String.format(" WHERE token(%s) > token(%s) AND token(%s) <= ?",
                                         partitionKeyString, partitionKeyMarkers, partitionKeyString));

    // query: token(k) = token(pre_partition_key) and m = pre_cluster_key_m and n > pre_cluster_key_n
    Pair<Integer, String> clause = whereClause(clusterColumns, 0);
    return Pair.create(clause.left,
                       String.format(" WHERE token(%s) = token(%s) %s", partitionKeyString, partitionKeyMarkers, clause.right));
}
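Note the return type here: the Pair bundles two logically linked results of one computation. The left element appears to tag which query variant was generated (and, in the recursive overload, how far into the clustering key bind markers have been emitted), while the right element is the generated WHERE clause text. Returning a Pair avoids introducing a one-off result class for a private helper.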
Example 10: serializedSize
import org.apache.cassandra.utils.Pair; // import the required package/class
public long serializedSize(FileMessageHeader header, int version)
{
    long size = UUIDSerializer.serializer.serializedSize(header.cfId, version);
    size += TypeSizes.sizeof(header.sequenceNumber);
    size += TypeSizes.sizeof(header.version.toString());

    if (version >= StreamMessage.VERSION_22)
        size += TypeSizes.sizeof(header.format.name);

    size += TypeSizes.sizeof(header.estimatedKeys);

    size += TypeSizes.sizeof(header.sections.size());
    for (Pair<Long, Long> section : header.sections)
    {
        size += TypeSizes.sizeof(section.left);
        size += TypeSizes.sizeof(section.right);
    }
    size += CompressionInfo.serializer.serializedSize(header.compressionInfo, version);
    size += TypeSizes.sizeof(header.sstableLevel);

    if (version >= StreamMessage.VERSION_30)
        size += SerializationHeader.serializer.serializedSize(header.version, header.header);

    return size;
}
Example 11: getCompactingAndNonCompactingSSTables
import org.apache.cassandra.utils.Pair; // import the required package/class
/**
 * Returns a Pair of all compacting and non-compacting sstables. Non-compacting sstables will be marked as
 * compacting.
 */
@SuppressWarnings("resource")
private Pair<List<SSTableReader>, Map<UUID, LifecycleTransaction>> getCompactingAndNonCompactingSSTables()
{
    List<SSTableReader> allCompacting = new ArrayList<>();
    Map<UUID, LifecycleTransaction> allNonCompacting = new HashMap<>();
    for (Keyspace ks : Keyspace.all())
    {
        for (ColumnFamilyStore cfStore : ks.getColumnFamilyStores())
        {
            Set<SSTableReader> nonCompacting, allSSTables;
            LifecycleTransaction txn = null;
            do
            {
                View view = cfStore.getTracker().getView();
                allSSTables = ImmutableSet.copyOf(view.select(SSTableSet.CANONICAL));
                nonCompacting = ImmutableSet.copyOf(view.getUncompacting(allSSTables));
            }
            while (null == (txn = cfStore.getTracker().tryModify(nonCompacting, OperationType.UNKNOWN)));

            allNonCompacting.put(cfStore.metadata.cfId, txn);
            allCompacting.addAll(Sets.difference(allSSTables, nonCompacting));
        }
    }
    return Pair.create(allCompacting, allNonCompacting);
}
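A note on the do/while above: tryModify returns null when any of the chosen sstables has begun compacting between the View snapshot and the marking attempt, so the loop simply re-reads the view until it atomically marks a consistent, fully non-compacting set.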
Example 12: cloneAfterAllSettled
import org.apache.cassandra.utils.Pair; // import the required package/class
/**
 * Create a copy of TokenMetadata with tokenToEndpointMap reflecting the situation after all
 * current leave, move, and relocate operations have finished.
 *
 * @return new token metadata
 */
public TokenMetadata cloneAfterAllSettled()
{
    lock.readLock().lock();
    try
    {
        TokenMetadata metadata = cloneOnlyTokenMap();

        for (InetAddress endpoint : leavingEndpoints)
            metadata.removeEndpoint(endpoint);

        for (Pair<Token, InetAddress> pair : movingEndpoints)
            metadata.updateNormalToken(pair.left, pair.right);

        for (Map.Entry<Token, InetAddress> relocating : relocatingTokens.entrySet())
            metadata.updateNormalToken(relocating.getKey(), relocating.getValue());

        return metadata;
    }
    finally
    {
        lock.readLock().unlock();
    }
}
Example 13: deserialize
import org.apache.cassandra.utils.Pair; // import the required package/class
public Future<Pair<KeyCacheKey, RowIndexEntry>> deserialize(DataInputStream input, ColumnFamilyStore cfs) throws IOException
{
    int keyLength = input.readInt();
    if (keyLength > FBUtilities.MAX_UNSIGNED_SHORT)
    {
        throw new IOException(String.format("Corrupted key cache. Key length of %d is longer than maximum of %d",
                                            keyLength, FBUtilities.MAX_UNSIGNED_SHORT));
    }
    ByteBuffer key = ByteBufferUtil.read(input, keyLength);
    int generation = input.readInt();
    SSTableReader reader = findDesc(generation, cfs.getSSTables());
    input.readBoolean(); // backwards compatibility for "promoted indexes" boolean
    if (reader == null)
    {
        RowIndexEntry.Serializer.skipPromotedIndex(input);
        return null;
    }
    RowIndexEntry entry = reader.metadata.comparator.rowIndexEntrySerializer().deserialize(input, reader.descriptor.version);
    return Futures.immediateFuture(Pair.create(new KeyCacheKey(cfs.metadata.cfId, reader.descriptor, key), entry));
}
Example 14: bootstrap
import org.apache.cassandra.utils.Pair; // import the required package/class
private void bootstrap(Collection<Token> tokens)
{
    isBootstrapMode = true;
    SystemKeyspace.updateTokens(tokens); // DON'T use setToken, that makes us part of the ring locally which is incorrect until we are done bootstrapping
    if (!DatabaseDescriptor.isReplacing())
    {
        // if not replacing an existing token, bootstrap normally
        List<Pair<ApplicationState, VersionedValue>> states = new ArrayList<Pair<ApplicationState, VersionedValue>>();
        states.add(Pair.create(ApplicationState.TOKENS, valueFactory.tokens(tokens)));
        states.add(Pair.create(ApplicationState.STATUS, valueFactory.bootstrapping(tokens)));
        Gossiper.instance.addLocalApplicationStates(states);
        setMode(Mode.JOINING, "sleeping " + RING_DELAY + " ms for pending range setup", true);
        Uninterruptibles.sleepUninterruptibly(RING_DELAY, TimeUnit.MILLISECONDS);
    }
    else
    {
        // Don't set any state for the node which is bootstrapping the existing token...
        tokenMetadata.updateNormalTokens(tokens, FBUtilities.getBroadcastAddress());
        SystemKeyspace.removeEndpoint(DatabaseDescriptor.getReplaceAddress());
    }
    if (!Gossiper.instance.seenAnySeed())
        throw new IllegalStateException("Unable to contact any seeds!");

    setMode(Mode.JOINING, "Starting to bootstrap...", true);
    new BootStrapper(FBUtilities.getBroadcastAddress(), tokens, tokenMetadata).bootstrap(); // handles token update
    logger.info("Bootstrap completed! for the tokens {}", tokens);
}
Example 15: testSerialization
import org.apache.cassandra.utils.Pair; // import the required package/class
@Test
public void testSerialization() throws IOException
{
    Pair<List<DecoratedKey>, IndexSummary> random = generateRandomIndex(100, 1);
    DataOutputBuffer dos = new DataOutputBuffer();
    IndexSummary.serializer.serialize(random.right, dos, false);
    // write junk
    dos.writeUTF("JUNK");
    dos.writeUTF("JUNK");
    FileUtils.closeQuietly(dos);

    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
    IndexSummary is = IndexSummary.serializer.deserialize(dis, partitioner, false, 1, 1);
    for (int i = 0; i < 100; i++)
        assertEquals(i, is.binarySearch(random.left.get(i)));
    // read the junk
    assertEquals(dis.readUTF(), "JUNK");
    assertEquals(dis.readUTF(), "JUNK");
    is.close();
    FileUtils.closeQuietly(dis);
    random.right.close();
}