本文整理汇总了Java中org.apache.cassandra.net.MessagingService.current_version方法的典型用法代码示例。如果您正苦于以下问题:Java MessagingService.current_version方法的具体用法?Java MessagingService.current_version怎么用?Java MessagingService.current_version使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.net.MessagingService
的用法示例。
在下文中一共展示了MessagingService.current_version方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: announce
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Merges a schema change locally (asynchronously, on the migration stage) and
 * pushes it to every live peer that speaks exactly our messaging version.
 *
 * @param schema the schema mutations to merge and distribute
 * @return a future tracking the local merge
 */
private static Future<?> announce(final Collection<Mutation> schema)
{
    Future<?> mergeTask = StageManager.getStage(Stage.MIGRATION).submit(new WrappedRunnable()
    {
        protected void runMayThrow() throws IOException, ConfigurationException
        {
            DefsTables.mergeSchema(schema);
        }
    });

    for (InetAddress endpoint : Gossiper.instance.getLiveMembers())
    {
        if (endpoint.equals(FBUtilities.getBroadcastAddress()))
            continue; // the local merge above covers this node
        if (!MessagingService.instance().knowsVersion(endpoint))
            continue; // unknown version: peer may not understand our schema format
        if (MessagingService.instance().getRawVersion(endpoint) != MessagingService.current_version)
            continue; // only push schema to nodes with known and equal versions
        pushSchemaMutation(endpoint, schema);
    }
    return mergeTask;
}
示例2: recycleSegment
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
* Differs from the above because it can work on any file instead of just existing
* commit log segments managed by this manager.
*
* @param file segment file that is no longer in use.
*/
/**
 * Differs from the above because it can work on any file instead of just existing
 * commit log segments managed by this manager.
 *
 * @param file segment file that is no longer in use.
 */
void recycleSegment(final File file)
{
    // Recycle only when we are under the cap AND the segment was written with
    // the current messaging version; anything else is simply deleted.
    if (!isCapExceeded()
        && CommitLogDescriptor.fromFileName(file.getName()).getMessagingVersion() == MessagingService.current_version)
    {
        logger.debug("Recycling {}", file);
        // this wasn't previously a live segment, so add it to the managed size when we make it live
        size.addAndGet(DatabaseDescriptor.getCommitLogSegmentSize());
        segmentManagementTasks.add(new Callable<CommitLogSegment>()
        {
            public CommitLogSegment call()
            {
                return new CommitLogSegment(file.getPath());
            }
        });
        return;
    }

    // (don't decrease managed size, since this was never a "live" segment)
    logger.debug("(Unopened) segment {} is no longer needed and will be deleted now", file);
    FileUtils.deleteWithConfirm(file);
}
示例3: announce
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Merges a schema change locally (asynchronously, on the migration stage) and
 * pushes it to every live peer at or above the current messaging version.
 *
 * @param schema the schema mutations to merge and distribute
 * @return a future tracking the local merge
 */
private static Future<?> announce(final Collection<RowMutation> schema)
{
    Future<?> mergeTask = StageManager.getStage(Stage.MIGRATION).submit(new WrappedRunnable()
    {
        protected void runMayThrow() throws IOException, ConfigurationException
        {
            DefsTables.mergeSchema(schema);
        }
    });

    for (InetAddress endpoint : Gossiper.instance.getLiveMembers())
    {
        // localhost is handled by the local merge above; nodes on versions
        // older than the current major don't receive the schema at all
        boolean isSelf = endpoint.equals(FBUtilities.getBroadcastAddress());
        if (!isSelf && MessagingService.instance().getVersion(endpoint) >= MessagingService.current_version)
            pushSchemaMutation(endpoint, schema);
    }
    return mergeTask;
}
示例4: recycleSegment
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
* Differs from the above because it can work on any file instead of just existing
* commit log segments managed by this allocator.
*
* @param file segment file that is no longer in use.
*/
/**
 * Differs from the above because it can work on any file instead of just existing
 * commit log segments managed by this allocator.
 *
 * @param file segment file that is no longer in use.
 */
public void recycleSegment(final File file)
{
    // Recycle only when under the cap, the file is exactly one segment long
    // (guards against odd-sized/empty segments from old C* versions and unit
    // tests), and it was written with the current messaging version.
    if (!isCapExceeded() && file.length() == DatabaseDescriptor.getCommitLogSegmentSize()
        && CommitLogDescriptor.fromFileName(file.getName()).getMessagingVersion() == MessagingService.current_version)
    {
        logger.debug("Recycling {}", file);
        // this wasn't previously a live segment, so add it to the managed size when we make it live
        size.addAndGet(DatabaseDescriptor.getCommitLogSegmentSize());
        queue.add(new Runnable()
        {
            public void run()
            {
                CommitLogSegment segment = new CommitLogSegment(file.getPath());
                internalAddReadySegment(segment);
            }
        });
        return;
    }

    // (don't decrease managed size, since this was never a "live" segment)
    logger.debug("(Unopened) segment {} is no longer needed and will be deleted now", file);
    FileUtils.deleteWithConfirm(file);
}
示例5: announce
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Merges a schema change locally (asynchronously, on the migration stage) and
 * pushes it to every live peer with a known messaging version equal to ours.
 *
 * @param schema the schema mutations to merge and distribute
 * @return a future tracking the local merge-and-announce
 */
private static Future<?> announce(final Collection<Mutation> schema)
{
    Future<?> mergeTask = StageManager.getStage(Stage.MIGRATION).submit(new WrappedRunnable()
    {
        protected void runMayThrow() throws ConfigurationException
        {
            SchemaKeyspace.mergeSchemaAndAnnounceVersion(schema);
        }
    });

    for (InetAddress endpoint : Gossiper.instance.getLiveMembers())
    {
        if (endpoint.equals(FBUtilities.getBroadcastAddress()))
            continue; // the local merge above covers this node
        if (!MessagingService.instance().knowsVersion(endpoint))
            continue; // unknown version: peer may not understand our schema format
        if (MessagingService.instance().getRawVersion(endpoint) != MessagingService.current_version)
            continue; // only push schema to nodes with known and equal versions
        pushSchemaMutation(endpoint, schema);
    }
    return mergeTask;
}
示例6: AbstractReadExecutor
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
AbstractReadExecutor(Keyspace keyspace, ReadCommand command, ConsistencyLevel consistencyLevel, List<InetAddress> targetReplicas)
{
    this.command = command;
    this.targetReplicas = targetReplicas;
    this.handler = new ReadCallback(new DigestResolver(keyspace, command, consistencyLevel, targetReplicas.size()), consistencyLevel, command, targetReplicas);
    this.traceState = Tracing.instance.get();

    // If digests are requested, use the smallest messaging version among the
    // target replicas: newer nodes can produce older digest formats, but the
    // reverse is not true.
    // TODO: needed while we still talk to pre-3.0 nodes; if the digest format
    // stays stable we can drop this once pre-3.0 compatibility ends.
    int minDigestVersion = MessagingService.current_version;
    for (InetAddress replica : targetReplicas)
        minDigestVersion = Math.min(minDigestVersion, MessagingService.instance().getVersion(replica));
    command.setDigestVersion(minDigestVersion);
}
示例7: reconnect
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Switches the connection pool for a same-datacenter peer over to its private
 * (internal) address, provided the peer speaks our messaging version and is
 * not already connected via that address.
 *
 * @param publicAddress the peer's public (broadcast) address
 * @param localAddress  the peer's datacenter-local address to reconnect to
 */
private void reconnect(InetAddress publicAddress, InetAddress localAddress)
{
    if (snitch.getDatacenter(publicAddress).equals(localDc)
        && MessagingService.instance().getVersion(publicAddress) == MessagingService.current_version
        && !MessagingService.instance().getConnectionPool(publicAddress).endPoint().equals(localAddress))
    {
        MessagingService.instance().getConnectionPool(publicAddress).reset(localAddress);
        // Fixed message typo ("Intiated") and switched String.format to SLF4J
        // parameterized logging, so the message is only built when debug is enabled.
        logger.debug("Initiated reconnect to an Internal IP {} for the {}", localAddress, publicAddress);
    }
}
示例8: shouldPullSchemaFrom
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Whether we should request schema from this peer.
 *
 * Don't request schema from nodes with a different or unknown major version
 * (they may have an incompatible schema), and don't request schema from fat
 * clients (they hold no schema of interest).
 */
public static boolean shouldPullSchemaFrom(InetAddress endpoint)
{
    if (!MessagingService.instance().knowsVersion(endpoint))
        return false;
    if (MessagingService.instance().getRawVersion(endpoint) != MessagingService.current_version)
        return false;
    return !Gossiper.instance.isFatClient(endpoint);
}
示例9: shouldPullSchemaFrom
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Whether we should request schema from this peer.
 *
 * Don't request schema from nodes running a newer major version (they may
 * have an incompatible schema), and don't request schema from fat clients.
 */
private static boolean shouldPullSchemaFrom(InetAddress endpoint)
{
    boolean compatibleVersion = MessagingService.instance().getVersion(endpoint) <= MessagingService.current_version;
    return compatibleVersion && !Gossiper.instance.isFatClient(endpoint);
}
示例10: shouldPullSchemaFrom
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Whether we should request schema from this peer.
 *
 * Only pull from peers whose messaging version is known and equal to ours
 * (a different or unknown major may have an incompatible schema) and that
 * are full ring members rather than gossip-only members.
 */
public static boolean shouldPullSchemaFrom(InetAddress endpoint)
{
    MessagingService ms = MessagingService.instance();
    return ms.knowsVersion(endpoint)
           && ms.getRawVersion(endpoint) == MessagingService.current_version
           && !Gossiper.instance.isGossipOnlyMember(endpoint);
}
示例11: deserialize
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
/**
 * Deserializes a batch. When the wire version matches the current messaging
 * version the mutations are kept in their encoded form; otherwise they are
 * decoded immediately to handle the version difference.
 */
public Batch deserialize(DataInputPlus in, int version) throws IOException
{
    UUID id = UUIDSerializer.serializer.deserialize(in, version);
    long creationTime = in.readLong();

    // A same-version batch can carry its mutations verbatim; a different
    // version cannot, so decode them now for compatibility.
    if (version == MessagingService.current_version)
        return createRemote(id, creationTime, readEncodedMutations(in));

    return createLocal(id, creationTime, decodeMutations(in, version));
}
示例12: testSerialization
import org.apache.cassandra.net.MessagingService; //导入方法依赖的package包/类
@Test
public void testSerialization() throws IOException
{
    CFMetaData cfm = Keyspace.open(KEYSPACE).getColumnFamilyStore(CF_STANDARD).metadata;
    long now = FBUtilities.timestampMicros();
    int version = MessagingService.current_version;
    UUID uuid = UUIDGen.getTimeUUID();

    // Build ten simple row updates to batch together.
    List<Mutation> mutations = new ArrayList<>(10);
    for (int i = 0; i < 10; i++)
    {
        mutations.add(new RowUpdateBuilder(cfm, FBUtilities.timestampMicros(), bytes(i))
                      .clustering("name" + i)
                      .add("val", "val" + i)
                      .build());
    }

    Batch original = Batch.createLocal(uuid, now, mutations);
    assertEquals(uuid, original.id);
    assertEquals(now, original.creationTime);
    assertEquals(mutations, original.decodedMutations);

    // Round-trip through the serializer at the current messaging version and
    // check the reported serialized size matches what was actually written.
    DataOutputBuffer out = new DataOutputBuffer();
    Batch.serializer.serialize(original, out, version);
    assertEquals(out.getLength(), Batch.serializer.serializedSize(original, version));

    DataInputPlus dis = new DataInputBuffer(out.getData());
    Batch roundTripped = Batch.serializer.deserialize(dis, version);
    assertEquals(original.id, roundTripped.id);
    assertEquals(original.creationTime, roundTripped.creationTime);
    assertEquals(original.decodedMutations.size(), roundTripped.encodedMutations.size());

    // The deserialized same-version batch keeps mutations encoded; decode
    // each one and compare against the corresponding original mutation.
    Iterator<Mutation> expected = original.decodedMutations.iterator();
    Iterator<ByteBuffer> actual = roundTripped.encodedMutations.iterator();
    while (expected.hasNext())
    {
        try (DataInputBuffer in = new DataInputBuffer(actual.next().array()))
        {
            assertEquals(expected.next().toString(), Mutation.serializer.deserialize(in, version).toString());
        }
    }
}