This article collects typical usage examples of the Java class org.apache.cassandra.dht.IPartitioner. If you have been wondering what IPartitioner does, how to use it, or what real-world code using it looks like, the curated class examples below may help.
The IPartitioner class belongs to the org.apache.cassandra.dht package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: build
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
public IndexSummary build(IPartitioner partitioner, ReadableBoundary boundary)
{
    //assert entries.length() > 0;
    int count = (int) (offsets.length() / 4);
    long entriesLength = entries.length();
    if (boundary != null)
    {
        count = boundary.summaryCount;
        entriesLength = boundary.entriesLength;
    }
    int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
    //assert count > 0;
    return new IndexSummary(partitioner, offsets.currentBuffer().sharedCopy(),
                            count, entries.currentBuffer().sharedCopy(), entriesLength,
                            sizeAtFullSampling, minIndexInterval, samplingLevel);
}
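The sizeAtFullSampling computation above is just "one summary entry per minIndexInterval keys, rounded up". A minimal standalone sketch of that arithmetic (the values are illustrative, not from the source):

long keysWritten = 1_000_000;
int minIndexInterval = 128; // Cassandra's default min_index_interval
// one entry per minIndexInterval keys at full sampling, rounded up
int sizeAtFullSampling = (int) Math.ceil(keysWritten / (double) minIndexInterval);
// sizeAtFullSampling == 7813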
Example 2: deserializeFirstLastKey
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
/**
 * Deserializes the first and last key stored in the summary.
 *
 * Only for use by offline tools like SSTableMetadataViewer, otherwise SSTable.first/last should be used.
 */
public Pair<DecoratedKey, DecoratedKey> deserializeFirstLastKey(DataInputStream in, IPartitioner partitioner, boolean haveSamplingLevel) throws IOException
{
    in.skipBytes(4); // minIndexInterval
    int offsetCount = in.readInt();
    long offheapSize = in.readLong();
    if (haveSamplingLevel)
        in.skipBytes(8); // samplingLevel, fullSamplingSummarySize
    in.skip(offsetCount * 4);
    in.skip(offheapSize - offsetCount * 4);
    DecoratedKey first = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    DecoratedKey last = partitioner.decorateKey(ByteBufferUtil.readWithLength(in));
    return Pair.create(first, last);
}
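A hypothetical offline call site for this method, in the spirit of SSTableMetadataViewer. The file path, the Murmur3 partitioner choice, and the surrounding method declaring throws Exception are all assumptions, not taken from the source:

// assumes a Summary.db component written by the same Cassandra version,
// since the byte layout skipped above must match exactly
IPartitioner partitioner = FBUtilities.newPartitioner("org.apache.cassandra.dht.Murmur3Partitioner");
try (DataInputStream in = new DataInputStream(new FileInputStream("/path/to/ks-table-ka-1-Summary.db")))
{
    Pair<DecoratedKey, DecoratedKey> firstLast = deserializeFirstLastKey(in, partitioner, true);
    System.out.println("first: " + firstLast.left + ", last: " + firstLast.right);
}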
Example 3: loadTablesFromRemote
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we can't parse (probably DSE-specific syntax)
                    logger.debug("Ignoring table {} due to syntax exception {}", name, e.getMessage());
                }
            }
        }
    }
    return cluster;
}
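A hypothetical invocation of the helper above. The host, port, and null override string are placeholders (whether parseOverrides accepts null is not shown in this snippet), and the caller is assumed to handle IOException:

Cluster cluster = loadTablesFromRemote("127.0.0.1", 9042, null);
try
{
    // ... table metadata is now registered for offline sstable work ...
}
finally
{
    cluster.close(); // the helper returns the live driver Cluster; close it when done
}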
Example 4: copy
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
public CFMetaData copy(IPartitioner partitioner)
{
    return copyOpts(new CFMetaData(ksName,
                                   cfName,
                                   cfId,
                                   isSuper,
                                   isCounter,
                                   isDense,
                                   isCompound,
                                   isView,
                                   copy(partitionKeyColumns),
                                   copy(clusteringColumns),
                                   copy(partitionColumns),
                                   partitioner),
                    this);
}
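A short sketch of how such a copy might be used; the metadata variable and the ByteOrderedPartitioner choice are assumptions for illustration:

// re-decorate a table definition with a different partitioner, e.g. for
// reading sstables offline under a partitioner other than the cluster's
IPartitioner byteOrdered = FBUtilities.newPartitioner("org.apache.cassandra.dht.ByteOrderedPartitioner");
CFMetaData copied = metadata.copy(byteOrdered);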
Example 5: testSatisfiedByWithMultipleTerms
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
@Test
public void testSatisfiedByWithMultipleTerms()
{
    final ByteBuffer comment = UTF8Type.instance.decompose("comment");
    final ColumnFamilyStore store = Keyspace.open("sasecondaryindex").getColumnFamilyStore("saindexed1");
    final IPartitioner<?> partitioner = StorageService.getPartitioner();

    ColumnFamily cf = ArrayBackedSortedColumns.factory.create(store.metadata);
    cf.addColumn(new Column(comment, UTF8Type.instance.decompose("software engineer is working on a project"), System.currentTimeMillis()));

    Operation.Builder builder = new Operation.Builder(OperationType.AND, UTF8Type.instance, controller,
                                    new IndexExpression(comment, IndexOperator.EQ, UTF8Type.instance.decompose("eng is a work")));
    Operation op = builder.complete();
    Assert.assertTrue(op.satisfiedBy(new Row(partitioner.decorateKey(UTF8Type.instance.decompose("key1")), cf), null, false));

    builder = new Operation.Builder(OperationType.AND, UTF8Type.instance, controller,
                  new IndexExpression(comment, IndexOperator.EQ, UTF8Type.instance.decompose("soft works fine")));
    op = builder.complete();
    Assert.assertTrue(op.satisfiedBy(new Row(partitioner.decorateKey(UTF8Type.instance.decompose("key1")), cf), null, false));
}
Example 6: scheduleAllDeliveries
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
/**
 * Attempt delivery to any node for which we have hints. Necessary since we can generate hints even for
 * nodes which are never officially down/failed.
 */
private void scheduleAllDeliveries()
{
    logger.debug("Started scheduleAllDeliveries");

    // Force a major compaction to get rid of the tombstones and expired hints. Do it once, before we schedule any
    // individual replay, to avoid N - 1 redundant individual compactions (when N is the number of nodes with hints
    // to deliver to).
    compact();

    IPartitioner p = StorageService.getPartitioner();
    RowPosition minPos = p.getMinimumToken().minKeyBound();
    Range<RowPosition> range = new Range<>(minPos, minPos, p);
    IDiskAtomFilter filter = new NamesQueryFilter(ImmutableSortedSet.<CellName>of());
    List<Row> rows = hintStore.getRangeSlice(range, null, filter, Integer.MAX_VALUE, System.currentTimeMillis());
    for (Row row : rows)
    {
        UUID hostId = UUIDGen.getUUID(row.key.getKey());
        InetAddress target = StorageService.instance.getTokenMetadata().getEndpointForHostId(hostId);
        // token may have since been removed (in which case we have just read back a tombstone)
        if (target != null)
            scheduleHintDelivery(target, false);
    }

    logger.debug("Finished scheduleAllDeliveries");
}
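The whole-ring range trick above is worth isolating: a Range whose left and right bounds are both the minimum token's key bound selects every row in the table. A sketch using the same 2.x-era API as the example:

IPartitioner p = StorageService.getPartitioner();
RowPosition min = p.getMinimumToken().minKeyBound();
Range<RowPosition> everything = new Range<>(min, min, p); // (min, min] wraps around the full ring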
Example 7: SSTableReader
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
private SSTableReader(Descriptor desc,
                      Set<Component> components,
                      CFMetaData metadata,
                      IPartitioner partitioner,
                      SegmentedFile ifile,
                      SegmentedFile dfile,
                      IndexSummary indexSummary,
                      IFilter bloomFilter,
                      long maxDataAge,
                      StatsMetadata sstableMetadata,
                      OpenReason openReason)
{
    this(desc, components, metadata, partitioner, maxDataAge, sstableMetadata, openReason);
    this.ifile = ifile;
    this.dfile = dfile;
    this.indexSummary = indexSummary;
    this.bf = bloomFilter;
    this.setup(false);
}
Example 8: deserialize
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
public MerkleTree deserialize(DataInputPlus in, int version) throws IOException
{
    byte hashdepth = in.readByte();
    long maxsize = in.readLong();
    long size = in.readLong();
    IPartitioner partitioner;
    try
    {
        partitioner = FBUtilities.newPartitioner(in.readUTF());
    }
    catch (ConfigurationException e)
    {
        throw new IOException(e);
    }

    // full range
    Token left = Token.serializer.deserialize(in, partitioner, version);
    Token right = Token.serializer.deserialize(in, partitioner, version);
    Range<Token> fullRange = new Range<>(left, right);

    MerkleTree mt = new MerkleTree(partitioner, fullRange, hashdepth, maxsize);
    mt.size = size;
    mt.root = Hashable.serializer.deserialize(in, partitioner, version);
    return mt;
}
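For contrast with what the deserializer reconstructs, here is a minimal sketch that builds the equivalent empty tree directly; the Murmur3 choice and the maxsize of 128 are illustrative, and RECOMMENDED_DEPTH/init() are assumed to be available as in Cassandra's MerkleTree API:

IPartitioner p = FBUtilities.newPartitioner("org.apache.cassandra.dht.Murmur3Partitioner");
Range<Token> fullRange = new Range<>(p.getMinimumToken(), p.getMinimumToken());
MerkleTree tree = new MerkleTree(p, fullRange, MerkleTree.RECOMMENDED_DEPTH, 128);
tree.init(); // split down to the leaf ranges before hashing data into the tree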
Example 9: testAddEmptyKey
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
@Test
public void testAddEmptyKey() throws Exception
{
    IPartitioner p = new RandomPartitioner();
    try (IndexSummaryBuilder builder = new IndexSummaryBuilder(1, 1, BASE_SAMPLING_LEVEL))
    {
        builder.maybeAddEntry(p.decorateKey(ByteBufferUtil.EMPTY_BYTE_BUFFER), 0);
        IndexSummary summary = builder.build(p);
        assertEquals(1, summary.size());
        assertEquals(0, summary.getPosition(0));
        assertArrayEquals(new byte[0], summary.getKey(0));

        DataOutputBuffer dos = new DataOutputBuffer();
        IndexSummary.serializer.serialize(summary, dos, false);
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
        IndexSummary loaded = IndexSummary.serializer.deserialize(dis, p, false, 1, 1);

        assertEquals(1, loaded.size());
        assertEquals(summary.getPosition(0), loaded.getPosition(0));
        assertArrayEquals(summary.getKey(0), loaded.getKey(0)); // compare against the deserialized copy, not the original with itself
        summary.close();
        loaded.close();
    }
}
Example 10: deserialize
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
public MerkleTree deserialize(DataInput in, int version) throws IOException
{
    byte hashdepth = in.readByte();
    long maxsize = in.readLong();
    long size = in.readLong();
    IPartitioner partitioner;
    try
    {
        partitioner = FBUtilities.newPartitioner(in.readUTF());
    }
    catch (ConfigurationException e)
    {
        throw new IOException(e);
    }

    // full range
    Token left = Token.serializer.deserialize(in);
    Token right = Token.serializer.deserialize(in);
    Range<Token> fullRange = new Range<>(left, right, partitioner);

    MerkleTree mt = new MerkleTree(partitioner, fullRange, hashdepth, maxsize);
    mt.size = size;
    mt.root = Hashable.serializer.deserialize(in, version);
    return mt;
}
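Note that this is the older variant of the deserializer shown in Example 8: here Token.serializer.deserialize and Hashable.serializer.deserialize do not take the partitioner as an argument, and the Range constructor receives it explicitly instead.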
Example 11: testAddEmptyKey
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
@Test
public void testAddEmptyKey() throws Exception
{
    IPartitioner p = new RandomPartitioner();
    IndexSummaryBuilder builder = new IndexSummaryBuilder(1, 1);
    builder.maybeAddEntry(p.decorateKey(ByteBufferUtil.EMPTY_BYTE_BUFFER), 0);
    IndexSummary summary = builder.build(p);
    assertEquals(1, summary.size());
    assertEquals(0, summary.getPosition(0));
    assertArrayEquals(new byte[0], summary.getKey(0));

    ByteArrayOutputStream aos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(aos);
    IndexSummary.serializer.serialize(summary, dos);
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(aos.toByteArray()));
    IndexSummary loaded = IndexSummary.serializer.deserialize(dis, p);

    assertEquals(1, loaded.size());
    assertEquals(summary.getPosition(0), loaded.getPosition(0));
    assertArrayEquals(summary.getKey(0), loaded.getKey(0)); // compare against the deserialized copy, not the original with itself
}
Example 12: transferRanges
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
private void transferRanges(ColumnFamilyStore cfs) throws Exception
{
    IPartitioner p = cfs.getPartitioner();
    List<Range<Token>> ranges = new ArrayList<>();
    // wrapped range
    ranges.add(new Range<Token>(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key0"))));
    StreamPlan streamPlan = new StreamPlan("StreamingTransferTest").transferRanges(LOCAL, cfs.keyspace.getName(), ranges, cfs.getColumnFamilyName());
    streamPlan.execute().get();
    verifyConnectionsAreClosed();

    // cannot add ranges after stream session is finished
    try
    {
        streamPlan.transferRanges(LOCAL, cfs.keyspace.getName(), ranges, cfs.getColumnFamilyName());
        fail("Should have thrown exception");
    }
    catch (RuntimeException e)
    {
        // do nothing
    }
}
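The "wrapped range" comment is the point of the setup: when the start token sorts after the end token, the range wraps around the ring. A small sketch of checking that property, assuming Range's static isWrapAround helper:

IPartitioner p = cfs.getPartitioner();
Token start = p.getToken(ByteBufferUtil.bytes("key1"));
Token end = p.getToken(ByteBufferUtil.bytes("key0"));
boolean wraps = Range.isWrapAround(start, end); // true whenever start compares after end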
Example 13: build
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
public IndexSummary build(IPartitioner partitioner)
{
    assert keys != null && keys.size() > 0;
    assert keys.size() == positions.size();

    // Layout: an int offset per key at the front, then each key's bytes
    // immediately followed by its 8-byte index position.
    Memory memory = Memory.allocate(offheapSize + (keys.size() * 4));
    int idxPosition = 0;
    int keyPosition = keys.size() * 4;
    for (int i = 0; i < keys.size(); i++)
    {
        // record where this key's data starts in the offsets table
        memory.setInt(idxPosition, keyPosition);
        idxPosition += TypeSizes.NATIVE.sizeof(keyPosition);

        // write the key bytes, then the key's position in the index file
        byte[] temp = keys.get(i);
        memory.setBytes(keyPosition, temp, 0, temp.length);
        keyPosition += temp.length;
        long tempPosition = positions.get(i);
        memory.setLong(keyPosition, tempPosition);
        keyPosition += TypeSizes.NATIVE.sizeof(tempPosition);
    }
    return new IndexSummary(partitioner, memory, keys.size(), indexInterval);
}
Example 14: SSTable
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
protected SSTable(Descriptor descriptor, Set<Component> components, CFMetaData metadata, IPartitioner partitioner)
{
    // In almost all cases, metadata shouldn't be null, but allowing null makes it possible to create a mostly
    // functional SSTable without a full schema definition. SSTableLoader uses that ability.
    assert descriptor != null;
    assert components != null;
    assert partitioner != null;

    this.descriptor = descriptor;
    Set<Component> dataComponents = new HashSet<Component>(components);
    for (Component component : components)
        assert component.type != Component.Type.COMPACTED_MARKER;
    this.compression = dataComponents.contains(Component.COMPRESSION_INFO);
    this.components = new CopyOnWriteArraySet<Component>(dataComponents);
    this.metadata = metadata;
    this.partitioner = partitioner;
}
Example 15: newPartitioner
import org.apache.cassandra.dht.IPartitioner; // import the required package/class
/**
 * Create a new instance of the partitioner recorded in an SSTable's Descriptor.
 * @param desc Descriptor of an sstable
 * @return a new IPartitioner instance
 * @throws IOException if the sstable metadata cannot be deserialized
 */
public static IPartitioner newPartitioner(Descriptor desc) throws IOException
{
    EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    return newPartitioner(validationMetadata.partitioner, Optional.of(header.getKeyType()));
}
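A hypothetical caller; the sstable path below is a placeholder, not taken from the source:

Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/ks/table/ma-1-big-Data.db");
IPartitioner partitioner = newPartitioner(desc); // reads the VALIDATION and HEADER metadata components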