This page collects typical usage examples of the Java method org.apache.cassandra.config.DatabaseDescriptor.getPartitioner. If you are unsure what DatabaseDescriptor.getPartitioner does, how to use it, or where to find examples, the curated code samples below should help. You can also explore further usage examples of its declaring class, org.apache.cassandra.config.DatabaseDescriptor.
The sections below present 13 code examples of the DatabaseDescriptor.getPartitioner method, ordered by popularity by default.
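Before the examples, here is a minimal sketch of what the method hands back: the cluster-wide IPartitioner configured in cassandra.yaml, which maps row keys to tokens on the ring. This is an illustrative demo rather than code from the examples: it assumes a Cassandra 2.x classpath (matching most snippets below) and that DatabaseDescriptor has already loaded its configuration; the class name PartitionerDemo and the key literal are made up.

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.utils.ByteBufferUtil;

public class PartitionerDemo
{
    public static void main(String[] args)
    {
        // Assumes cassandra.yaml has been loaded, so a partitioner is configured.
        IPartitioner partitioner = DatabaseDescriptor.getPartitioner();

        // Decorating a key pairs the raw key bytes with their token on the ring.
        DecoratedKey key = partitioner.decorateKey(ByteBufferUtil.bytes("some-key"));
        System.out.println(key.getToken());
    }
}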
Example 1: loadTablesFromRemote
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we can't parse (probably DSE-specific)
                    logger.debug("Ignoring table " + name + " due to syntax exception " + e.getMessage());
                }
            }
        }
    }
    return cluster;
}
Example 2: setPartitionerUnsafe
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
IPartitioner setPartitionerUnsafe(IPartitioner newPartitioner)
{
    IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
    DatabaseDescriptor.setPartitioner(newPartitioner);
    valueFactory = new VersionedValue.VersionedValueFactory(getPartitioner());
    return oldPartitioner;
}
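Example 2 swaps in a new partitioner and returns the old one so the caller can restore it. Here is a hedged sketch of the consuming side, using only calls already shown in Examples 1 and 12 (ByteOrderedPartitioner is just one concrete choice):

IPartitioner old = DatabaseDescriptor.getPartitioner();
DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
try
{
    // ... run code that needs byte-ordered tokens ...
}
finally
{
    DatabaseDescriptor.setPartitionerUnsafe(old); // always restore the global state
}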
Example 3: importJson
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
/**
 * Convert a JSON formatted file to an SSTable.
 *
 * @param jsonFile the file containing JSON formatted data
 * @param keyspace keyspace the data belongs to
 * @param cf column family the data belongs to
 * @param ssTablePath file to write the SSTable to
 *
 * @throws IOException for errors reading/writing input/output
 */
public int importJson(String jsonFile, String keyspace, String cf, String ssTablePath) throws IOException
{
    ColumnFamily columnFamily = ArrayBackedSortedColumns.factory.create(keyspace, cf);
    IPartitioner partitioner = DatabaseDescriptor.getPartitioner();

    int importedKeys = (isSorted) ? importSorted(jsonFile, columnFamily, ssTablePath, partitioner)
                                  : importUnsorted(jsonFile, columnFamily, ssTablePath, partitioner);

    if (importedKeys != -1)
        System.out.printf("%d keys imported successfully.%n", importedKeys);

    return importedKeys;
}
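A hypothetical call site for the importer above; the paths, keyspace, and table names are placeholders, and the surrounding tool is assumed to have loaded the node configuration so that DatabaseDescriptor.getPartitioner() returns the configured partitioner:

// All literals here are illustrative placeholders.
int imported = importJson("/tmp/rows.json", "Keyspace1", "Standard1", "/tmp/Keyspace1-Standard1-ka-1-Data.db");
if (imported == -1)
    System.err.println("Import failed");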
Example 4: getWriter
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
protected SSTableWriter getWriter()
{
    return new SSTableWriter(
        makeFilename(directory, metadata.ksName, metadata.cfName),
        0, // we don't care about the bloom filter
        ActiveRepairService.UNREPAIRED_SSTABLE,
        metadata,
        DatabaseDescriptor.getPartitioner(),
        new MetadataCollector(metadata.comparator));
}
Example 5: Validator
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
/**
 * Create a Validator with the default initial size for the Merkle tree.
 */
public Validator(RepairJobDesc desc, InetAddress initiator, int gcBefore)
{
    this(desc,
         initiator,
         // TODO: memory usage (maxsize) should be tunable per CF,
         // globally, or shared for all CFs in a cluster
         new MerkleTree(DatabaseDescriptor.getPartitioner(), desc.range, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, 15)),
         gcBefore);
}
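The maxsize argument above, (int) Math.pow(2, 15), caps the tree at 2^15 = 32768 leaf ranges, while RECOMMENDED_DEPTH bounds how far the tree subdivides. A bit shift expresses the same constant more idiomatically (a sketch reusing only the constructor shown above; desc.range comes from the enclosing repair job):

// 1 << 15 == (int) Math.pow(2, 15) == 32768 leaf ranges for the repaired range.
MerkleTree tree = new MerkleTree(DatabaseDescriptor.getPartitioner(), desc.range, MerkleTree.RECOMMENDED_DEPTH, 1 << 15);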
Example 6: importJson
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
/**
 * Convert a JSON formatted file to an SSTable.
 *
 * @param jsonFile the file containing JSON formatted data
 * @param keyspace keyspace the data belongs to
 * @param cf column family the data belongs to
 * @param ssTablePath file to write the SSTable to
 *
 * @throws IOException for errors reading/writing input/output
 */
public int importJson(String jsonFile, String keyspace, String cf, String ssTablePath) throws IOException
{
    ColumnFamily columnFamily = TreeMapBackedSortedColumns.factory.create(keyspace, cf);
    IPartitioner<?> partitioner = DatabaseDescriptor.getPartitioner();

    int importedKeys = (isSorted) ? importSorted(jsonFile, columnFamily, ssTablePath, partitioner)
                                  : importUnsorted(jsonFile, columnFamily, ssTablePath, partitioner);

    if (importedKeys != -1)
        System.out.printf("%d keys imported successfully.%n", importedKeys);

    return importedKeys;
}
Example 7: getWriter
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
protected SSTableWriter getWriter()
{
    return new SSTableWriter(
        makeFilename(directory, metadata.ksName, metadata.cfName),
        0, // we don't care about the bloom filter
        metadata,
        DatabaseDescriptor.getPartitioner(),
        SSTableMetadata.createCollector(metadata.comparator));
}
Example 8: TokenMetadata
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
public TokenMetadata()
{
    this(SortedBiMultiValMap.<Token, InetAddress>create(null, inetaddressCmp),
         HashBiMap.<InetAddress, UUID>create(),
         new Topology(),
         DatabaseDescriptor.getPartitioner());
}
Example 9: getPartitioner
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
public static IPartitioner getPartitioner()
{
    return DatabaseDescriptor.getPartitioner();
}
Example 10: execute
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
public List<Row> execute() throws RequestTimeoutException
{
    DataRange range = filter.dataRange;
    RowPosition lastKey = range.stopKey();
    IPartitioner<?> partitioner = DatabaseDescriptor.getPartitioner();

    final int maxRows = Math.min(filter.maxColumns(), Math.min(MAX_ROWS, filter.maxRows()));
    final List<Row> rows = new ArrayList<>(maxRows);

    Operation operationTree = null;
    try
    {
        operationTree = analyze();
        if (operationTree == null)
            return Collections.emptyList();

        operationTree.skipTo(((LongToken) range.keyRange().left.getToken()).token);

        intersection:
        while (operationTree.hasNext())
        {
            for (DecoratedKey key : operationTree.next())
            {
                if ((!lastKey.isMinimum(partitioner) && lastKey.compareTo(key) < 0) || rows.size() >= maxRows)
                    break intersection;

                if (!range.contains(key))
                    continue;

                Row row = getRow(key, filter);
                if (row != null && operationTree.satisfiedBy(row, null, !isColumnSlice(key)))
                    rows.add(row);
            }
        }
    }
    finally
    {
        FileUtils.closeQuietly(operationTree);
        controller.finish();
    }

    return rows;
}
Example 11: testPartitioner
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
public static IPartitioner testPartitioner()
{
    return DatabaseDescriptor.getPartitioner();
}
Example 12: testScrubOutOfOrder
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
@Test
public void testScrubOutOfOrder() throws Exception
{
    // This test assumes a ByteOrderedPartitioner to create an out-of-order SSTable
    IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
    DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());

    // Create out-of-order SSTable
    File tempDir = File.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
    // create ks/cf directory
    File tempDataDir = new File(tempDir, String.join(File.separator, KEYSPACE, CF3));
    tempDataDir.mkdirs();
    try
    {
        CompactionManager.instance.disableAutoCompaction();
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        String columnFamily = CF3;
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnFamily);
        cfs.clearUnsafe();

        List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
        String filename = cfs.getSSTablePath(tempDataDir);
        Descriptor desc = Descriptor.fromFilename(filename);

        LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
        try (SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, (long) keys.size(), cfs.metadata, txn)))
        {
            for (String k : keys)
            {
                PartitionUpdate update = UpdateBuilder.create(cfs.metadata, Util.dk(k))
                                                      .newRow("someName").add("val", "someValue")
                                                      .build();
                writer.append(update.unfilteredIterator());
            }
            writer.finish(false);
        }

        try
        {
            SSTableReader.open(desc, cfs.metadata);
            fail("SSTR validation should have caught the out-of-order rows");
        }
        catch (IllegalStateException ise)
        { /* this is expected */ }

        // open without validation for scrubbing
        Set<Component> components = new HashSet<>();
        if (new File(desc.filenameFor(Component.COMPRESSION_INFO)).exists())
            components.add(Component.COMPRESSION_INFO);
        components.add(Component.DATA);
        components.add(Component.PRIMARY_INDEX);
        components.add(Component.FILTER);
        components.add(Component.STATS);
        components.add(Component.SUMMARY);
        components.add(Component.TOC);

        SSTableReader sstable = SSTableReader.openNoValidation(desc, components, cfs);
        if (sstable.last.compareTo(sstable.first) < 0)
            sstable.last = sstable.first;

        try (LifecycleTransaction scrubTxn = LifecycleTransaction.offline(OperationType.SCRUB, sstable);
             Scrubber scrubber = new Scrubber(cfs, scrubTxn, false, true))
        {
            scrubber.scrub();
        }

        LifecycleTransaction.waitForDeletions();
        cfs.loadNewSSTables();
        assertOrderedAll(cfs, 7);
    }
    finally
    {
        FileUtils.deleteRecursive(tempDataDir);
        // reset partitioner
        DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
    }
}
Example 13: PartitionMapper
import org.apache.cassandra.config.DatabaseDescriptor; // import the package/class the method depends on
/**
 * Constructor specifying the indexed table {@link CFMetaData}.
 *
 * @param metadata the indexed table metadata
 */
public PartitionMapper(CFMetaData metadata) {
    this.metadata = metadata;
    partitioner = DatabaseDescriptor.getPartitioner();
    type = metadata.getKeyValidator();
}