This article collects typical usage examples of the Java method org.apache.cassandra.config.DatabaseDescriptor.setPartitionerUnsafe. If you are unsure what DatabaseDescriptor.setPartitionerUnsafe does or how to use it, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.cassandra.config.DatabaseDescriptor.
The sections below show 12 code examples of DatabaseDescriptor.setPartitionerUnsafe, sorted by popularity by default.
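Most of the examples share one pattern: a test (or an offline tool) swaps in a specific partitioner via DatabaseDescriptor.setPartitionerUnsafe and, because the method returns the previously configured partitioner, restores it afterwards. The following minimal sketch only illustrates that pattern; the class name PartitionerSwapExample and the field savedPartitioner are hypothetical and do not appear in the examples below.

import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.dht.ByteOrderedPartitioner;
import org.apache.cassandra.dht.IPartitioner;
import org.junit.AfterClass;
import org.junit.BeforeClass;

// hypothetical test class, shown only to illustrate the swap/restore pattern
public class PartitionerSwapExample
{
    // the partitioner that was active before this test class ran
    private static IPartitioner savedPartitioner;

    @BeforeClass
    public static void installOrderedPartitioner()
    {
        // setPartitionerUnsafe returns the previously configured partitioner,
        // so it can be restored once the tests are done
        savedPartitioner = DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
    }

    @AfterClass
    public static void restorePartitioner()
    {
        DatabaseDescriptor.setPartitionerUnsafe(savedPartitioner);
    }
}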
Example 1: loadTablesFromRemote
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we can't parse (probably DSE)
                    logger.debug("Ignoring table " + name + " due to syntax exception " + e.getMessage());
                }
            }
        }
    }
    return cluster;
}
Example 2: setPartitionerUnsafe
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@VisibleForTesting
public IPartitioner setPartitionerUnsafe(IPartitioner newPartitioner)
{
    IPartitioner oldPartitioner = DatabaseDescriptor.setPartitionerUnsafe(newPartitioner);
    tokenMetadata = tokenMetadata.cloneWithNewPartitioner(newPartitioner);
    valueFactory = new VersionedValue.VersionedValueFactory(newPartitioner);
    return oldPartitioner;
}
Example 3: clear
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@Before
public void clear()
{
    TOKEN_SCALE = new BigInteger("8");
    partitioner = RandomPartitioner.instance;
    // TODO need to trickle TokenSerializer
    DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    mt = new MerkleTree(partitioner, fullRange(), RECOMMENDED_DEPTH, Integer.MAX_VALUE);
}
Example 4: testFilterOutDuplicates
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
/**
 * Tests with invalid sstables (containing duplicate entries in 2.0 and 3.0 storage format),
 * that were caused by upgrading from 2.x with duplicate range tombstones.
 *
 * See CASSANDRA-12144 for details.
 */
@Test
public void testFilterOutDuplicates() throws Exception
{
    DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance);
    QueryProcessor.process(String.format("CREATE TABLE \"%s\".cf_with_duplicates_3_0 (a int, b int, c int, PRIMARY KEY (a, b))", KEYSPACE), ConsistencyLevel.ONE);

    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore("cf_with_duplicates_3_0");

    Path legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
                                       "Keyspace1",
                                       "cf_with_duplicates_3_0");

    for (String filename : new String[]{ "mb-3-big-CompressionInfo.db",
                                         "mb-3-big-Digest.crc32",
                                         "mb-3-big-Index.db",
                                         "mb-3-big-Summary.db",
                                         "mb-3-big-Data.db",
                                         "mb-3-big-Filter.db",
                                         "mb-3-big-Statistics.db",
                                         "mb-3-big-TOC.txt" })
    {
        Files.copy(Paths.get(legacySSTableRoot.toString(), filename), cfs.getDirectories().getDirectoryForNewSSTables().toPath().resolve(filename));
    }

    cfs.loadNewSSTables();
    cfs.scrub(true, true, true, 1);

    UntypedResultSet rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".cf_with_duplicates_3_0", KEYSPACE));
    assertEquals(1, rs.size());
    QueryProcessor.executeInternal(String.format("DELETE FROM \"%s\".cf_with_duplicates_3_0 WHERE a=1 AND b =2", KEYSPACE));
    rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".cf_with_duplicates_3_0", KEYSPACE));
    assertEquals(0, rs.size());
}
Example 5: testUpgradeSstablesWithDuplicates
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@Test
public void testUpgradeSstablesWithDuplicates() throws Exception
{
    DatabaseDescriptor.setPartitionerUnsafe(Murmur3Partitioner.instance);
    String cf = "cf_with_duplicates_2_0";
    QueryProcessor.process(String.format("CREATE TABLE \"%s\".%s (a int, b int, c int, PRIMARY KEY (a, b))", KEYSPACE, cf), ConsistencyLevel.ONE);

    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cf);

    Path legacySSTableRoot = Paths.get(System.getProperty(INVALID_LEGACY_SSTABLE_ROOT_PROP),
                                       "Keyspace1",
                                       cf);

    for (String filename : new String[]{ "lb-1-big-CompressionInfo.db",
                                         "lb-1-big-Data.db",
                                         "lb-1-big-Digest.adler32",
                                         "lb-1-big-Filter.db",
                                         "lb-1-big-Index.db",
                                         "lb-1-big-Statistics.db",
                                         "lb-1-big-Summary.db",
                                         "lb-1-big-TOC.txt" })
    {
        Files.copy(Paths.get(legacySSTableRoot.toString(), filename), cfs.getDirectories().getDirectoryForNewSSTables().toPath().resolve(filename));
    }

    cfs.loadNewSSTables();
    cfs.sstablesRewrite(true, 1);

    UntypedResultSet rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE, cf));
    assertEquals(1, rs.size());
    QueryProcessor.executeInternal(String.format("DELETE FROM \"%s\".%s WHERE a=1 AND b =2", KEYSPACE, cf));
    rs = QueryProcessor.executeInternal(String.format("SELECT * FROM \"%s\".%s", KEYSPACE, cf));
    assertEquals(0, rs.size());
}
Example 6: tableFromSSTable
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@SuppressWarnings("unchecked")
public static CFMetaData tableFromSSTable(File path) throws IOException, NoSuchFieldException, IllegalAccessException {
    Preconditions.checkNotNull(path);
    Descriptor desc = Descriptor.fromFilename(path.getAbsolutePath());
    EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.STATS, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);

    ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
    Preconditions.checkNotNull(validationMetadata, "Validation Metadata could not be resolved, accompanying Statistics.db file must be missing.");
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    Preconditions.checkNotNull(header, "Metadata could not be resolved, accompanying Statistics.db file must be missing.");

    // derive the partitioner from the sstable's validation metadata and install it globally
    IPartitioner partitioner = validationMetadata.partitioner.endsWith("LocalPartitioner") ?
            new LocalPartitioner(header.getKeyType()) :
            FBUtilities.newPartitioner(validationMetadata.partitioner);
    DatabaseDescriptor.setPartitionerUnsafe(partitioner);

    AbstractType<?> keyType = header.getKeyType();
    List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
    Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
    Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();

    // build a synthetic table definition from the column types recorded in the serialization header
    int id = cfCounter.incrementAndGet();
    CFMetaData.Builder builder = CFMetaData.Builder.create("turtle" + id, "turtles" + id);
    staticColumns.entrySet().stream()
                 .forEach(entry ->
                          builder.addStaticColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    regularColumns.entrySet().stream()
                 .forEach(entry ->
                          builder.addRegularColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    List<AbstractType<?>> partTypes = keyType.getComponents();
    for (int i = 0; i < partTypes.size(); i++) {
        builder.addPartitionKey("partition" + (i > 0 ? i : ""), partTypes.get(i));
    }
    for (int i = 0; i < clusteringTypes.size(); i++) {
        builder.addClusteringColumn("row" + (i > 0 ? i : ""), clusteringTypes.get(i));
    }
    CFMetaData metaData = builder.build();

    Schema.instance.setKeyspaceMetadata(KeyspaceMetadata.create(metaData.ksName, KeyspaceParams.local(),
                                                                Tables.of(metaData), Views.none(), getTypes(), Functions.none()));
    return metaData;
}
Example 7: setUp
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@BeforeClass
public static void setUp()
{
    DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
}
Example 8: setUpClass
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@BeforeClass
public static void setUpClass()
{
    DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
}
Example 9: tearDown
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@AfterClass
public static void tearDown()
{
    DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
}
Example 10: testScrubOutOfOrder
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@Test
public void testScrubOutOfOrder() throws Exception
{
    // This test assumes ByteOrderedPartitioner to create out-of-order SSTable
    IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
    DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());

    // Create out-of-order SSTable
    File tempDir = File.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
    // create ks/cf directory
    File tempDataDir = new File(tempDir, String.join(File.separator, KEYSPACE, CF3));
    tempDataDir.mkdirs();
    try
    {
        CompactionManager.instance.disableAutoCompaction();
        Keyspace keyspace = Keyspace.open(KEYSPACE);
        String columnFamily = CF3;
        ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnFamily);
        cfs.clearUnsafe();

        List<String> keys = Arrays.asList("t", "a", "b", "z", "c", "y", "d");
        String filename = cfs.getSSTablePath(tempDataDir);
        Descriptor desc = Descriptor.fromFilename(filename);

        LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.WRITE);
        try (SSTableTxnWriter writer = new SSTableTxnWriter(txn, createTestWriter(desc, (long) keys.size(), cfs.metadata, txn)))
        {
            for (String k : keys)
            {
                PartitionUpdate update = UpdateBuilder.create(cfs.metadata, Util.dk(k))
                                                      .newRow("someName").add("val", "someValue")
                                                      .build();
                writer.append(update.unfilteredIterator());
            }
            writer.finish(false);
        }

        try
        {
            SSTableReader.open(desc, cfs.metadata);
            fail("SSTR validation should have caught the out-of-order rows");
        }
        catch (IllegalStateException ise)
        { /* this is expected */ }

        // open without validation for scrubbing
        Set<Component> components = new HashSet<>();
        if (new File(desc.filenameFor(Component.COMPRESSION_INFO)).exists())
            components.add(Component.COMPRESSION_INFO);
        components.add(Component.DATA);
        components.add(Component.PRIMARY_INDEX);
        components.add(Component.FILTER);
        components.add(Component.STATS);
        components.add(Component.SUMMARY);
        components.add(Component.TOC);

        SSTableReader sstable = SSTableReader.openNoValidation(desc, components, cfs);
        if (sstable.last.compareTo(sstable.first) < 0)
            sstable.last = sstable.first;

        try (LifecycleTransaction scrubTxn = LifecycleTransaction.offline(OperationType.SCRUB, sstable);
             Scrubber scrubber = new Scrubber(cfs, scrubTxn, false, true))
        {
            scrubber.scrub();
        }
        LifecycleTransaction.waitForDeletions();
        cfs.loadNewSSTables();
        assertOrderedAll(cfs, 7);
    }
    finally
    {
        FileUtils.deleteRecursive(tempDataDir);
        // reset partitioner
        DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
    }
}
Example 11: setupPartitioner
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@Before
public void setupPartitioner()
{
    // Using an ordered partitioner to be able to predict keys order in the following tests.
    partitionerToRestore = DatabaseDescriptor.setPartitionerUnsafe(ByteOrderedPartitioner.instance);
}
Example 12: resetPartitioner
import org.apache.cassandra.config.DatabaseDescriptor; //import the package/class this method depends on
@After
public void resetPartitioner()
{
    DatabaseDescriptor.setPartitionerUnsafe(partitionerToRestore);
}