This article collects and summarizes typical usage examples of the Java method org.apache.cassandra.utils.FBUtilities.newPartitioner. If you are unsure what FBUtilities.newPartitioner does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also read further about its enclosing class, org.apache.cassandra.utils.FBUtilities.
The following presents 10 code examples of FBUtilities.newPartitioner, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
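Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what FBUtilities.newPartitioner does: it takes a partitioner class name and returns an IPartitioner instance; in the versions I have seen, a short name without a package is resolved against org.apache.cassandra.dht. The ConfigurationException import assumes a recent Cassandra layout (org.apache.cassandra.exceptions); older releases keep that class under org.apache.cassandra.config.
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.cassandra.utils.FBUtilities;

public class NewPartitionerDemo {
    public static void main(String[] args) {
        try {
            // A fully qualified class name works...
            IPartitioner byFqcn = FBUtilities.newPartitioner("org.apache.cassandra.dht.Murmur3Partitioner");
            // ...and so, in the versions I have seen, does the short name.
            IPartitioner byShortName = FBUtilities.newPartitioner("Murmur3Partitioner");
            System.out.println(byFqcn.getClass().getName());
            System.out.println(byShortName.getClass().getName());
        } catch (ConfigurationException e) {
            // Thrown when the name cannot be resolved to a partitioner class.
            throw new RuntimeException(e);
        }
    }
}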
Example 1: loadTablesFromRemote
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
public static Cluster loadTablesFromRemote(String host, int port, String cfidOverrides) throws IOException {
    Map<String, UUID> cfs = parseOverrides(cfidOverrides);
    Cluster.Builder builder = Cluster.builder().addContactPoints(host).withPort(port);
    Cluster cluster = builder.build();
    Metadata metadata = cluster.getMetadata();
    // Instantiate the same partitioner the remote cluster reports in its metadata.
    IPartitioner partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (DatabaseDescriptor.getPartitioner() == null)
        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    for (com.datastax.driver.core.KeyspaceMetadata ksm : metadata.getKeyspaces()) {
        if (!ksm.getName().equals("system")) {
            for (TableMetadata tm : ksm.getTables()) {
                String name = ksm.getName() + "." + tm.getName();
                try {
                    CassandraUtils.tableFromCQL(
                            new ByteArrayInputStream(tm.asCQLQuery().getBytes()),
                            cfs.get(name) != null ? cfs.get(name) : tm.getId());
                } catch (SyntaxException e) {
                    // ignore tables that we can't parse (probably DSE-specific)
                    logger.debug("Ignoring table " + name + " due to syntax exception " + e.getMessage());
                }
            }
        }
    }
    return cluster;
}
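A note on the DatabaseDescriptor call above: setting the process-wide partitioner with setPartitionerUnsafe appears to be done so that offline SSTable/CQL parsing code, which consults DatabaseDescriptor.getPartitioner(), agrees with the remote cluster's partitioner. As the "unsafe" suffix suggests, this is only appropriate in standalone tooling, never inside a running Cassandra node.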
Example 2: getCassandraPartitioner
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
@Override
@SuppressWarnings("unchecked")
public IPartitioner getCassandraPartitioner() throws BackendException {
    CTConnection conn = null;
    try {
        conn = pool.borrowObject(SYSTEM_KS);
        // Ask the cluster (over Thrift) for its partitioner class name and instantiate it locally.
        return FBUtilities.newPartitioner(conn.getClient().describe_partitioner());
    } catch (Exception e) {
        throw new TemporaryBackendException(e);
    } finally {
        pool.returnObjectUnsafe(SYSTEM_KS, conn);
    }
}
Example 3: getInputPartitioner
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
public static IPartitioner getInputPartitioner(Configuration conf)
{
    try
    {
        return FBUtilities.newPartitioner(conf.get(INPUT_PARTITIONER_CONFIG));
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
}
Example 4: getOutputPartitioner
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
public static IPartitioner getOutputPartitioner(Configuration conf)
{
    try
    {
        return FBUtilities.newPartitioner(conf.get(OUTPUT_PARTITIONER_CONFIG));
    }
    catch (ConfigurationException e)
    {
        throw new RuntimeException(e);
    }
}
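Examples 3 and 4 read the partitioner class name out of a Hadoop Configuration; the accessors appear to match Cassandra's org.apache.cassandra.hadoop.ConfigHelper, where INPUT_PARTITIONER_CONFIG and OUTPUT_PARTITIONER_CONFIG are private property keys. A hedged sketch of how the corresponding property is typically populated and consumed through ConfigHelper's public setter/getter pair (an assumption about the surrounding API, not part of the examples above):
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.hadoop.conf.Configuration;

public class PartitionerConfigDemo {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Store the partitioner class name under the input-partitioner property...
        ConfigHelper.setInputPartitioner(conf, "org.apache.cassandra.dht.Murmur3Partitioner");
        // ...and let ConfigHelper turn it back into an IPartitioner via FBUtilities.newPartitioner.
        IPartitioner partitioner = ConfigHelper.getInputPartitioner(conf);
        System.out.println(partitioner.getClass().getSimpleName());
    }
}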
Example 5: CassandraTokenSplitManager
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
@Inject
public CassandraTokenSplitManager(CassandraThriftConnectionFactory connectionFactory, @ForCassandra ExecutorService executor, CassandraClientConfig config)
{
    this.cassandraThriftClient = new CassandraThriftClient(requireNonNull(connectionFactory, "connectionFactory is null"));
    this.executor = requireNonNull(executor, "executor is null");
    this.splitSize = config.getSplitSize();
    try {
        this.partitioner = FBUtilities.newPartitioner(config.getPartitioner());
    }
    catch (ConfigurationException e) {
        throw new RuntimeException(e);
    }
}
Example 6: getCassandraPartitioner
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
@Override
@SuppressWarnings("unchecked")
public IPartitioner<? extends Token<?>> getCassandraPartitioner() throws BackendException {
    CTConnection conn = null;
    try {
        conn = pool.borrowObject(SYSTEM_KS);
        return FBUtilities.newPartitioner(conn.getClient().describe_partitioner());
    } catch (Exception e) {
        throw new TemporaryBackendException(e);
    } finally {
        pool.returnObjectUnsafe(SYSTEM_KS, conn);
    }
}
Example 7: tableFromSSTable
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
@SuppressWarnings("unchecked")
public static CFMetaData tableFromSSTable(File path) throws IOException, NoSuchFieldException, IllegalAccessException {
    Preconditions.checkNotNull(path);
    Descriptor desc = Descriptor.fromFilename(path.getAbsolutePath());
    EnumSet<MetadataType> types = EnumSet.of(MetadataType.VALIDATION, MetadataType.STATS, MetadataType.HEADER);
    Map<MetadataType, MetadataComponent> sstableMetadata = desc.getMetadataSerializer().deserialize(desc, types);
    ValidationMetadata validationMetadata = (ValidationMetadata) sstableMetadata.get(MetadataType.VALIDATION);
    Preconditions.checkNotNull(validationMetadata, "Validation Metadata could not be resolved, accompanying Statistics.db file must be missing.");
    SerializationHeader.Component header = (SerializationHeader.Component) sstableMetadata.get(MetadataType.HEADER);
    Preconditions.checkNotNull(header, "Metadata could not be resolved, accompanying Statistics.db file must be missing.");
    // LocalPartitioner cannot be built from its class name alone (it needs the key type),
    // so it is special-cased; everything else goes through FBUtilities.newPartitioner.
    IPartitioner partitioner = validationMetadata.partitioner.endsWith("LocalPartitioner") ?
            new LocalPartitioner(header.getKeyType()) :
            FBUtilities.newPartitioner(validationMetadata.partitioner);
    DatabaseDescriptor.setPartitionerUnsafe(partitioner);
    AbstractType<?> keyType = header.getKeyType();
    List<AbstractType<?>> clusteringTypes = header.getClusteringTypes();
    Map<ByteBuffer, AbstractType<?>> staticColumns = header.getStaticColumns();
    Map<ByteBuffer, AbstractType<?>> regularColumns = header.getRegularColumns();
    int id = cfCounter.incrementAndGet();
    // Rebuild a synthetic table definition ("turtle<N>.turtles<N>") from the serialization header.
    CFMetaData.Builder builder = CFMetaData.Builder.create("turtle" + id, "turtles" + id);
    staticColumns.entrySet().stream()
            .forEach(entry ->
                    builder.addStaticColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    regularColumns.entrySet().stream()
            .forEach(entry ->
                    builder.addRegularColumn(UTF8Type.instance.getString(entry.getKey()), entry.getValue()));
    List<AbstractType<?>> partTypes = keyType.getComponents();
    for (int i = 0; i < partTypes.size(); i++) {
        builder.addPartitionKey("partition" + (i > 0 ? i : ""), partTypes.get(i));
    }
    for (int i = 0; i < clusteringTypes.size(); i++) {
        builder.addClusteringColumn("row" + (i > 0 ? i : ""), clusteringTypes.get(i));
    }
    CFMetaData metaData = builder.build();
    Schema.instance.setKeyspaceMetadata(KeyspaceMetadata.create(metaData.ksName, KeyspaceParams.local(),
            Tables.of(metaData), Views.none(), getTypes(), Functions.none()));
    return metaData;
}
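A hypothetical invocation of the helper above, assuming it lives in the same CassandraUtils class as Example 1 and that the path points at an existing -Data.db component (both the class location and the path are assumptions, not part of the original example):
import java.io.File;
import org.apache.cassandra.config.CFMetaData;

public class TableFromSSTableDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical SSTable data component; replace with a real path.
        File dataFile = new File("/var/lib/cassandra/data/ks/tbl/mc-1-big-Data.db");
        // CassandraUtils is the assumed helper class; its import depends on the project.
        CFMetaData inferred = CassandraUtils.tableFromSSTable(dataFile);
        // The builder above names the synthetic table "turtles<N>" in keyspace "turtle<N>".
        System.out.println(inferred.ksName + "." + inferred.cfName);
    }
}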
Example 8: getInputPartitioner
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
public static IPartitioner getInputPartitioner(Configuration conf)
{
    return FBUtilities.newPartitioner(conf.get(INPUT_PARTITIONER_CONFIG));
}
Example 9: getOutputPartitioner
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
public static IPartitioner getOutputPartitioner(Configuration conf)
{
    return FBUtilities.newPartitioner(conf.get(OUTPUT_PARTITIONER_CONFIG));
}
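Examples 8 and 9 are the same Configuration accessors as Examples 3 and 4, just without the try/catch. This most likely reflects a newer Cassandra version, where org.apache.cassandra.exceptions.ConfigurationException extends RuntimeException and therefore no longer needs to be handled as a checked exception by callers.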
Example 10: CQLClient
import org.apache.cassandra.utils.FBUtilities; // import the package/class the method depends on
public CQLClient(LoaderOptions options, String keyspace)
        throws NoSuchAlgorithmException, FileNotFoundException, IOException, KeyStoreException,
        CertificateException, UnrecoverableKeyException, KeyManagementException, ConfigurationException {
    // System.setProperty("com.datastax.driver.NON_BLOCKING_EXECUTOR_SIZE",
    // "64");
    PoolingOptions poolingOptions = new PoolingOptions();
    int connections = options.connectionsPerHost;
    if (connections == 0) {
        connections = 8;
    }
    poolingOptions.setCoreConnectionsPerHost(HostDistance.LOCAL, Math.max(1, connections / 2));
    poolingOptions.setCoreConnectionsPerHost(HostDistance.REMOTE, Math.max(1, connections / 4));
    poolingOptions.setMaxConnectionsPerHost(HostDistance.LOCAL, connections);
    poolingOptions.setMaxConnectionsPerHost(HostDistance.REMOTE, Math.max(1, connections / 2));
    poolingOptions.setMaxRequestsPerConnection(HostDistance.LOCAL, 32768);
    poolingOptions.setMaxRequestsPerConnection(HostDistance.REMOTE, 2000);
    this.simulate = options.simulate;
    this.verbose = options.verbose;
    Cluster.Builder builder = builder().addContactPoints(options.hosts).withProtocolVersion(ProtocolVersion.V3)
            .withCompression(Compression.LZ4).withPoolingOptions(poolingOptions);
    if (options.user != null && options.passwd != null) {
        builder = builder.withCredentials(options.user, options.passwd);
    }
    if (options.ssl) {
        // Build an SSLContext from the configured truststore and keystore.
        EncryptionOptions enco = options.encOptions;
        SSLContext ctx = SSLContext.getInstance(options.encOptions.protocol);
        try (FileInputStream tsf = new FileInputStream(enco.truststore);
                FileInputStream ksf = new FileInputStream(enco.keystore)) {
            KeyStore ts = KeyStore.getInstance(enco.store_type);
            ts.load(tsf, enco.truststore_password.toCharArray());
            TrustManagerFactory tmf = TrustManagerFactory
                    .getInstance(TrustManagerFactory.getDefaultAlgorithm());
            tmf.init(ts);
            KeyStore ks = KeyStore.getInstance("JKS");
            ks.load(ksf, enco.keystore_password.toCharArray());
            KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            kmf.init(ks, enco.keystore_password.toCharArray());
            ctx.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
        }
        SSLOptions sslOptions = JdkSSLOptions.builder().withSSLContext(ctx).withCipherSuites(enco.cipher_suites)
                .build();
        builder = builder.withSSL(sslOptions);
    }
    cluster = builder.build();
    session = cluster.connect(keyspace);
    metadata = cluster.getMetadata();
    keyspaceMetadata = metadata.getKeyspace(keyspace);
    // Mirror the remote keyspace definition into the local, client-side Schema instance.
    org.apache.cassandra.schema.KeyspaceMetadata ksMetaData = org.apache.cassandra.schema.KeyspaceMetadata
            .create(keyspaceMetadata.getName(), KeyspaceParams.create(keyspaceMetadata.isDurableWrites(),
                    keyspaceMetadata.getReplication()));
    Schema.instance.load(ksMetaData);
    loadUserTypes(keyspaceMetadata.getUserTypes(), keyspace);
    // Use the partitioner the cluster reports in its metadata.
    partitioner = FBUtilities.newPartitioner(metadata.getPartitioner());
    if (options.throttle != 0) {
        rateLimiter = RateLimiter.create(options.throttle * 1000 * 1000 / 8);
    }
    this.batch = options.batch;
    this.preparedStatements = options.prepare ? new ConcurrentHashMap<>() : null;
    this.ignoreColumns = options.ignoreColumns;
}