This article collects typical usage examples of the Java class org.apache.cassandra.config.Config. If you are wondering what the Config class does, how to use it, or are looking for concrete examples of it, the curated code samples below may help.
The Config class belongs to the org.apache.cassandra.config package. A total of 15 code examples of the Config class are shown below, sorted by popularity by default.
Example 1: inspectThrowable

import org.apache.cassandra.config.Config; // import the required package/class

/**
 * Certain Throwables and Exceptions represent "Die" conditions for the server.
 * This recursively checks the input Throwable's cause hierarchy until null.
 * @param t
 *            The Throwable to check for server-stop conditions
 */
public static void inspectThrowable(Throwable t)
{
    boolean isUnstable = false;
    if (t instanceof OutOfMemoryError)
    {
        isUnstable = true;
        HeapUtils.generateHeapDump();
    }

    if (DatabaseDescriptor.getDiskFailurePolicy() == Config.DiskFailurePolicy.die)
        if (t instanceof FSError || t instanceof CorruptSSTableException)
            isUnstable = true;

    // Check for file handle exhaustion
    if (t instanceof FileNotFoundException || t instanceof SocketException)
        if (t.getMessage().contains("Too many open files"))
            isUnstable = true;

    if (isUnstable)
        killer.killCurrentJVM(t);

    if (t.getCause() != null)
        inspectThrowable(t.getCause());
}

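For context, callers usually funnel any caught Throwable through this inspector before rethrowing, letting it decide whether the JVM must die. Below is a minimal usage sketch; the surrounding task class is hypothetical, and it assumes the method lives in org.apache.cassandra.utils.JVMStabilityInspector (as Example 7 below suggests):

import org.apache.cassandra.utils.JVMStabilityInspector;

// Hypothetical background task, for illustration only.
public class FlushTask implements Runnable
{
    public void run()
    {
        try
        {
            doFlush();
        }
        catch (Throwable t)
        {
            // Let the inspector kill the JVM if this is a "die" condition;
            // otherwise keep propagating the error normally.
            JVMStabilityInspector.inspectThrowable(t);
            throw new RuntimeException(t);
        }
    }

    private void doFlush()
    {
        // ... the actual work would go here ...
    }
}
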
Example 2: inspectThrowable

import org.apache.cassandra.config.Config; // import the required package/class

/**
 * Certain Throwables and Exceptions represent "Die" conditions for the server.
 * @param t
 *            The Throwable to check for server-stop conditions
 */
public static void inspectThrowable(Throwable t)
{
    boolean isUnstable = false;
    if (t instanceof OutOfMemoryError)
        isUnstable = true;

    if (DatabaseDescriptor.getDiskFailurePolicy() == Config.DiskFailurePolicy.die)
        if (t instanceof FSError || t instanceof CorruptSSTableException)
            isUnstable = true;

    // Check for file handle exhaustion
    if (t instanceof FileNotFoundException || t instanceof SocketException)
        if (t.getMessage().contains("Too many open files"))
            isUnstable = true;

    if (isUnstable)
        killer.killCurrentJVM(t);
}

Example 3: CommitLog

import org.apache.cassandra.config.Config; // import the required package/class

private CommitLog()
{
    DatabaseDescriptor.createAllDirectories();

    allocator = new CommitLogSegmentManager();

    executor = DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch
             ? new BatchCommitLogService(this)
             : new PeriodicCommitLogService(this);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try
    {
        mbs.registerMBean(this, new ObjectName("org.apache.cassandra.db:type=Commitlog"));
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }

    // register metrics
    metrics = new CommitLogMetrics(executor, allocator);
}

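Because the constructor registers the commit log under the JMX name shown above, a monitoring client can look it up at runtime. A small sketch follows; it relies only on the ObjectName string from the example, since the attributes exposed by the commit log MBean interface are not shown here:

import java.lang.management.ManagementFactory;

import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;

MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName commitLogName = new ObjectName("org.apache.cassandra.db:type=Commitlog");
MBeanInfo info = mbs.getMBeanInfo(commitLogName);
// List whatever attributes the registered commit log MBean exposes.
for (MBeanAttributeInfo attr : info.getAttributes())
    System.out.println(attr.getName());
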
Example 4: newSocket

import org.apache.cassandra.config.Config; // import the required package/class

public static Socket newSocket(InetAddress endpoint) throws IOException
{
    // zero means 'bind on any available port.'
    if (isEncryptedChannel(endpoint))
    {
        if (Config.getOutboundBindAny())
            return SSLFactory.getSocket(DatabaseDescriptor.getServerEncryptionOptions(), endpoint, DatabaseDescriptor.getSSLStoragePort());
        else
            return SSLFactory.getSocket(DatabaseDescriptor.getServerEncryptionOptions(), endpoint, DatabaseDescriptor.getSSLStoragePort(), FBUtilities.getLocalAddress(), 0);
    }
    else
    {
        Socket socket = SocketChannel.open(new InetSocketAddress(endpoint, DatabaseDescriptor.getStoragePort())).socket();
        if (Config.getOutboundBindAny() && !socket.isBound())
            socket.bind(new InetSocketAddress(FBUtilities.getLocalAddress(), 0));
        return socket;
    }
}

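The listing does not show which class hosts newSocket (in the Cassandra tree it is an outbound internode-connection helper), so the sketch below assumes a hypothetical OutboundSocketFactory host class just to illustrate the call; the peer address is a placeholder:

import java.net.InetAddress;
import java.net.Socket;

InetAddress peer = InetAddress.getByName("10.0.0.2");          // placeholder peer address
// Storage port and server_encryption_options are resolved inside newSocket()
// via DatabaseDescriptor, exactly as in the example above.
try (Socket socket = OutboundSocketFactory.newSocket(peer))    // hypothetical host class
{
    socket.setTcpNoDelay(true);
    // ... write internode messages here ...
}
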
Example 5: loadConfig

import org.apache.cassandra.config.Config; // import the required package/class

@Override
public Config loadConfig() throws ConfigurationException
{
    Config config = super.loadConfig();

    config.rpc_port += offset;
    config.native_transport_port += offset;
    config.storage_port += offset;

    config.commitlog_directory += File.pathSeparator + offset;
    config.saved_caches_directory += File.pathSeparator + offset;
    for (int i = 0; i < config.data_file_directories.length; i++)
        config.data_file_directories[i] += File.pathSeparator + offset;

    return config;
}

Example 6: testCommitFailurePolicy_stop

import org.apache.cassandra.config.Config; // import the required package/class

@Test
public void testCommitFailurePolicy_stop() throws ConfigurationException
{
    // Need storage service active so stop policy can shutdown gossip
    StorageService.instance.initServer();
    Assert.assertTrue(Gossiper.instance.isEnabled());

    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.stop);
        CommitLog.handleCommitError("Test stop error", new Throwable());
        Assert.assertFalse(Gossiper.instance.isEnabled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
    }
}

Example 7: testCommitFailurePolicy_die

import org.apache.cassandra.config.Config; // import the required package/class

@Test
public void testCommitFailurePolicy_die()
{
    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);

    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
        CommitLog.handleCommitError("Testing die policy", new Throwable());
        Assert.assertTrue(killerForTests.wasKilled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
        JVMStabilityInspector.replaceKiller(originalKiller);
    }
}

Example 8: CommitLog

import org.apache.cassandra.config.Config; // import the required package/class

private CommitLog()
{
    DatabaseDescriptor.createAllDirectories();

    allocator = new CommitLogAllocator();
    activateNextSegment();

    executor = DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch
             ? new BatchCommitLogExecutorService()
             : new PeriodicCommitLogExecutorService(this);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try
    {
        mbs.registerMBean(this, new ObjectName("org.apache.cassandra.db:type=Commitlog"));
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }

    // register metrics
    metrics = new CommitLogMetrics(executor, allocator);
}

Example 9: main

import org.apache.cassandra.config.Config; // import the required package/class

public static void main(String[] args) throws Exception
{
    Config.setClientMode(true);

    // Print usage if no argument is specified.
    if (args.length < 2 || args.length > 3)
    {
        System.err.println("Usage: " + Client.class.getSimpleName() + " <host> <port> [<version>]");
        return;
    }

    // Parse options.
    String host = args[0];
    int port = Integer.parseInt(args[1]);
    int version = args.length == 3 ? Integer.parseInt(args[2]) : Server.CURRENT_VERSION;

    ClientEncryptionOptions encryptionOptions = new ClientEncryptionOptions();

    System.out.println("CQL binary protocol console " + host + "@" + port + " using native protocol version " + version);

    new Client(host, port, version, encryptionOptions).run();
    System.exit(0);
}

Example 10: RepairOption

import org.apache.cassandra.config.Config; // import the required package/class

public RepairOption(RepairParallelism parallelism, boolean primaryRange, boolean incremental, boolean trace, int jobThreads, Collection<Range<Token>> ranges, boolean isSubrangeRepair)
{
    if (FBUtilities.isWindows() &&
        (DatabaseDescriptor.getDiskAccessMode() != Config.DiskAccessMode.standard || DatabaseDescriptor.getIndexAccessMode() != Config.DiskAccessMode.standard) &&
        parallelism == RepairParallelism.SEQUENTIAL)
    {
        logger.warn("Sequential repair disabled when memory-mapped I/O is configured on Windows. Reverting to parallel.");
        this.parallelism = RepairParallelism.PARALLEL;
    }
    else
        this.parallelism = parallelism;

    this.primaryRange = primaryRange;
    this.incremental = incremental;
    this.trace = trace;
    this.jobThreads = jobThreads;
    this.ranges.addAll(ranges);
    this.isSubrangeRepair = isSubrangeRepair;
}

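A minimal construction sketch using only the parameters visible in the constructor above; the empty token-range collection is a placeholder, and on Windows with memory-mapped I/O the constructor will silently switch SEQUENTIAL to PARALLEL as shown:

import java.util.Collections;

import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.repair.RepairParallelism;

RepairOption option = new RepairOption(RepairParallelism.SEQUENTIAL,
                                       true,                                   // primaryRange
                                       false,                                  // incremental
                                       false,                                  // trace
                                       1,                                      // jobThreads
                                       Collections.<Range<Token>>emptyList(),  // ranges (placeholder)
                                       false);                                 // isSubrangeRepair
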
Example 11: CommitLog

import org.apache.cassandra.config.Config; // import the required package/class

@VisibleForTesting
CommitLog(String location, CommitLogArchiver archiver)
{
    this.location = location;
    this.configuration = new Configuration(DatabaseDescriptor.getCommitLogCompression());
    DatabaseDescriptor.createAllDirectories();

    this.archiver = archiver;
    metrics = new CommitLogMetrics();

    executor = DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch
             ? new BatchCommitLogService(this)
             : new PeriodicCommitLogService(this);

    allocator = new CommitLogSegmentManager(this);

    // register metrics
    metrics.attach(executor, allocator);
}

Example 12: newSocket

import org.apache.cassandra.config.Config; // import the required package/class

@SuppressWarnings("resource")
public static Socket newSocket(InetAddress endpoint) throws IOException
{
    // zero means 'bind on any available port.'
    if (isEncryptedChannel(endpoint))
    {
        if (Config.getOutboundBindAny())
            return SSLFactory.getSocket(DatabaseDescriptor.getServerEncryptionOptions(), endpoint, DatabaseDescriptor.getSSLStoragePort());
        else
            return SSLFactory.getSocket(DatabaseDescriptor.getServerEncryptionOptions(), endpoint, DatabaseDescriptor.getSSLStoragePort(), FBUtilities.getLocalAddress(), 0);
    }
    else
    {
        SocketChannel channel = SocketChannel.open();
        if (!Config.getOutboundBindAny())
            channel.bind(new InetSocketAddress(FBUtilities.getLocalAddress(), 0));
        channel.connect(new InetSocketAddress(endpoint, DatabaseDescriptor.getStoragePort()));
        return channel.socket();
    }
}

Example 13: bufferSize

import org.apache.cassandra.config.Config; // import the required package/class

/**
 * Return the buffer size for a given record size. For spinning disks always add one page.
 * For solid state disks only add one page if the chance of crossing to the next page is more
 * than a predefined value, @see Config.disk_optimization_page_cross_chance.
 */
static int bufferSize(long recordSize)
{
    Config.DiskOptimizationStrategy strategy = DatabaseDescriptor.getDiskOptimizationStrategy();
    if (strategy == Config.DiskOptimizationStrategy.ssd)
    {
        // The crossing probability is calculated assuming a uniform distribution of record
        // start position in a page, so it's the record size modulo the page size divided by
        // the total page size.
        double pageCrossProbability = (recordSize % 4096) / 4096.;
        // if the page cross probability is equal or bigger than disk_optimization_page_cross_chance we add one page
        if ((pageCrossProbability - DatabaseDescriptor.getDiskOptimizationPageCrossChance()) > -1e-16)
            recordSize += 4096;

        return roundBufferSize(recordSize);
    }
    else if (strategy == Config.DiskOptimizationStrategy.spinning)
    {
        return roundBufferSize(recordSize + 4096);
    }
    else
    {
        throw new IllegalStateException("Unsupported disk optimization strategy: " + strategy);
    }
}

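For example, with the ssd strategy and disk_optimization_page_cross_chance set to, say, 0.1: a 6,000-byte record gives a crossing probability of (6000 % 4096) / 4096 ≈ 0.46, which is above the threshold, so one extra 4 KiB page is added before the size is rounded; a 4,100-byte record gives (4100 % 4096) / 4096 ≈ 0.001, which is below the threshold, so the size is rounded up as-is. With the spinning strategy both records would get the extra page unconditionally.
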
Example 14: metadataFromSSTable

import org.apache.cassandra.config.Config; // import the required package/class

@Override
protected CFMetaData metadataFromSSTable(Descriptor desc) throws IOException {
    if (!legacy && desc.version.storeRows()) {
        return super.metadataFromSSTable(desc);
    }

    Config.setClientMode(false);
    Util.initDatabaseDescriptor();

    // load keyspace descriptions.
    Schema.instance.loadFromDiskForTool();

    CFMetaData metaData = Schema.instance.getCFMetaData(desc);
    if (metaData == null) {
        throw new IllegalArgumentException(String.format(
                "Could not locate schema info for %s/(%s:%s). "
                        + "Make sure your 'data_file_directories' is pointed correctly.",
                desc, desc.ksname, desc.cfname));
    }
    return metaData;
}

Example 15: main

import org.apache.cassandra.config.Config; // import the required package/class

public static void main(String args[]) {
    Config.setClientMode(true);

    LoaderOptions options = LoaderOptions.parseArgs(args);
    try {
        File dir = options.directory;
        if (dir.isFile()) {
            dir = dir.getParentFile();
        }
        String keyspace = dir.getParentFile().getName();

        CQLClient client = new CQLClient(options, keyspace);
        SSTableToCQL ssTableToCQL = new SSTableToCQL(keyspace, client);
        try {
            ssTableToCQL.stream(options.directory);
        } finally {
            client.close();
        }
        System.exit(0);
    } catch (Throwable t) {
        t.printStackTrace();
        System.exit(1);
    }
}