本文整理汇总了Java中org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter.setDirectory方法的典型用法代码示例。如果您正苦于以下问题:Java KahaDBPersistenceAdapter.setDirectory方法的具体用法?Java KahaDBPersistenceAdapter.setDirectory怎么用?Java KahaDBPersistenceAdapter.setDirectory使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter
的用法示例。
在下文中一共展示了KahaDBPersistenceAdapter.setDirectory方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: configureBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
protected void configureBroker(BrokerService answer) throws Exception {
    // Persist messages with KahaDB under the per-test data directory.
    File dataDir = new File("target/test-amq-data/perfTest/kahadb");
    KahaDBPersistenceAdapter kaha = new KahaDBPersistenceAdapter();
    kaha.setDirectory(dataDir);
    //answer.setUseJmx(false);
    // Tuning knobs considered but left disabled:
    // - setEnableJournalDiskSyncs(false) is risky until it is verified what
    //   happens when the index is updated but a journal update is lost; the
    //   index would be inconsistent and it is unclear whether it can be repaired.
    //kaha.setEnableJournalDiskSyncs(false);
    // - A bigger journal file length reduces latency spikes from file switches.
    //kaha.setJournalMaxFileLength(1024*100);
    // - A small index write batch means more frequent, smaller writes.
    //kaha.setIndexWriteBatchSize(100);
    // - Asynchronous index writes move index updates to a separate thread.
    //kaha.setEnableIndexWriteAsync(true);
    answer.setPersistenceAdapter(kaha);
    // Keep the broker lean for perf testing: no advisories, no statistics.
    answer.setAdvisorySupport(false);
    answer.setEnableStatistics(false);
    answer.addConnector(bindAddress);
    // Start from a clean store on every run.
    answer.setDeleteAllMessagesOnStartup(true);
}
示例2: setUp
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
@Override
protected void setUp() throws Exception {
    setMaxTestTime(10 * 60 * 1000); // cap the whole test at 10 minutes
    setAutoFail(true);
    super.setUp();

    // Broker with JMX enabled and a fresh KahaDB store under the test dir.
    File dataDir = new File("target/activemq-data/QueuePurgeTest");
    broker = new BrokerService();
    broker.setDataDirectoryFile(dataDir);
    broker.setUseJmx(true);
    broker.setDeleteAllMessagesOnStartup(true);
    broker.getSystemUsage().getMemoryUsage().setLimit(1024L * 1024 * 64); // 64 MiB

    KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
    adapter.setDirectory(new File(dataDir, "kahadb"));
    broker.setPersistenceAdapter(adapter);

    // Ephemeral port; the real URI is resolved after start().
    broker.addConnector("tcp://localhost:0");
    broker.start();

    factory = new ActiveMQConnectionFactory(
            broker.getTransportConnectors().get(0).getConnectUri().toString());
    connection = factory.createConnection();
    connection.start();
}
示例3: createBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private void createBroker(boolean delete) throws Exception {
    // VM-only broker that keeps inactive durable subscriptions alive.
    broker = BrokerFactory.createBroker("broker:(vm://localhost)");
    broker.setKeepDurableSubsActive(true);
    broker.setPersistent(true);
    broker.setDeleteAllMessagesOnStartup(delete);

    // Small journal files (500 KiB) force frequent rollover during the test.
    KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
    store.setDirectory(new File("activemq-data/" + getName() + "-kahadb"));
    store.setJournalMaxFileLength(500 * 1024);
    broker.setPersistenceAdapter(store);
    broker.setBrokerName(getName());

    // only if we pre-create the destinations
    broker.setDestinations(new ActiveMQDestination[]{topic});

    broker.start();
    broker.waitUntilStarted();
    connection = createConnection();
}
示例4: startBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private void startBroker(boolean deleteAllMessages) throws Exception {
    if (broker != null) {
        return; // already running
    }
    broker = BrokerFactory.createBroker("broker:(vm://localhost)");
    broker.setBrokerName(getName());
    broker.setDeleteAllMessagesOnStartup(deleteAllMessages);
    if (PERSISTENT_BROKER) {
        // Durable KahaDB store under a per-test directory.
        broker.setPersistent(true);
        KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
        store.setDirectory(new File("activemq-data/" + getName()));
        broker.setPersistenceAdapter(store);
    } else {
        broker.setPersistent(false);
    }
    broker.addConnector("tcp://localhost:61656");
    // 256 MiB apiece for the memory, temp and store usage pools.
    long limit = 256 * 1024 * 1024;
    broker.getSystemUsage().getMemoryUsage().setLimit(limit);
    broker.getSystemUsage().getTempUsage().setLimit(limit);
    broker.getSystemUsage().getStoreUsage().setLimit(limit);
    broker.start();
}
示例5: startBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private void startBroker(boolean deleteMessages) throws Exception {
    broker = BrokerFactory.createBroker("broker:(vm://" + getName() + ")");
    // JMX for inspection only; do not open a remote connector.
    broker.setUseJmx(true);
    broker.getManagementContext().setCreateConnector(false);
    broker.setBrokerName(getName());

    // Durable KahaDB store under a per-test directory.
    broker.setPersistent(true);
    KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
    store.setDirectory(new File("activemq-data/" + getName()));
    broker.setPersistenceAdapter(store);

    if (deleteMessages) {
        broker.setDeleteAllMessagesOnStartup(true);
    }
    broker.setKeepDurableSubsActive(true);

    broker.start();
    broker.waitUntilStarted();
    connection = createConnection();
}
示例6: getMultiKahaDbAdapter
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private static MultiKahaDBPersistenceAdapter getMultiKahaDbAdapter(File dir) {
    // Template KahaDB store; the multi-adapter instantiates one per destination.
    KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
    store.setDirectory(dir);

    FilteredKahaDBPersistenceAdapter perDestination = new FilteredKahaDBPersistenceAdapter();
    perDestination.setPersistenceAdapter(store);
    perDestination.setPerDestination(true);

    MultiKahaDBPersistenceAdapter multi = new MultiKahaDBPersistenceAdapter();
    multi.setDirectory(dir);
    multi.setFilteredPersistenceAdapters(Lists.newArrayList(perDestination));
    return multi;
}
示例7: before
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
@Before
public void before() throws Exception {
    // Fresh adapter per test, rooted in the temporary folder, with small
    // (1 MiB) journal files so rollover happens quickly.
    adapter = new KahaDBPersistenceAdapter();
    adapter.setDirectory(storeFolder.getRoot());
    adapter.setJournalMaxFileLength(1024 * 1024);
    adapter.start();
    store = adapter.getStore();
}
示例8: getPersistenceAdapter
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
@Override
public PersistenceAdapter getPersistenceAdapter(File dir) {
    // Template KahaDB store; the multi-adapter creates one per destination.
    KahaDBPersistenceAdapter store = new KahaDBPersistenceAdapter();
    store.setDirectory(dir);

    FilteredKahaDBPersistenceAdapter perDestination = new FilteredKahaDBPersistenceAdapter();
    perDestination.setPersistenceAdapter(store);
    perDestination.setPerDestination(true);

    MultiKahaDBPersistenceAdapter multi = new MultiKahaDBPersistenceAdapter();
    multi.setJournalMaxFileLength(1024 * 1024); // small (1 MiB) journal files
    multi.setDirectory(dir);
    multi.setFilteredPersistenceAdapters(Lists.newArrayList(perDestination));
    return multi;
}
示例9: getPersistenceAdapter
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
@Override
public PersistenceAdapter getPersistenceAdapter(File dir) {
    // Plain KahaDB store with small (1 MiB) journal files.
    KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
    adapter.setDirectory(dir);
    adapter.setJournalMaxFileLength(1024 * 1024);
    return adapter;
}
示例10: persistenceAdapterFactory_KahaDB
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private PersistenceAdapter persistenceAdapterFactory_KahaDB(String path) {
    // KahaDB store that tolerates missing journal files and detects
    // corruption via per-file checksums.
    KahaDBPersistenceAdapter adapter = new KahaDBPersistenceAdapter();
    adapter.setDirectory(new File(path));
    adapter.setIgnoreMissingJournalfiles(true);
    adapter.setCheckForCorruptJournalFiles(true);
    adapter.setChecksumJournalFiles(true);
    return adapter;
}
示例11: main
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
/**
 * Stand-alone entry point: starts a broker with a tuned KahaDB store
 * listening on tcp://0.0.0.0:61616 and blocks until the process is killed.
 */
public static void main(String[] args) {
    try {
        KahaDBPersistenceAdapter kahaDB = new KahaDBPersistenceAdapter();
        // Start from an empty store directory.
        File dataFileDir = new File("target/test-amq-data/perfTest/kahadb");
        IOHelper.deleteChildren(dataFileDir);
        kahaDB.setDirectory(dataFileDir);
        // The setEnableJournalDiskSyncs(false) setting is a little dangerous
        // right now, as it has not been verified what happens if the index is
        // updated but a journal update is lost. The index would be
        // inconsistent, and it is unclear whether it can be repaired.
        // kahaDB.setEnableJournalDiskSyncs(false);
        // Using a bigger journal file size makes it take fewer spikes as it
        // is not switching files as often.
        // kahaDB.setJournalMaxFileLength(1024*1024*100);
        // A large write batch means fewer, bigger index writes; pair it with
        // a large index cache.
        kahaDB.setIndexWriteBatchSize(1000);
        kahaDB.setIndexCacheSize(10000);
        // do the index write in a separate thread
        // kahaDB.setEnableIndexWriteAsync(true);
        BrokerService broker = new BrokerService();
        broker.setUseJmx(false);
        broker.setPersistenceAdapter(kahaDB);
        // broker.setPersistent(false);
        broker.setDeleteAllMessagesOnStartup(true);
        broker.addConnector("tcp://0.0.0.0:61616");
        broker.start();
        System.err.println("Running");
        // Keep the JVM alive indefinitely; the broker runs on its own threads.
        Thread.sleep(Long.MAX_VALUE);
    } catch (Throwable e) {
        // Demo entry point: surface anything (including Errors) on stderr.
        e.printStackTrace();
    }
}
示例12: configureBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
@Override
protected void configureBroker(BrokerService answer, String uri) throws Exception {
    // KahaDB store with an archive directory for rolled-over journal files
    // (archiving itself is disabled).
    File dataFileDir = new File("target/test-amq-data/perfTest/kahadb");
    KahaDBPersistenceAdapter kaha = new KahaDBPersistenceAdapter();
    kaha.setDirectory(dataFileDir);
    kaha.setDirectoryArchive(new File(dataFileDir, "archive"));
    kaha.setArchiveDataLogs(false);
    // Disabling journal disk syncs is risky until it is verified what happens
    // when the index is updated but a journal update is lost (the index would
    // be inconsistent, and it is unclear whether it can be repaired) — so
    // syncs stay enabled here.
    kaha.setEnableJournalDiskSyncs(true);
    // A bigger journal file size would reduce latency spikes from file switches.
    //kaha.setJournalMaxFileLength(1024*1024*100);
    // A small index write batch means more frequent, smaller writes.
    //kaha.setIndexWriteBatchSize(100);
    // Index writes happen on a separate thread, backed by a large index cache.
    kaha.setEnableIndexWriteAsync(true);
    kaha.setIndexCacheSize(10000);
    answer.setPersistenceAdapter(kaha);
    answer.addConnector(uri);
    // Start from a clean store on every run.
    answer.setDeleteAllMessagesOnStartup(true);
}
示例13: startBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private void startBroker(boolean deleteAllMessages) throws Exception {
    if (broker != null) {
        return; // already running
    }
    broker = BrokerFactory.createBroker("broker:(vm://" + getName() + ")");
    broker.setBrokerName(getName());
    broker.setAdvisorySupport(false);
    broker.setDeleteAllMessagesOnStartup(deleteAllMessages);
    broker.setKeepDurableSubsActive(true);

    // Wipe the store directory up front when a clean start is requested.
    File kahadbData = new File("activemq-data/" + getName() + "-kahadb");
    if (deleteAllMessages) {
        delete(kahadbData);
    }

    // Tiny (20 KiB) journal files force very frequent rollover.
    broker.setPersistent(true);
    KahaDBPersistenceAdapter kahadb = new KahaDBPersistenceAdapter();
    kahadb.setDirectory(kahadbData);
    kahadb.setJournalMaxFileLength(20 * 1024);
    broker.setPersistenceAdapter(kahadb);

    broker.addConnector("tcp://localhost:61656");
    // 256 MiB apiece for the memory, temp and store usage pools.
    long limit = 256 * 1024 * 1024;
    broker.getSystemUsage().getMemoryUsage().setLimit(limit);
    broker.getSystemUsage().getTempUsage().setLimit(limit);
    broker.getSystemUsage().getStoreUsage().setLimit(limit);
    broker.start();
}
示例14: startBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private void startBroker(boolean deleteAllMessages) throws Exception {
    if (broker != null) {
        return; // already running
    }
    broker = BrokerFactory.createBroker("broker:(vm://" + getName() + ")");
    broker.setBrokerName(getName());
    broker.setAdvisorySupport(false);
    broker.setDeleteAllMessagesOnStartup(deleteAllMessages);

    // Wipe the store directory up front when a clean start is requested.
    File kahadbData = new File("activemq-data/" + getName() + "-kahadb");
    if (deleteAllMessages) {
        delete(kahadbData);
    }

    // Small (500 KiB) journal files force frequent rollover.
    broker.setPersistent(true);
    KahaDBPersistenceAdapter kahadb = new KahaDBPersistenceAdapter();
    kahadb.setDirectory(kahadbData);
    kahadb.setJournalMaxFileLength(500 * 1024);
    broker.setPersistenceAdapter(kahadb);

    // Ephemeral port; remember the publishable URI for clients.
    connectionUri = broker.addConnector("tcp://localhost:0").getPublishableConnectString();
    // 256 MiB apiece for the memory, temp and store usage pools.
    long limit = 256 * 1024 * 1024;
    broker.getSystemUsage().getMemoryUsage().setLimit(limit);
    broker.getSystemUsage().getTempUsage().setLimit(limit);
    broker.getSystemUsage().getStoreUsage().setLimit(limit);
    broker.start();
}
示例15: startBroker
import org.apache.activemq.store.kahadb.KahaDBPersistenceAdapter; //导入方法依赖的package包/类
private void startBroker(boolean deleteAllMessages) throws Exception {
    if (broker != null) {
        return; // already running
    }
    broker = BrokerFactory.createBroker("broker:(vm://" + getName() + ")");
    broker.setBrokerName(getName());
    broker.setAdvisorySupport(false);
    broker.setDeleteAllMessagesOnStartup(deleteAllMessages);

    // Wipe the store directory up front when a clean start is requested.
    File kahadbData = new File("activemq-data/" + getName() + "-kahadb");
    if (deleteAllMessages) {
        delete(kahadbData);
    }

    // Tiny (10 KiB) journal files plus a 5 s cleanup interval make journal
    // GC/rollover happen quickly during the test.
    broker.setPersistent(true);
    KahaDBPersistenceAdapter kahadb = new KahaDBPersistenceAdapter();
    kahadb.setDirectory(kahadbData);
    kahadb.setJournalMaxFileLength(10 * 1024);
    kahadb.setCleanupInterval(5000);
    broker.setPersistenceAdapter(kahadb);

    broker.addConnector("tcp://localhost:61656");
    // 256 MiB apiece for the memory, temp and store usage pools.
    long limit = 256 * 1024 * 1024;
    broker.getSystemUsage().getMemoryUsage().setLimit(limit);
    broker.getSystemUsage().getTempUsage().setLimit(limit);
    broker.getSystemUsage().getStoreUsage().setLimit(limit);
    LOG.info(toString() + "Starting Broker...");
    broker.start();
    broker.waitUntilStarted();
    LOG.info(toString() + " Broker started!!");
}