This page collects typical usage examples of the Java method org.apache.cassandra.SchemaLoader.insertData. If you are wondering what exactly SchemaLoader.insertData does, how to call it, or where it is used, the curated code examples below should help. You can also explore further usage examples of its enclosing class, org.apache.cassandra.SchemaLoader.
Five code examples of SchemaLoader.insertData are shown below, sorted by popularity by default.
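Before the examples, here is a minimal sketch of the call pattern they all share. The meaning of the parameters (keyspace name, column family name, starting offset, number of keys) is inferred from the call sites below; the helper populateAndFlush is hypothetical and exists only to illustrate the insert-then-flush idiom, it is not part of the Cassandra test API.
// Hypothetical helper (not part of the Cassandra test sources): illustrates the
// insert-then-flush idiom used throughout the examples below. The insertData
// parameters (keyspace, columnFamily, offset, numKeys) are inferred from the
// call sites on this page.
private static void populateAndFlush(String keyspace, String columnFamily, int offset, int numKeys)
{
    // write `numKeys` rows starting at `offset` into the given column family
    SchemaLoader.insertData(keyspace, columnFamily, offset, numKeys);
    // flush the memtable so the rows are persisted as SSTables on disk
    Keyspace.open(keyspace).getColumnFamilyStore(columnFamily).forceBlockingFlush();
}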
Example 1: rowCacheLoad
import org.apache.cassandra.SchemaLoader; // import the package/class the method depends on
public void rowCacheLoad(int totalKeys, int keysToSave, int offset) throws Exception
{
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore store = Keyspace.open(KEYSPACE_CACHED).getColumnFamilyStore(CF_CACHED);
    // empty the cache
    CacheService.instance.invalidateRowCache();
    assertEquals(0, CacheService.instance.rowCache.size());
    // insert data and fill the cache
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, offset, totalKeys);
    readData(KEYSPACE_CACHED, CF_CACHED, offset, totalKeys);
    assertEquals(totalKeys, CacheService.instance.rowCache.size());
    // force the cache to disk
    CacheService.instance.rowCache.submitWrite(keysToSave).get();
    // empty the cache again to make sure values came from disk
    CacheService.instance.invalidateRowCache();
    assertEquals(0, CacheService.instance.rowCache.size());
    assertEquals(keysToSave == Integer.MAX_VALUE ? totalKeys : keysToSave, CacheService.instance.rowCache.loadSaved());
}
Example 2: testKeyCacheLoad
import org.apache.cassandra.SchemaLoader; // import the package/class the method depends on
@Test
public void testKeyCacheLoad() throws Exception
{
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore store = Keyspace.open(KEYSPACE1).getColumnFamilyStore(COLUMN_FAMILY2);
    // empty the cache
    CacheService.instance.invalidateKeyCache();
    assertKeyCacheSize(0, KEYSPACE1, COLUMN_FAMILY2);
    // insert data and force to disk
    SchemaLoader.insertData(KEYSPACE1, COLUMN_FAMILY2, 0, 100);
    store.forceBlockingFlush();
    // populate the cache
    readData(KEYSPACE1, COLUMN_FAMILY2, 0, 100);
    assertKeyCacheSize(100, KEYSPACE1, COLUMN_FAMILY2);
    // really? our caches don't implement the map interface? (hence no .addAll)
    Map<KeyCacheKey, RowIndexEntry> savedMap = new HashMap<KeyCacheKey, RowIndexEntry>();
    for (Iterator<KeyCacheKey> iter = CacheService.instance.keyCache.keyIterator(); iter.hasNext();)
    {
        KeyCacheKey k = iter.next();
        if (k.desc.ksname.equals(KEYSPACE1) && k.desc.cfname.equals(COLUMN_FAMILY2))
            savedMap.put(k, CacheService.instance.keyCache.get(k));
    }
    // force the cache to disk
    CacheService.instance.keyCache.submitWrite(Integer.MAX_VALUE).get();
    CacheService.instance.invalidateKeyCache();
    assertKeyCacheSize(0, KEYSPACE1, COLUMN_FAMILY2);
    CacheService.instance.keyCache.loadSaved();
    assertKeyCacheSize(savedMap.size(), KEYSPACE1, COLUMN_FAMILY2);
    // probably it's better to add equals/hashCode to RowIndexEntry...
    for (Map.Entry<KeyCacheKey, RowIndexEntry> entry : savedMap.entrySet())
    {
        RowIndexEntry expected = entry.getValue();
        RowIndexEntry actual = CacheService.instance.keyCache.get(entry.getKey());
        assertEquals(expected.position, actual.position);
        assertEquals(expected.columnsIndex(), actual.columnsIndex());
        if (expected.isIndexed())
        {
            assertEquals(expected.deletionTime(), actual.deletionTime());
        }
    }
}
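The examples on this page rely on two helpers, readData and assertKeyCacheSize, whose bodies are not shown here. The sketch below is an assumption of what they plausibly do, reconstructed only from how they are called (readData reads each key once so the caches get populated; assertKeyCacheSize counts the key-cache entries belonging to one table); the real implementations in the Cassandra test sources may differ.
// Plausible sketches of the helpers used in these tests (assumptions, not the
// actual Cassandra test code).
private static void readData(String keyspace, String columnFamily, int offset, int numKeys)
{
    ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(columnFamily);
    // one read per key populates the row/key caches as a side effect
    for (int i = offset; i < offset + numKeys; i++)
        Util.getAll(Util.cmd(cfs, Util.dk("key" + i)).build());
}

private static void assertKeyCacheSize(int expected, String keyspace, String columnFamily)
{
    // count only the key-cache entries that belong to the given table
    int size = 0;
    for (Iterator<KeyCacheKey> iter = CacheService.instance.keyCache.keyIterator(); iter.hasNext();)
    {
        KeyCacheKey k = iter.next();
        if (k.desc.ksname.equals(keyspace) && k.desc.cfname.equals(columnFamily))
            size++;
    }
    assertEquals(expected, size);
}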
Example 3: testKeyCacheLoadWithLostTable
import org.apache.cassandra.SchemaLoader; // import the package/class the method depends on
@Test
public void testKeyCacheLoadWithLostTable() throws Exception
{
    CompactionManager.instance.disableAutoCompaction();
    ColumnFamilyStore store = Keyspace.open(KEYSPACE1).getColumnFamilyStore(COLUMN_FAMILY3);
    // empty the cache
    CacheService.instance.invalidateKeyCache();
    assertKeyCacheSize(0, KEYSPACE1, COLUMN_FAMILY3);
    // insert data and force to disk
    SchemaLoader.insertData(KEYSPACE1, COLUMN_FAMILY3, 0, 100);
    store.forceBlockingFlush();
    Collection<SSTableReader> firstFlushTables = ImmutableList.copyOf(store.getLiveSSTables());
    // populate the cache
    readData(KEYSPACE1, COLUMN_FAMILY3, 0, 100);
    assertKeyCacheSize(100, KEYSPACE1, COLUMN_FAMILY3);
    // insert some new data and force to disk
    SchemaLoader.insertData(KEYSPACE1, COLUMN_FAMILY3, 100, 50);
    store.forceBlockingFlush();
    // check that it's fine
    readData(KEYSPACE1, COLUMN_FAMILY3, 100, 50);
    assertKeyCacheSize(150, KEYSPACE1, COLUMN_FAMILY3);
    // force the cache to disk
    CacheService.instance.keyCache.submitWrite(Integer.MAX_VALUE).get();
    CacheService.instance.invalidateKeyCache();
    assertKeyCacheSize(0, KEYSPACE1, COLUMN_FAMILY3);
    // check that the content is written correctly
    CacheService.instance.keyCache.loadSaved();
    assertKeyCacheSize(150, KEYSPACE1, COLUMN_FAMILY3);
    CacheService.instance.invalidateKeyCache();
    assertKeyCacheSize(0, KEYSPACE1, COLUMN_FAMILY3);
    // now remove the first sstable from the store to simulate losing the file
    store.markObsolete(firstFlushTables, OperationType.UNKNOWN);
    // check that reading now correctly skips over lost table and reads the rest (CASSANDRA-10219)
    CacheService.instance.keyCache.loadSaved();
    assertKeyCacheSize(50, KEYSPACE1, COLUMN_FAMILY3);
}
Example 4: testSSTablesPerReadHistogramWhenRowCache
import org.apache.cassandra.SchemaLoader; // import the package/class the method depends on
@Test
public void testSSTablesPerReadHistogramWhenRowCache()
{
    CompactionManager.instance.disableAutoCompaction();
    Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
    ColumnFamilyStore cachedStore = keyspace.getColumnFamilyStore(CF_CACHED);
    // empty the row cache
    CacheService.instance.invalidateRowCache();
    // set the global row cache size to 1 MB
    CacheService.instance.setRowCacheCapacityInMB(1);
    // insert 100 rows into the cached column family
    SchemaLoader.insertData(KEYSPACE_CACHED, CF_CACHED, 0, 100);
    // force a flush so we can be sure SSTables exist
    cachedStore.forceBlockingFlush();
    ((ClearableHistogram) cachedStore.metric.sstablesPerReadHistogram.cf).clear();
    for (int i = 0; i < 100; i++)
    {
        DecoratedKey key = Util.dk("key" + i);
        Util.getAll(Util.cmd(cachedStore, key).build());
        long count_before = cachedStore.metric.sstablesPerReadHistogram.cf.getCount();
        Util.getAll(Util.cmd(cachedStore, key).build());
        // check that SSTablePerReadHistogram was updated with a zero value:
        // the count increases, and in half of the requests zero SSTables were read
        long count_after = cachedStore.metric.sstablesPerReadHistogram.cf.getCount();
        double belowMedian = cachedStore.metric.sstablesPerReadHistogram.cf.getSnapshot().getValue(0.49D);
        double mean_after = cachedStore.metric.sstablesPerReadHistogram.cf.getSnapshot().getMean();
        assertEquals("SSTablePerReadHistogram should be updated even when the key is found in the row cache", count_before + 1, count_after);
        assertTrue("In half of the requests we have not touched SSTables, " +
                   "so the 49th percentile (" + belowMedian + ") must be strictly less than 0.9", belowMedian < 0.9D);
        assertTrue("In half of the requests we have not touched SSTables, " +
                   "so the mean value (" + mean_after + ") must be strictly less than 1, but greater than 0", mean_after < 0.999D && mean_after > 0.001D);
    }
    assertEquals("Min value of SSTablesPerRead should be zero", 0, cachedStore.metric.sstablesPerReadHistogram.cf.getSnapshot().getMin());
    CacheService.instance.setRowCacheCapacityInMB(0);
}
Example 5: testScheduleTimeout
import org.apache.cassandra.SchemaLoader; // import the package/class the method depends on
@Test
public void testScheduleTimeout() throws Exception
{
    InetAddress peer = FBUtilities.getBroadcastAddress();
    StreamSession session = new StreamSession(peer, peer, null, 0, true, false);
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD);
    // create two sstables
    for (int i = 0; i < 2; i++)
    {
        SchemaLoader.insertData(KEYSPACE1, CF_STANDARD, i, 1);
        cfs.forceBlockingFlush();
    }
    // create a streaming task that streams those two sstables
    StreamTransferTask task = new StreamTransferTask(session, cfs.metadata.cfId);
    for (SSTableReader sstable : cfs.getLiveSSTables())
    {
        List<Range<Token>> ranges = new ArrayList<>();
        ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
        task.addTransferFile(sstable.selfRef(), 1, sstable.getPositionsForRanges(ranges), 0);
    }
    assertEquals(2, task.getTotalNumberOfFiles());
    // if file sending completes before the timeout, then the task should be canceled
    Future f = task.scheduleTimeout(0, 0, TimeUnit.NANOSECONDS);
    f.get();
    // when the timeout runs for the second file, the task should already be completed
    f = task.scheduleTimeout(1, 10, TimeUnit.MILLISECONDS);
    task.complete(1);
    try
    {
        f.get();
        Assert.fail("the timeout future should have been cancelled");
    }
    catch (CancellationException ex)
    {
    }
    assertEquals(StreamSession.State.WAIT_COMPLETE, session.state());
    // once all streaming is done, no timeout task should be scheduled
    assertNull(task.scheduleTimeout(1, 1, TimeUnit.SECONDS));
}