本文整理汇总了Java中org.apache.cassandra.Util.dk方法的典型用法代码示例。如果您正苦于以下问题:Java Util.dk方法的具体用法?Java Util.dk怎么用?Java Util.dk使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.Util
的用法示例。
在下文中一共展示了Util.dk方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testRemoveSubColumn
import org.apache.cassandra.Util; //导入方法依赖的package包/类
@Test
public void testRemoveSubColumn()
{
    Keyspace ks = Keyspace.open("Keyspace1");
    ColumnFamilyStore cfStore = ks.getColumnFamilyStore("Super1");
    DecoratedKey key = Util.dk("key1");

    // Write one sub-column so there is something to delete, and flush it
    // to an sstable so the later delete exercises the sstable path too.
    Mutation insert = new Mutation("Keyspace1", key.getKey());
    Util.addMutation(insert, "Super1", "SC1", 1, "asdf", 0);
    insert.apply();
    cfStore.forceBlockingFlush();

    CellName cellName = CellNames.compositeDense(ByteBufferUtil.bytes("SC1"), getBytes(1L));

    // Delete the same cell at a later timestamp.
    Mutation remove = new Mutation("Keyspace1", key.getKey());
    remove.delete("Super1", cellName, 1);
    remove.apply();

    // The tombstone must hide the cell, and purging deletions leaves nothing.
    ColumnFamily result = cfStore.getColumnFamily(QueryFilter.getIdentityFilter(key, "Super1", System.currentTimeMillis()));
    assertFalse(result.getColumn(cellName).isLive());
    assertNull(Util.cloneAndRemoveDeleted(result, Integer.MAX_VALUE));
}
示例2: testCompaction
import org.apache.cassandra.Util; //导入方法依赖的package包/类
/**
 * Writes {@code insertsPerTable} single-column rows into the given column
 * family, flushing after every write so each row lands in its own sstable,
 * then checks that a major compaction collapses them into one sstable.
 */
private void testCompaction(String columnFamilyName, int insertsPerTable) throws ExecutionException, InterruptedException
{
    CompactionManager.instance.disableAutoCompaction();
    Keyspace ks = Keyspace.open("Keyspace1");
    ColumnFamilyStore cfStore = ks.getColumnFamilyStore(columnFamilyName);

    Set<DecoratedKey> written = new HashSet<DecoratedKey>();
    for (int i = 0; i < insertsPerTable; i++)
    {
        DecoratedKey key = Util.dk(String.valueOf(i));
        Mutation mutation = new Mutation("Keyspace1", key.getKey());
        mutation.add(columnFamilyName, Util.cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, i);
        mutation.apply();
        written.add(key);
        // One sstable per row, and every written row must stay readable.
        cfStore.forceBlockingFlush();
        assertEquals(written.size(), Util.getRangeSlice(cfStore).size());
    }

    CompactionManager.instance.performMaximal(cfStore);
    assertEquals(1, cfStore.getSSTables().size());
}
示例3: testRowIteration
import org.apache.cassandra.Util; //导入方法依赖的package包/类
@Test
public void testRowIteration() throws IOException, ExecutionException, InterruptedException
{
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = ks.getColumnFamilyStore("Super3");

    final int rowCount = 10;
    Set<DecoratedKey> written = new HashSet<DecoratedKey>();
    for (int row = 0; row < rowCount; row++)
    {
        DecoratedKey key = Util.dk(String.valueOf(row));
        RowMutation mutation = new RowMutation(KEYSPACE1, key.key);
        // Value sizes shrink as the row index grows, exercising variable-width cells.
        byte[] payload = new byte[rowCount * 10 - row * 2];
        mutation.add("Super3", CompositeType.build(ByteBufferUtil.bytes("sc"), ByteBufferUtil.bytes(String.valueOf(row))), ByteBuffer.wrap(payload), row);
        mutation.apply();
        written.add(key);
    }

    // After flushing, every inserted row must still be visible in a range slice.
    store.forceBlockingFlush();
    assertEquals(written.toString(), written.size(), Util.getRangeSlice(store).size());
}
示例4: validateNameSort
import org.apache.cassandra.Util; //导入方法依赖的package包/类
/**
 * Reads back {@code N} rows ("0".."N-1") from Standard1 and checks that each
 * column's value matches the parity convention used at insert time: columns
 * whose name ends in an even digit hold "a", odd digits hold "b".
 *
 * @param keyspace keyspace containing the Standard1 column family
 * @param N        number of row keys to validate
 */
private void validateNameSort(Keyspace keyspace, int N) throws IOException
{
    for (int i = 0; i < N; ++i)
    {
        DecoratedKey key = Util.dk(Integer.toString(i));
        ColumnFamily cf = Util.getColumnFamily(keyspace, key, "Standard1");
        for (Column column : cf.getSortedColumns())
        {
            String name = ByteBufferUtil.string(column.name());
            // parseInt yields a primitive directly; Integer.valueOf boxed
            // only to immediately unbox into the int local.
            int j = Integer.parseInt(name.substring(name.length() - 1));
            byte[] expected = j % 2 == 0 ? "a".getBytes() : "b".getBytes();
            assertEquals(new String(expected), ByteBufferUtil.string(column.value()));
        }
    }
}
示例5: setup
import org.apache.cassandra.Util; //导入方法依赖的package包/类
@Before
public void setup()
{
// Shared per-test fixture: one decorated key plus handles to the two test
// tables (a plain standard table and one with a collection column).
dk = Util.dk("key1");
ks = Keyspace.open(KEYSPACE1);
cfs = ks.getColumnFamilyStore(CF_STANDARD);
cfm = cfs.metadata;
cfs2 = ks.getColumnFamilyStore(CF_COLLECTION);
cfm2 = cfs2.metadata;
// Column definition for column "m" of the collection table
// (presumably a map/collection column — TODO confirm against the schema).
m = cfm2.getColumnDefinition(new ColumnIdentifier("m", false));
// Pin "now" once so every assertion in a test sees the same time.
nowInSec = FBUtilities.nowInSeconds();
// Pre-built single-partition read command for the standard table at dk.
command = Util.cmd(cfs, dk).withNowInSeconds(nowInSec).build();
}
示例6: testMissingHeader
import org.apache.cassandra.Util; //导入方法依赖的package包/类
// Commit log replay must still recover mutations after the per-segment
// .header files have been deleted.
@Test
public void testMissingHeader() throws IOException
{
Keyspace keyspace1 = Keyspace.open(KEYSPACE1);
Keyspace keyspace2 = Keyspace.open(KEYSPACE2);
DecoratedKey dk = Util.dk("keymulti");
// Write one row to each keyspace; Util.apply returns the written partition
// so it can be compared against what replay restores.
UnfilteredRowIterator upd1 = Util.apply(new RowUpdateBuilder(keyspace1.getColumnFamilyStore(CF_STANDARD1).metadata, 1L, 0, "keymulti")
.clustering("col1").add("val", "1")
.build());
UnfilteredRowIterator upd2 = Util.apply(new RowUpdateBuilder(keyspace2.getColumnFamilyStore(CF_STANDARD3).metadata, 1L, 0, "keymulti")
.clustering("col1").add("val", "1")
.build());
// Drop the memtables so the data only survives in the commit log.
keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();
// nuke the header
// NOTE(review): listFiles() can return null; assumed non-null here since
// the commit log directory was just written to — confirm if flaky.
for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
{
if (file.getName().endsWith(".header"))
FileUtils.deleteWithConfirm(file);
}
// Replay the log and verify both partitions come back intact.
CommitLog.instance.resetUnsafe(false);
Assert.assertTrue(Util.equal(upd1, Util.getOnlyPartitionUnfiltered(Util.cmd(keyspace1.getColumnFamilyStore(CF_STANDARD1), dk).build()).unfilteredIterator()));
Assert.assertTrue(Util.equal(upd2, Util.getOnlyPartitionUnfiltered(Util.cmd(keyspace2.getColumnFamilyStore(CF_STANDARD3), dk).build()).unfilteredIterator()));
}
示例7: populate
import org.apache.cassandra.Util; //导入方法依赖的package包/类
/**
 * Inserts rows keyed startRowKey..endRowKey (inclusive), each with ten
 * columns "0".."9", all sharing one wall-clock timestamp which is returned.
 * Column "0" is written without a TTL; the rest use the given ttl.
 */
private long populate(String ks, String cf, int startRowKey, int endRowKey, int ttl) {
    long timestamp = System.currentTimeMillis();
    for (int row = startRowKey; row <= endRowKey; row++)
    {
        DecoratedKey key = Util.dk(Integer.toString(row));
        Mutation mutation = new Mutation(ks, key.getKey());
        for (int col = 0; col < 10; col++)
        {
            // Column 0 never expires: if every column expired, deleting all
            // columns would not produce an sstable.
            int columnTtl = col > 0 ? ttl : 0;
            mutation.add(cf, Util.cellname(Integer.toString(col)),
                         ByteBufferUtil.EMPTY_BYTE_BUFFER,
                         timestamp,
                         columnTtl);
        }
        mutation.apply();
    }
    return timestamp;
}
示例8: insertRowWithKey
import org.apache.cassandra.Util; //导入方法依赖的package包/类
/**
 * Writes a single row keyed by the zero-padded 3-digit form of {@code key},
 * containing one empty-valued cell "col" with a 1000s TTL, timestamped now.
 */
private static void insertRowWithKey(int key)
{
    DecoratedKey decorated = Util.dk(String.format("%03d", key));
    Mutation mutation = new Mutation(KEYSPACE1, decorated.getKey());
    mutation.add("Standard1", Util.cellname("col"), ByteBufferUtil.EMPTY_BYTE_BUFFER, System.currentTimeMillis(), 1000);
    mutation.apply();
}
示例9: testValidationMultipleSSTablePerLevel
import org.apache.cassandra.Util; //导入方法依赖的package包/类
// Validation compaction (repair) must work when leveled compaction has
// produced multiple sstables in levels 1 and 2.
@Test
public void testValidationMultipleSSTablePerLevel() throws Exception
{
// Large random values make it easy to produce many sstables quickly.
byte [] b = new byte[100 * 1024];
new Random().nextBytes(b);
ByteBuffer value = ByteBuffer.wrap(b); // 100 KB value, make it easy to have multiple files
// Enough data to have a level 1 and 2
int rows = 20;
int columns = 10;
// Adds enough data to trigger multiple sstable per level
for (int r = 0; r < rows; r++)
{
DecoratedKey key = Util.dk(String.valueOf(r));
Mutation rm = new Mutation(ksname, key.getKey());
for (int c = 0; c < columns; c++)
{
rm.add(cfname, Util.cellname("column" + c), value, 0);
}
rm.apply();
// Flush per row so leveled compaction sees many input sstables.
cfs.forceBlockingFlush();
}
// Block until background leveling settles before inspecting the levels.
waitForLeveling(cfs);
WrappingCompactionStrategy strategy = (WrappingCompactionStrategy) cfs.getCompactionStrategy();
// Checking we're not completely bad at math
assertTrue(strategy.getSSTableCountPerLevel()[1] > 0);
assertTrue(strategy.getSSTableCountPerLevel()[2] > 0);
// A token range from "" to "" covers the full ring, so validation must
// touch every sstable regardless of level.
Range<Token> range = new Range<>(Util.token(""), Util.token(""));
int gcBefore = keyspace.getColumnFamilyStore(cfname).gcBefore(System.currentTimeMillis());
UUID parentRepSession = UUID.randomUUID();
ActiveRepairService.instance.registerParentRepairSession(parentRepSession, Arrays.asList(cfs), Arrays.asList(range));
RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), ksname, cfname, range);
Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
// get() propagates any validation failure as the test failure.
CompactionManager.instance.submitValidation(cfs, validator).get();
}
示例10: testMissingHeader
import org.apache.cassandra.Util; //导入方法依赖的package包/类
// Commit log recovery must still replay mutations after the per-segment
// .header files are deleted (older commit log format / API).
@Test
public void testMissingHeader() throws IOException
{
Keyspace keyspace1 = Keyspace.open("Keyspace1");
Keyspace keyspace2 = Keyspace.open("Keyspace2");
Mutation rm;
DecoratedKey dk = Util.dk("keymulti");
ColumnFamily cf;
// Write one column to each keyspace under the same key.
cf = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
cf.addColumn(column("col1", "val1", 1L));
rm = new Mutation("Keyspace1", dk.getKey(), cf);
rm.apply();
cf = ArrayBackedSortedColumns.factory.create("Keyspace2", "Standard3");
cf.addColumn(column("col2", "val2", 1L));
rm = new Mutation("Keyspace2", dk.getKey(), cf);
rm.apply();
// Drop the memtables so the data only survives in the commit log.
keyspace1.getColumnFamilyStore("Standard1").clearUnsafe();
keyspace2.getColumnFamilyStore("Standard3").clearUnsafe();
// nuke the header
// NOTE(review): listFiles() may return null; assumed non-null here since
// the commit log directory was just written to.
for (File file : new File(DatabaseDescriptor.getCommitLogLocation()).listFiles())
{
if (file.getName().endsWith(".header"))
FileUtils.deleteWithConfirm(file);
}
CommitLog.instance.resetUnsafe(); // disassociate segments from live CL
CommitLog.instance.recover();
// Both columns must reappear after replay.
assertColumns(Util.getColumnFamily(keyspace1, dk, "Standard1"), "col1");
assertColumns(Util.getColumnFamily(keyspace2, dk, "Standard3"), "col2");
}
示例11: testStandardColumnCompactions
import org.apache.cassandra.Util; //导入方法依赖的package包/类
@Test
public void testStandardColumnCompactions() throws IOException, ExecutionException, InterruptedException
{
    // This test does enough rows to force multiple block indexes to be used.
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = ks.getColumnFamilyStore("Standard1");
    store.clearUnsafe();

    final int rowsPerSSTable = 10;
    final int sstableCount = store.metadata.getMinIndexInterval() * 3 / rowsPerSSTable;

    // Keep flushes from triggering compaction so we control sstable layout.
    store.disableAutoCompaction();

    long newestTimestamp = Long.MIN_VALUE;
    Set<DecoratedKey> written = new HashSet<DecoratedKey>();
    for (int table = 0; table < sstableCount; table++)
    {
        for (int row = 0; row < rowsPerSSTable; row++)
        {
            // Only two distinct partition keys ("0"/"1"); column names vary.
            DecoratedKey key = Util.dk(String.valueOf(row % 2));
            Mutation mutation = new Mutation(KEYSPACE1, key.getKey());
            long ts = table * rowsPerSSTable + row;
            mutation.add("Standard1", Util.cellname(String.valueOf(row / 2)),
                         ByteBufferUtil.EMPTY_BYTE_BUFFER,
                         ts);
            newestTimestamp = Math.max(ts, newestTimestamp);
            mutation.apply();
            written.add(key);
        }
        store.forceBlockingFlush();
        CompactionsTest.assertMaxTimestamp(store, newestTimestamp);
        assertEquals(written.toString(), written.size(), Util.getRangeSlice(store).size());
    }

    forceCompactions(store);
    assertEquals(written.size(), Util.getRangeSlice(store).size());
    // Make sure max timestamp of compacted sstables is recorded properly after compaction.
    CompactionsTest.assertMaxTimestamp(store, newestTimestamp);
    store.truncateBlocking();
}
示例12: testDeleteStandardRowSticksAfterFlush
import org.apache.cassandra.Util; //导入方法依赖的package包/类
// Regression test: flushing after a row deletion must not resurrect the
// deleted columns (CASSANDRA-1837). Each read re-checks row/column counts
// via assertRowAndColCount(expectedRows, expectedCols, ..., deleted, slice).
@Test
public void testDeleteStandardRowSticksAfterFlush() throws Throwable
{
// test to make sure flushing after a delete doesn't resurrect deleted cols.
String keyspaceName = "Keyspace1";
String cfName = "Standard1";
Keyspace keyspace = Keyspace.open(keyspaceName);
ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(cfName);
DecoratedKey key = Util.dk("f-flush-resurrection");
// Slice over the full column range, capped at 100 columns.
SlicePredicate sp = new SlicePredicate();
sp.setSlice_range(new SliceRange());
sp.getSlice_range().setCount(100);
sp.getSlice_range().setStart(ArrayUtils.EMPTY_BYTE_ARRAY);
sp.getSlice_range().setFinish(ArrayUtils.EMPTY_BYTE_ARRAY);
// insert
putColsStandard(cfs, key, column("col1", "val1", 1), column("col2", "val2", 1));
assertRowAndColCount(1, 2, null, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
// flush.
cfs.forceBlockingFlush();
// insert, don't flush
putColsStandard(cfs, key, column("col3", "val3", 1), column("col4", "val4", 1));
assertRowAndColCount(1, 4, null, false, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
// delete (from sstable and memtable)
// Row deletion at timestamp 2 shadows all four timestamp-1 columns.
RowMutation rm = new RowMutation(keyspace.getName(), key.key);
rm.delete(cfs.name, 2);
rm.apply();
// verify delete
assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
// flush
cfs.forceBlockingFlush();
// re-verify delete. // first breakage is right here because of CASSANDRA-1837.
assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
// simulate a 'late' insertion that gets put in after the deletion. should get inserted, but fail on read.
putColsStandard(cfs, key, column("col5", "val5", 1), column("col2", "val2", 1));
// should still be nothing there because we deleted this row. 2nd breakage, but was undetected because of 1837.
assertRowAndColCount(1, 0, null, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
// make sure that new writes are recognized.
// Timestamp 3 is newer than the deletion (timestamp 2), so these survive.
putColsStandard(cfs, key, column("col6", "val6", 3), column("col7", "val7", 3));
assertRowAndColCount(1, 2, null, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
// and it remains so after flush. (this wasn't failing before, but it's good to check.)
cfs.forceBlockingFlush();
assertRowAndColCount(1, 2, null, true, cfs.getRangeSlice(Util.range("f", "g"), null, ThriftValidation.asIFilter(sp, cfs.metadata, null), 100));
}
示例13: createRow
import org.apache.cassandra.Util; //导入方法依赖的package包/类
/** Builds a Row for the given raw key with {@code nbCol} generated columns. */
private Row createRow(String name, int nbCol)
{
    DecoratedKey decoratedKey = Util.dk(name);
    ColumnFamily columns = createCF(nbCol);
    return new Row(decoratedKey, columns);
}
示例14: testStandardColumnCompactions
import org.apache.cassandra.Util; //导入方法依赖的package包/类
@Test
public void testStandardColumnCompactions()
{
    // This test does enough rows to force multiple block indexes to be used.
    Keyspace ks = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore store = ks.getColumnFamilyStore("Standard1");
    store.clearUnsafe();
    store.disableAutoCompaction(); // keep sstable layout under test control

    final int rowsPerSSTable = 10;
    final int sstableCount = store.metadata.params.minIndexInterval * 3 / rowsPerSSTable;

    long newestTimestamp = Long.MIN_VALUE;
    Set<DecoratedKey> written = new HashSet<DecoratedKey>();
    for (int table = 0; table < sstableCount; table++)
    {
        for (int row = 0; row < rowsPerSSTable; row++)
        {
            // Only two distinct partition keys ("0"/"1"); clusterings vary.
            DecoratedKey key = Util.dk(String.valueOf(row % 2));
            long ts = table * rowsPerSSTable + row;
            newestTimestamp = Math.max(ts, newestTimestamp);
            UpdateBuilder.create(store.metadata, key)
                         .withTimestamp(ts)
                         .newRow(String.valueOf(row / 2)).add("val", ByteBufferUtil.EMPTY_BYTE_BUFFER)
                         .apply();
            written.add(key);
        }
        store.forceBlockingFlush();
        CompactionsTest.assertMaxTimestamp(store, newestTimestamp);
        assertEquals(written.toString(), written.size(), Util.getAll(Util.cmd(store).build()).size());
    }

    forceCompactions(store);
    assertEquals(written.toString(), written.size(), Util.getAll(Util.cmd(store).build()).size());
    // Make sure max timestamp of compacted sstables is recorded properly after compaction.
    CompactionsTest.assertMaxTimestamp(store, newestTimestamp);
    store.truncateBlocking();
}
示例15: testAggressiveFullyExpired
import org.apache.cassandra.Util; //导入方法依赖的package包/类
/**
 * Verifies that CompactionController.getFullyExpiredSSTables reports exactly
 * the sstables whose cells are all expired and not needed to shadow data in
 * other sstables, given a gcBefore slightly in the future.
 */
@Test
public void testAggressiveFullyExpired()
{
    ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
    cfs.disableAutoCompaction();
    cfs.metadata.gcGraceSeconds(0); // expired data becomes purgeable immediately
    DecoratedKey ttlKey = Util.dk("ttl");

    // Four sstables over the same partition, flushed one mutation at a time
    // so each Mutation lands in its own sstable. Cells use short TTLs (1s/3s)
    // and interleaved timestamps so some sstables shadow others.
    Mutation rm = new Mutation("Keyspace1", ttlKey.getKey());
    rm.add("Standard1", Util.cellname("col1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 1, 1);
    rm.add("Standard1", Util.cellname("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 3, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    rm = new Mutation("Keyspace1", ttlKey.getKey());
    rm.add("Standard1", Util.cellname("col1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 2, 1);
    rm.add("Standard1", Util.cellname("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 5, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    rm = new Mutation("Keyspace1", ttlKey.getKey());
    rm.add("Standard1", Util.cellname("col1"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 4, 1);
    rm.add("Standard1", Util.cellname("shadow"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 7, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    rm = new Mutation("Keyspace1", ttlKey.getKey());
    rm.add("Standard1", Util.cellname("shadow"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 6, 3);
    rm.add("Standard1", Util.cellname("col2"), ByteBufferUtil.EMPTY_BYTE_BUFFER, 8, 1);
    rm.applyUnsafe();
    cfs.forceBlockingFlush();

    Set<SSTableReader> sstables = Sets.newHashSet(cfs.getSSTables());
    int now = (int)(System.currentTimeMillis() / 1000);
    int gcBefore = now + 2; // future gcBefore: all 1s-TTL cells count as gone
    // Typed empty set instead of the raw Collections.EMPTY_SET constant,
    // which would require an unchecked conversion.
    Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(
        cfs,
        sstables,
        Collections.<SSTableReader>emptySet(),
        gcBefore);
    assertEquals(2, expired.size());

    cfs.clearUnsafe();
}