This article collects typical usage examples of the Java class org.apache.cassandra.db.compaction.AbstractCompactedRow. If you are unsure what AbstractCompactedRow does or how to use it, the curated examples below should help.
AbstractCompactedRow belongs to the org.apache.cassandra.db.compaction package. 15 code examples of the class are shown below, ordered by popularity.
Example 1: add
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
/**
 * Called (in order) for every row present in the CF.
 * Hashes the row, and adds it to the tree being built.
 *
 * @param row the row whose hash is added to the tree
 */
public void add(AbstractCompactedRow row)
{
    assert desc.range.contains(row.key.getToken()) : row.key.getToken() + " is not contained in " + desc.range;
    assert lastKey == null || lastKey.compareTo(row.key) < 0
         : "row " + row.key + " received out of order wrt " + lastKey;
    lastKey = row.key;
    if (range == null)
        range = ranges.next();
    // generate new ranges as long as case 1 is true
    while (!range.contains(row.key.getToken()))
    {
        // add the empty hash, and move to the next range
        range.ensureHashInitialised();
        range = ranges.next();
    }
    // case 3 must be true: mix in the hashed row
    RowHash rowHash = rowHash(row);
    if (rowHash != null)
    {
        range.addHash(rowHash);
    }
}
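The tricky part of add is keeping two ordered sequences in lockstep: rows arrive in token order, and ranges are consumed in order as tokens pass beyond them. Below is a minimal, self-contained sketch of that same walk using plain long tokens and a toy Bucket class in place of Cassandra's Token and MerkleTree.TreeRange; every name in it is illustrative, not Cassandra API.

import java.util.Arrays;
import java.util.Iterator;

public class RangeWalkSketch
{
    // toy stand-in for MerkleTree.TreeRange: a (left, right] interval with a running hash
    static class Bucket
    {
        final long left, right;
        long hash;               // XOR of the row hashes mixed in so far
        boolean initialised;     // true once the bucket holds a hash, even an empty one

        Bucket(long left, long right)
        {
            this.left = left;
            this.right = right;
        }

        boolean contains(long token) { return token > left && token <= right; }
        void ensureHashInitialised() { initialised = true; }
        void addHash(long rowHash)   { initialised = true; hash ^= rowHash; } // MerkleTree XORs hashes
    }

    public static void main(String[] args)
    {
        Iterator<Bucket> ranges = Arrays.asList(
            new Bucket(0, 10), new Bucket(10, 20), new Bucket(20, 30)).iterator();
        long[] rowTokens = { 3, 7, 25 }; // sorted, like the rows handed to add() by compaction

        Bucket range = null;
        for (long token : rowTokens)
        {
            if (range == null)
                range = ranges.next();
            // "case 1": the token is past the current range, so seal it (possibly empty) and advance
            while (!range.contains(token))
            {
                range.ensureHashInitialised();
                range = ranges.next();
            }
            // "case 3": the token falls in the current range, so mix its hash in
            range.addHash(Long.hashCode(token));
        }
        // after the loop, bucket (10,20] was initialised but received no row hashes
    }
}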
Example 2: rowHash
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
private MerkleTree.RowHash rowHash(AbstractCompactedRow row)
{
    validated++;
    // MerkleTree uses XOR internally, so we want lots of output bits here
    CountingDigest digest = new CountingDigest(FBUtilities.newMessageDigest("SHA-256"));
    row.update(digest);
    // only return a new hash for the merkle tree if the digest was updated - see CASSANDRA-8979
    if (digest.count > 0)
    {
        return new MerkleTree.RowHash(row.key.getToken(), digest.digest(), digest.count);
    }
    else
    {
        return null;
    }
}
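The digest.count > 0 guard only works because CountingDigest tracks how many bytes were actually hashed. Cassandra's CountingDigest must itself be usable as a MessageDigest (row.update takes one); the sketch below sidesteps that detail and just delegates, so it is a rough idea of the counting behaviour, not Cassandra's actual class.

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// sketch only: counts the bytes fed into a wrapped MessageDigest
class CountingDigestSketch
{
    final MessageDigest wrapped;
    long count;

    CountingDigestSketch(MessageDigest wrapped)
    {
        this.wrapped = wrapped;
    }

    void update(byte[] input, int offset, int len)
    {
        wrapped.update(input, offset, len);
        count += len;   // this is what makes the "was anything hashed?" check possible
    }

    byte[] digest()
    {
        return wrapped.digest();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException
    {
        CountingDigestSketch digest = new CountingDigestSketch(MessageDigest.getInstance("SHA-256"));
        System.out.println("bytes hashed: " + digest.count); // 0 -> a validator would return null
        digest.update(new byte[]{ 1, 2, 3 }, 0, 3);
        System.out.println("bytes hashed: " + digest.count); // 3 -> worth building a RowHash
    }
}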
Example 3: append
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
/**
 * @param row the compacted row to append
 * @return null if the row was compacted away entirely; otherwise, the PK index entry for this row
 */
public RowIndexEntry append(AbstractCompactedRow row)
{
    long startPosition = beforeAppend(row.key);
    RowIndexEntry entry;
    try
    {
        entry = row.write(startPosition, dataFile.stream);
        if (entry == null)
            return null;
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, dataFile.getPath());
    }
    long endPosition = dataFile.getFilePointer();
    sstableMetadataCollector.update(endPosition - startPosition, row.columnStats());
    afterAppend(row.key, endPosition, entry);
    return entry;
}
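append brackets the write between two file positions so the row's on-disk size can be handed to the metadata collector. A minimal sketch of that bookkeeping against a plain FileChannel (the file name and class name here are made up for illustration; none of this is Cassandra API):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class AppendBookkeepingSketch
{
    public static void main(String[] args) throws IOException
    {
        try (FileChannel dataFile = FileChannel.open(Paths.get("data.tmp"),
                                                     StandardOpenOption.CREATE, StandardOpenOption.WRITE))
        {
            long startPosition = dataFile.position();   // beforeAppend(row.key)
            dataFile.write(ByteBuffer.wrap("serialized row".getBytes(StandardCharsets.UTF_8))); // row.write(...)
            long endPosition = dataFile.position();
            // the difference is what sstableMetadataCollector.update(...) receives
            System.out.println("row size on disk: " + (endPosition - startPosition));
        }
    }
}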
Example 4: add
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
/**
 * Called (in order) for every row present in the CF.
 * Hashes the row, and adds it to the tree being built.
 *
 * @param row the row whose hash is added to the tree
 */
public void add(AbstractCompactedRow row)
{
    assert desc.range.contains(row.key.token) : row.key.token + " is not contained in " + desc.range;
    assert lastKey == null || lastKey.compareTo(row.key) < 0
         : "row " + row.key + " received out of order wrt " + lastKey;
    lastKey = row.key;
    if (range == null)
        range = ranges.next();
    // generate new ranges as long as case 1 is true
    while (!range.contains(row.key.token))
    {
        // add the empty hash, and move to the next range
        range.ensureHashInitialised();
        range = ranges.next();
    }
    // case 3 must be true: mix in the hashed row
    range.addHash(rowHash(row));
}
Example 5: append
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
/**
 * @param row the compacted row to append
 * @return null if the row was compacted away entirely; otherwise, the PK index entry for this row
 */
public RowIndexEntry append(AbstractCompactedRow row)
{
    long currentPosition = beforeAppend(row.key);
    RowIndexEntry entry;
    try
    {
        entry = row.write(currentPosition, dataFile.stream);
        if (entry == null)
            return null;
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, dataFile.getPath());
    }
    sstableMetadataCollector.update(dataFile.getFilePointer() - currentPosition, row.columnStats());
    afterAppend(row.key, currentPosition, entry);
    return entry;
}
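This variant is nearly identical to Example 3; the visible difference is that afterAppend receives the row's start position (currentPosition) rather than its end position, so the two snippets most likely come from different Cassandra versions with different afterAppend contracts.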
Example 6: add
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
/**
 * Called (in order) for every row present in the CF.
 * Hashes the row, and adds it to the tree being built.
 *
 * @param row the row whose hash is added to the tree
 */
public void add(AbstractCompactedRow row)
{
    assert desc.range.contains(row.key.getToken()) : row.key.getToken() + " is not contained in " + desc.range;
    assert lastKey == null || lastKey.compareTo(row.key) < 0
         : "row " + row.key + " received out of order wrt " + lastKey;
    lastKey = row.key;
    if (range == null)
        range = ranges.next();
    // generate new ranges as long as case 1 is true
    while (!range.contains(row.key.getToken()))
    {
        // add the empty hash, and move to the next range
        range.ensureHashInitialised();
        range = ranges.next();
    }
    // case 3 must be true: mix in the hashed row
    range.addHash(rowHash(row));
}
Example 7: add
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
/**
 * Called (in order) for every row present in the CF.
 * Hashes the row, and adds it to the tree being built.
 *
 * There are four possible cases:
 *  1. Token is greater than range.right (we haven't generated a range for it yet),
 *  2. Token is less than/equal to range.left (the range was valid),
 *  3. Token is contained in the range (the range is in progress),
 *  4. No more invalid ranges exist.
 *
 * TODO: Because we only validate completely empty trees at the moment, we
 * do not bother dealing with case 2, and case 4 should result in an error.
 *
 * Additionally, there is a special case for the minimum token, because
 * although it sorts first, it is contained in the last possible range.
 *
 * @param row The row.
 */
public void add(AbstractCompactedRow row)
{
    assert request.range.contains(row.key.token) : row.key.token + " is not contained in " + request.range;
    assert lastKey == null || lastKey.compareTo(row.key) < 0
         : "row " + row.key + " received out of order wrt " + lastKey;
    lastKey = row.key;
    if (range == null)
        range = ranges.next();
    // generate new ranges as long as case 1 is true
    while (!range.contains(row.key.token))
    {
        // add the empty hash, and move to the next range
        range.addHash(EMPTY_ROW);
        range = ranges.next();
    }
    // case 3 must be true: mix in the hashed row
    range.addHash(rowHash(row));
}
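Compared with the add variants in Examples 1, 4, and 6, this one marks each skipped range by hashing in a constant EMPTY_ROW instead of calling ensureHashInitialised, and it validates tokens against request.range rather than desc.range. The four-case analysis in its Javadoc applies equally to the other variants, which simply omit it.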
Example 8: append
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
public RowIndexEntry append(AbstractCompactedRow row)
{
    // we do this before appending to ensure we can resetAndTruncate() safely if the append fails
    maybeReopenEarly(row.key);
    RowIndexEntry index = writer.append(row);
    if (!isOffline)
    {
        if (index == null)
        {
            cfs.invalidateCachedRow(row.key);
        }
        else
        {
            boolean save = false;
            for (SSTableReader reader : rewriting)
            {
                if (reader.getCachedPosition(row.key, false) != null)
                {
                    save = true;
                    break;
                }
            }
            if (save)
                cachedKeys.put(row.key, index);
        }
    }
    return index;
}
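This wrapper layers row-cache maintenance on top of the plain writer from Examples 3 and 5: when the row was compacted away entirely (index == null), any cached copy is invalidated; otherwise its new index entry is remembered only if at least one of the sstables being rewritten actually had the key cached, so the cache is carried over without being inflated.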
Example 9: tryAppend
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
public RowIndexEntry tryAppend(AbstractCompactedRow row)
{
    writer.mark();
    try
    {
        return append(row);
    }
    catch (Throwable t)
    {
        writer.resetAndTruncate();
        throw t;
    }
}
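tryAppend makes each append all-or-nothing from the file's point of view: remember where the row started, and on any failure truncate back to that mark before rethrowing. A self-contained sketch of the same mark-and-truncate pattern using a FileChannel (all names illustrative, not Cassandra API):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class MarkAndTruncateSketch
{
    static void tryAppend(FileChannel channel, byte[] row, boolean failMidWrite) throws IOException
    {
        long mark = channel.position();                  // writer.mark()
        try
        {
            channel.write(ByteBuffer.wrap(row));         // append(row)
            if (failMidWrite)
                throw new IOException("simulated failure after a partial append");
        }
        catch (Throwable t)
        {
            channel.truncate(mark);                      // resetAndTruncate(): drop the partial row
            channel.position(mark);
            throw t;
        }
    }

    public static void main(String[] args) throws IOException
    {
        try (FileChannel channel = FileChannel.open(Paths.get("rows.tmp"),
                                                    StandardOpenOption.CREATE, StandardOpenOption.WRITE))
        {
            tryAppend(channel, new byte[]{ 1, 2, 3 }, false);
            try
            {
                tryAppend(channel, new byte[]{ 4, 5, 6 }, true);
            }
            catch (IOException expected)
            {
                // the failed append left no partial bytes behind
            }
            System.out.println("file size: " + channel.size()); // 3
        }
    }
}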
Example 10: basicTest
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
@Test
public void basicTest() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    for (int j = 0; j < 100; j++)
    {
        ByteBuffer key = ByteBufferUtil.bytes(String.valueOf(j));
        Mutation rm = new Mutation(KEYSPACE, key);
        rm.add(CF, Util.cellname("0"), ByteBufferUtil.EMPTY_BYTE_BUFFER, j);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
    assertEquals(1, sstables.size());
    SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
    {
        ISSTableScanner scanner = scanners.scanners.get(0);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
        while (scanner.hasNext())
        {
            AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
            writer.append(row);
        }
    }
    Collection<SSTableReader> newsstables = writer.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
    Thread.sleep(100);
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
    assertEquals(1, filecounts);
}
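The test walks the whole rewrite path end to end: write 100 rows, flush them into a single sstable, scan it under a CompactionController, wrap each scanned row in a LazilyCompactedRow and append it to the SSTableRewriter, then finish() and mark the originals replaced. validateCFS and the final file-count assertion verify that no stray files or references were leaked.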
Example 11: basicTest2
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
@Test
public void basicTest2() throws InterruptedException
{
    Keyspace keyspace = Keyspace.open(KEYSPACE);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF);
    cfs.truncateBlocking();
    SSTableReader s = writeFile(cfs, 1000);
    cfs.addSSTable(s);
    Set<SSTableReader> sstables = new HashSet<>(cfs.getSSTables());
    assertEquals(1, sstables.size());
    SSTableRewriter.overrideOpenInterval(10000000);
    SSTableRewriter writer = new SSTableRewriter(cfs, sstables, 1000, false);
    try (AbstractCompactionStrategy.ScannerList scanners = cfs.getCompactionStrategy().getScanners(sstables))
    {
        ISSTableScanner scanner = scanners.scanners.get(0);
        CompactionController controller = new CompactionController(cfs, sstables, cfs.gcBefore(System.currentTimeMillis()));
        writer.switchWriter(getWriter(cfs, sstables.iterator().next().descriptor.directory));
        while (scanner.hasNext())
        {
            AbstractCompactedRow row = new LazilyCompactedRow(controller, Arrays.asList(scanner.next()));
            writer.append(row);
        }
    }
    Collection<SSTableReader> newsstables = writer.finish();
    cfs.getDataTracker().markCompactedSSTablesReplaced(sstables, newsstables, OperationType.COMPACTION);
    Thread.sleep(100);
    validateCFS(cfs);
    int filecounts = assertFileCounts(sstables.iterator().next().descriptor.directory.list(), 0, 0);
    assertEquals(1, filecounts);
}
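basicTest2 follows the same flow as basicTest but seeds the store with a pre-built 1000-row sstable (writeFile) and calls SSTableRewriter.overrideOpenInterval(10000000), presumably setting the early-open threshold high enough that incremental opening of the new sstable never triggers during the rewrite.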
Example 12: rowHash
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
private MerkleTree.RowHash rowHash(AbstractCompactedRow row)
{
    validated++;
    // MerkleTree uses XOR internally, so we want lots of output bits here
    CountingDigest digest = new CountingDigest(FBUtilities.newMessageDigest("SHA-256"));
    row.update(digest);
    return new MerkleTree.RowHash(row.key.token, digest.digest(), digest.count);
}
Example 13: rowHash
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
private MerkleTree.RowHash rowHash(AbstractCompactedRow row)
{
    validated++;
    // MerkleTree uses XOR internally, so we want lots of output bits here
    CountingDigest digest = new CountingDigest(FBUtilities.newMessageDigest("SHA-256"));
    row.update(digest);
    return new MerkleTree.RowHash(row.key.getToken(), digest.digest(), digest.count);
}
Example 14: tryAppend
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
public RowIndexEntry tryAppend(AbstractCompactedRow row)
{
    mark();
    try
    {
        return append(row);
    }
    catch (Throwable t)
    {
        resetAndTruncate();
        throw t;
    }
}
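Example 14 is the same guard as Example 9, just implemented inside the writer itself, so mark() and resetAndTruncate() are invoked directly rather than on a writer delegate.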
Example 15: rowHash
import org.apache.cassandra.db.compaction.AbstractCompactedRow; // import the required package/class
private MerkleTree.RowHash rowHash(AbstractCompactedRow row)
{
    validated++;
    // MerkleTree uses XOR internally, so we want lots of output bits here
    MessageDigest digest = FBUtilities.newMessageDigest("SHA-256");
    row.update(digest);
    return new MerkleTree.RowHash(row.key.token, digest.digest());
}
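Read together, the rowHash variants appear to trace the validator's evolution: Example 15 hashes with a bare MessageDigest, Examples 12 and 13 switch to a CountingDigest so the number of hashed bytes can be stored in the RowHash, and Example 2 adds the CASSANDRA-8979 guard that returns null when nothing was hashed, keeping empty rows from contributing a hash to the merkle tree.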