本文整理汇总了Java中org.apache.cassandra.dht.Range.normalize方法的典型用法代码示例。如果您正苦于以下问题:Java Range.normalize方法的具体用法?Java Range.normalize怎么用?Java Range.normalize使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.dht.Range
的用法示例。
在下文中一共展示了Range.normalize方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: addTransferRanges
import org.apache.cassandra.dht.Range; //导入方法依赖的package包/类
/**
* Set up transfer for specific keyspace/ranges/CFs
*
* Used in repair - a streamed sstable in repair will be marked with the given repairedAt time
*
* @param keyspace Transfer keyspace
* @param ranges Transfer ranges
* @param columnFamilies Transfer ColumnFamilies
* @param flushTables flush tables?
* @param repairedAt the time the repair started.
*/
/**
 * Registers keyspace ranges/column families for outbound streaming.
 *
 * Used in repair - a streamed sstable in repair will be marked with the given repairedAt time
 *
 * @param keyspace Transfer keyspace
 * @param ranges Transfer ranges
 * @param columnFamilies Transfer ColumnFamilies
 * @param flushTables flush tables?
 * @param repairedAt the time the repair started.
 */
public void addTransferRanges(String keyspace, Collection<Range<Token>> ranges, Collection<String> columnFamilies, boolean flushTables, long repairedAt)
{
    Collection<ColumnFamilyStore> cfStores = getColumnFamilyStores(keyspace, columnFamilies);
    if (flushTables)
        flushSSTables(cfStores);

    // Coalesce overlapping/adjacent ranges before computing sstable sections.
    List<Range<Token>> normalized = Range.normalize(ranges);
    // A repairedAt other than UNREPAIRED_SSTABLE marks this as an incremental-repair stream.
    boolean incremental = repairedAt != ActiveRepairService.UNREPAIRED_SSTABLE;
    List<SSTableStreamingSections> sections = getSSTableSectionsForRanges(normalized, cfStores, repairedAt, incremental);
    try
    {
        addTransferFiles(sections);
    }
    finally
    {
        // Always drop the references taken while computing the sections.
        for (SSTableStreamingSections section : sections)
            section.ref.release();
    }
}
示例2: addTransferRanges
import org.apache.cassandra.dht.Range; //导入方法依赖的package包/类
/**
* Set up transfer for specific keyspace/ranges/CFs
*
* Used in repair - a streamed sstable in repair will be marked with the given repairedAt time
*
* @param keyspace Transfer keyspace
* @param ranges Transfer ranges
* @param columnFamilies Transfer ColumnFamilies
* @param flushTables flush tables?
* @param repairedAt the time the repair started.
*/
/**
 * Registers keyspace ranges/column families for outbound streaming.
 *
 * Used in repair - a streamed sstable in repair will be marked with the given repairedAt time
 *
 * @param keyspace Transfer keyspace
 * @param ranges Transfer ranges
 * @param columnFamilies Transfer ColumnFamilies
 * @param flushTables flush tables?
 * @param repairedAt the time the repair started.
 */
public synchronized void addTransferRanges(String keyspace, Collection<Range<Token>> ranges, Collection<String> columnFamilies, boolean flushTables, long repairedAt)
{
    // Refuse further additions once the session has completed.
    failIfFinished();
    Collection<ColumnFamilyStore> cfStores = getColumnFamilyStores(keyspace, columnFamilies);
    if (flushTables)
        flushSSTables(cfStores);

    // Coalesce overlapping/adjacent ranges before computing sstable sections.
    List<Range<Token>> normalized = Range.normalize(ranges);
    List<SSTableStreamingSections> sections = getSSTableSectionsForRanges(normalized, cfStores, repairedAt, isIncremental);
    try
    {
        addTransferFiles(sections);
    }
    finally
    {
        // Always drop the references taken while computing the sections.
        for (SSTableStreamingSections section : sections)
            section.ref.release();
    }
}
示例3: makeBounds
import org.apache.cassandra.dht.Range; //导入方法依赖的package包/类
/**
 * Converts the given token ranges into partition-position bounds applicable
 * to the given sstable (one entry per normalized range that intersects it).
 */
private static List<AbstractBounds<PartitionPosition>> makeBounds(SSTableReader sstable, Collection<Range<Token>> tokenRanges)
{
    // Normalize first so overlapping input ranges collapse into disjoint ones.
    List<Range<Token>> normalized = Range.normalize(tokenRanges);
    List<AbstractBounds<PartitionPosition>> bounds = new ArrayList<>(normalized.size());
    for (Range<Token> tokenRange : normalized)
        addRange(sstable, Range.makeRowRange(tokenRange), bounds);
    return bounds;
}
示例4: addTransferRanges
import org.apache.cassandra.dht.Range; //导入方法依赖的package包/类
/**
* Set up transfer for specific keyspace/ranges/CFs
*
* @param keyspace Transfer keyspace
* @param ranges Transfer ranges
* @param columnFamilies Transfer ColumnFamilies
*/
/**
 * Set up transfer for specific keyspace/ranges/CFs
 *
 * @param keyspace Transfer keyspace
 * @param ranges Transfer ranges
 * @param columnFamilies Transfer ColumnFamilies
 */
public void addTransferRanges(String keyspace, Collection<Range<Token>> ranges, Collection<String> columnFamilies, boolean flushTables)
{
    // Resolve the stores to stream from: every CF of the keyspace when none are named.
    Collection<ColumnFamilyStore> cfStores = new HashSet<>();
    if (columnFamilies.isEmpty())
    {
        cfStores.addAll(Keyspace.open(keyspace).getColumnFamilyStores());
    }
    else
    {
        for (String cfName : columnFamilies)
            cfStores.add(Keyspace.open(keyspace).getColumnFamilyStore(cfName));
    }

    if (flushTables)
        flushSSTables(cfStores);

    // Coalesce overlapping/adjacent ranges, then mark matching sstables referenced per store.
    List<Range<Token>> normalized = Range.normalize(ranges);
    List<SSTableReader> sstables = Lists.newLinkedList();
    for (ColumnFamilyStore cfs : cfStores)
    {
        List<AbstractBounds<RowPosition>> rowBounds = Lists.newLinkedList();
        for (Range<Token> range : normalized)
            rowBounds.add(range.toRowBounds());
        ColumnFamilyStore.ViewFragment view = cfs.markReferenced(rowBounds);
        sstables.addAll(view.sstables);
    }
    addTransferFiles(normalized, sstables);
}
示例5: recordSizeEstimates
import org.apache.cassandra.dht.Range; //导入方法依赖的package包/类
@SuppressWarnings("resource")
private void recordSizeEstimates(ColumnFamilyStore table, Collection<Range<Token>> localRanges)
{
List<Range<Token>> unwrappedRanges = Range.normalize(localRanges);
// for each local primary range, estimate (crudely) mean partition size and partitions count.
Map<Range<Token>, Pair<Long, Long>> estimates = new HashMap<>(localRanges.size());
for (Range<Token> range : unwrappedRanges)
{
// filter sstables that have partitions in this range.
Refs<SSTableReader> refs = null;
long partitionsCount, meanPartitionSize;
try
{
while (refs == null)
{
Iterable<SSTableReader> sstables = table.getTracker().getView().select(SSTableSet.CANONICAL);
SSTableIntervalTree tree = SSTableIntervalTree.build(sstables);
Range<PartitionPosition> r = Range.makeRowRange(range);
Iterable<SSTableReader> canonicalSSTables = View.sstablesInBounds(r.left, r.right, tree);
refs = Refs.tryRef(canonicalSSTables);
}
// calculate the estimates.
partitionsCount = estimatePartitionsCount(refs, range);
meanPartitionSize = estimateMeanPartitionSize(refs);
}
finally
{
if (refs != null)
refs.release();
}
estimates.put(range, Pair.create(partitionsCount, meanPartitionSize));
}
// atomically update the estimates.
SystemKeyspace.updateSizeEstimates(table.metadata.ksName, table.metadata.cfName, estimates);
}
示例6: performAnticompaction
import org.apache.cassandra.dht.Range; //导入方法依赖的package包/类
/**
* Make sure the {validatedForRepair} are marked for compaction before calling this.
*
* Caller must reference the validatedForRepair sstables (via ParentRepairSession.getAndReferenceSSTables(..)).
*
* @param cfs
* @param ranges Ranges that the repair was carried out on
* @param validatedForRepair SSTables containing the repaired ranges. Should be referenced before passing them.
* @throws InterruptedException, ExecutionException, IOException
*/
/**
 * Splits the repaired ranges out of the validated sstables: sstables fully contained
 * in a repaired range only have their repairedAt metadata mutated; sstables that do
 * not intersect any range are left untouched; the rest are anticompacted.
 *
 * Make sure the {validatedForRepair} are marked for compaction before calling this.
 *
 * Caller must reference the validatedForRepair sstables (via ParentRepairSession.getAndReferenceSSTables(..)).
 *
 * @param cfs the column family store owning the sstables
 * @param ranges Ranges that the repair was carried out on
 * @param validatedForRepair SSTables containing the repaired ranges. Should be referenced before passing them.
 * @param repairedAt repair time to record on fully-repaired sstables
 * @throws InterruptedException, ExecutionException, IOException
 */
public void performAnticompaction(ColumnFamilyStore cfs,
                                  Collection<Range<Token>> ranges,
                                  Refs<SSTableReader> validatedForRepair,
                                  long repairedAt) throws InterruptedException, ExecutionException, IOException
{
    logger.info("Starting anticompaction for {}.{} on {}/{} sstables", cfs.keyspace.getName(), cfs.getColumnFamilyName(), validatedForRepair.size(), cfs.getSSTables().size());
    logger.debug("Starting anticompaction for ranges {}", ranges);
    Set<SSTableReader> sstables = new HashSet<>(validatedForRepair);
    Set<SSTableReader> mutatedRepairStatuses = new HashSet<>();
    Set<SSTableReader> nonAnticompacting = new HashSet<>();
    // Normalize once up front: the repaired ranges are loop-invariant, so there is no
    // reason to recompute the normalization for every sstable.
    Collection<Range<Token>> normalizedRanges = Range.normalize(ranges);
    Iterator<SSTableReader> sstableIterator = sstables.iterator();
    try
    {
        while (sstableIterator.hasNext())
        {
            SSTableReader sstable = sstableIterator.next();
            for (Range<Token> r : normalizedRanges)
            {
                Range<Token> sstableRange = new Range<>(sstable.first.getToken(), sstable.last.getToken(), sstable.partitioner);
                if (r.contains(sstableRange))
                {
                    // Fully repaired: mutating metadata is much cheaper than anticompacting.
                    logger.info("SSTable {} fully contained in range {}, mutating repairedAt instead of anticompacting", sstable, r);
                    sstable.descriptor.getMetadataSerializer().mutateRepairedAt(sstable.descriptor, repairedAt);
                    sstable.reloadSSTableMetadata();
                    mutatedRepairStatuses.add(sstable);
                    sstableIterator.remove();
                    break;
                }
                else if (!sstableRange.intersects(r))
                {
                    // No overlap with this repaired range: nothing to do for this sstable.
                    logger.info("SSTable {} ({}) does not intersect repaired range {}, not touching repairedAt.", sstable, sstableRange, r);
                    nonAnticompacting.add(sstable);
                    sstableIterator.remove();
                    break;
                }
                else
                {
                    // Partial overlap: this sstable must be anticompacted.
                    logger.info("SSTable {} ({}) will be anticompacted on range {}", sstable, sstableRange, r);
                }
            }
        }
        cfs.getDataTracker().notifySSTableRepairedStatusChanged(mutatedRepairStatuses);
        // Sstables we will not anticompact can be unmarked and released immediately.
        cfs.getDataTracker().unmarkCompacting(Sets.union(nonAnticompacting, mutatedRepairStatuses));
        validatedForRepair.release(Sets.union(nonAnticompacting, mutatedRepairStatuses));
        if (!sstables.isEmpty())
            doAntiCompaction(cfs, ranges, sstables, repairedAt);
    }
    finally
    {
        // Release whatever references remain and clear the compacting marks.
        validatedForRepair.release();
        cfs.getDataTracker().unmarkCompacting(sstables);
    }
    // No format arguments: log the literal message directly instead of String.format.
    logger.info("Completed anticompaction successfully");
}