This article collects typical usage examples of the Java class org.apache.cassandra.service.ActiveRepairService. If you are wondering what ActiveRepairService is for and how to use it, the selected code examples below may help.
The ActiveRepairService class belongs to the org.apache.cassandra.service package. 15 code examples of the class are shown below, ordered roughly by popularity.
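Before the individual examples, here is a minimal orientation sketch. Nearly every snippet below touches two members of the class: the UNREPAIRED_SSTABLE sentinel (the repairedAt value carried by sstables that have never been covered by an incremental repair) and the per-repair ParentRepairSession registry reached through ActiveRepairService.instance. The sketch only prints the sentinel; the class name, the need for cassandra-all on the classpath, and the assumed version range (roughly Cassandra 2.1–3.x, matching the snippets) are illustrative assumptions, not part of the original examples.

import org.apache.cassandra.service.ActiveRepairService;

// Orientation sketch (assumption: cassandra-all on the classpath).
public class ActiveRepairServiceOrientation
{
    public static void main(String[] args)
    {
        // UNREPAIRED_SSTABLE is the "never repaired" repairedAt sentinel that the examples
        // below pass to SSTable writers and stream plans when no parent repair session exists.
        System.out.println("UNREPAIRED_SSTABLE = " + ActiveRepairService.UNREPAIRED_SSTABLE);
    }
}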
Example 1: processOldApiNotification

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

/**
 * Handles notifications from the old repair API (forceRepairAsync)
 */
private void processOldApiNotification(Notification notification) {
    try {
        int[] data = (int[]) notification.getUserData();
        // get the repair sequence number
        int repairNo = data[0];
        // get the repair status
        ActiveRepairService.Status status = ActiveRepairService.Status.values()[data[1]];
        // this is some text message like "Starting repair...", "Finished repair...", etc.
        String message = notification.getMessage();
        // let the handler process the event
        repairStatusHandler.get().handle(repairNo, Optional.of(status), Optional.absent(), message);
    } catch (RuntimeException e) {
        LOG.error("Error while processing JMX notification", e);
    }
}
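Example 1 only shows how the notification payload is decoded. As a complement, the sketch below shows one way such a listener could be registered over JMX against Cassandra's StorageService MBean, which emits these repair notifications. The host, the default JMX port 7199, the class name, and the fixed wait are assumptions for illustration, not part of the original example.

import javax.management.MBeanServerConnection;
import javax.management.NotificationListener;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class RepairNotificationSubscriber
{
    public static void main(String[] args) throws Exception
    {
        // Assumption: default Cassandra JMX endpoint on localhost:7199.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url, null);
        try
        {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            // The StorageService MBean is the source of the repair notifications handled above.
            ObjectName storageService = new ObjectName("org.apache.cassandra.db:type=StorageService");
            NotificationListener listener = (notification, handback) ->
            {
                // The legacy repair API packs [repair sequence number, status ordinal] into the user data.
                Object userData = notification.getUserData();
                if (userData instanceof int[])
                {
                    int[] data = (int[]) userData;
                    System.out.printf("repair #%d, status ordinal %d: %s%n",
                                      data[0], data[1], notification.getMessage());
                }
            };
            mbs.addNotificationListener(storageService, listener, null, null);
            Thread.sleep(60_000); // keep the connection open long enough to observe some notifications
        }
        finally
        {
            connector.close();
        }
    }
}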
Example 2: initiateStreaming

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

private void initiateStreaming()
{
    long repairedAt = ActiveRepairService.UNREPAIRED_SSTABLE;
    InetAddress dest = request.dst;
    InetAddress preferred = SystemKeyspace.getPreferredIP(dest);
    if (desc.parentSessionId != null && ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId) != null)
        repairedAt = ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId).repairedAt;
    logger.info(String.format("[streaming task #%s] Performing streaming repair of %d ranges with %s", desc.sessionId, request.ranges.size(), request.dst));
    StreamResultFuture op = new StreamPlan("Repair", repairedAt, 1)
            .flushBeforeTransfer(true)
            // request ranges from the remote node
            .requestRanges(dest, preferred, desc.keyspace, request.ranges, desc.columnFamily)
            // send ranges to the remote node
            .transferRanges(dest, preferred, desc.keyspace, request.ranges, desc.columnFamily)
            .execute();
    op.addEventListener(this);
}
Example 3: addTransferRanges

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

/**
 * Set up transfer for specific keyspace/ranges/CFs
 *
 * Used in repair - a streamed sstable in repair will be marked with the given repairedAt time
 *
 * @param keyspace Transfer keyspace
 * @param ranges Transfer ranges
 * @param columnFamilies Transfer ColumnFamilies
 * @param flushTables flush tables?
 * @param repairedAt the time the repair started.
 */
public void addTransferRanges(String keyspace, Collection<Range<Token>> ranges, Collection<String> columnFamilies, boolean flushTables, long repairedAt)
{
    Collection<ColumnFamilyStore> stores = getColumnFamilyStores(keyspace, columnFamilies);
    if (flushTables)
        flushSSTables(stores);
    List<Range<Token>> normalizedRanges = Range.normalize(ranges);
    List<SSTableStreamingSections> sections = getSSTableSectionsForRanges(normalizedRanges, stores, repairedAt, repairedAt != ActiveRepairService.UNREPAIRED_SSTABLE);
    try
    {
        addTransferFiles(sections);
    }
    finally
    {
        for (SSTableStreamingSections release : sections)
            release.ref.release();
    }
}
Example 4: defaultStatsMetadata

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

public static StatsMetadata defaultStatsMetadata()
{
    return new StatsMetadata(defaultRowSizeHistogram(),
                             defaultColumnCountHistogram(),
                             ReplayPosition.NONE,
                             Long.MIN_VALUE,
                             Long.MAX_VALUE,
                             Integer.MAX_VALUE,
                             NO_COMPRESSION_RATIO,
                             defaultTombstoneDropTimeHistogram(),
                             0,
                             Collections.<ByteBuffer>emptyList(),
                             Collections.<ByteBuffer>emptyList(),
                             true,
                             ActiveRepairService.UNREPAIRED_SSTABLE);
}
Example 5: testAsciiKeyValidator

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

@Test
public void testAsciiKeyValidator() throws IOException, ParseException
{
    File tempSS = tempSSTableFile("Keyspace1", "AsciiKeys");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create("Keyspace1", "AsciiKeys");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add a row
    cfamily.addColumn(column("column", "value", 1L));
    writer.append(Util.dk("key", AsciiType.instance), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify
    File tempJson = File.createTempFile("CFWithAsciiKeys", ".json");
    SSTableExport.export(reader,
                         new PrintStream(tempJson.getPath()),
                         new String[0],
                         CFMetaData.sparseCFMetaData("Keyspace1", "AsciiKeys", BytesType.instance));
    JSONArray json = (JSONArray) JSONValue.parseWithException(new FileReader(tempJson));
    assertEquals(1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    // check row key
    assertEquals("key", row.get("key"));
}
Example 6: doVerb

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

public void doVerb(MessageIn<RepairMessage> message, int id)
{
    // TODO add cancel/interrupt message
    RepairJobDesc desc = message.payload.desc;
    switch (message.payload.messageType)
    {
        case VALIDATION_REQUEST:
            ValidationRequest validationRequest = (ValidationRequest) message.payload;
            // trigger read-only compaction
            ColumnFamilyStore store = Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily);
            Validator validator = new Validator(desc, message.from, validationRequest.gcBefore);
            CompactionManager.instance.submitValidation(store, validator);
            break;
        case SYNC_REQUEST:
            // forwarded sync request
            SyncRequest request = (SyncRequest) message.payload;
            StreamingRepairTask task = new StreamingRepairTask(desc, request);
            task.run();
            break;
        default:
            ActiveRepairService.instance.handleMessage(message.from, message.payload);
            break;
    }
}
Example 7: startSync

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

/**
 * Starts sending/receiving our list of differences to/from the remote endpoint: creates a callback
 * that will be called out of band once the streams complete.
 */
protected void startSync(List<Range<Token>> differences)
{
    InetAddress local = FBUtilities.getBroadcastAddress();
    // Either node could act as source or destination; if one of them is localhost, we make it the source to avoid forwarding
    InetAddress dst = r2.endpoint.equals(local) ? r1.endpoint : r2.endpoint;
    InetAddress preferred = SystemKeyspace.getPreferredIP(dst);
    String message = String.format("Performing streaming repair of %d ranges with %s", differences.size(), dst);
    logger.info("[repair #{}] {}", desc.sessionId, message);
    boolean isIncremental = false;
    if (desc.parentSessionId != null)
    {
        ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId);
        isIncremental = prs.isIncremental;
    }
    Tracing.traceRepair(message);
    new StreamPlan("Repair", repairedAt, 1, false, isIncremental).listeners(this)
            .flushBeforeTransfer(true)
            // request ranges from the remote node
            .requestRanges(dst, preferred, desc.keyspace, differences, desc.columnFamily)
            // send ranges to the remote node
            .transferRanges(dst, preferred, desc.keyspace, differences, desc.columnFamily)
            .execute();
}
Example 8: run

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

public void run()
{
    InetAddress dest = request.dst;
    InetAddress preferred = SystemKeyspace.getPreferredIP(dest);
    logger.info(String.format("[streaming task #%s] Performing streaming repair of %d ranges with %s", desc.sessionId, request.ranges.size(), request.dst));
    boolean isIncremental = false;
    if (desc.parentSessionId != null)
    {
        ActiveRepairService.ParentRepairSession prs = ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId);
        isIncremental = prs.isIncremental;
    }
    new StreamPlan("Repair", repairedAt, 1, false, isIncremental).listeners(this)
            .flushBeforeTransfer(true)
            // request ranges from the remote node
            .requestRanges(dest, preferred, desc.keyspace, request.ranges, desc.columnFamily)
            // send ranges to the remote node
            .transferRanges(dest, preferred, desc.keyspace, request.ranges, desc.columnFamily)
            .execute();
}
Example 9: defaultStatsMetadata

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

public static StatsMetadata defaultStatsMetadata()
{
    return new StatsMetadata(defaultPartitionSizeHistogram(),
                             defaultCellPerPartitionCountHistogram(),
                             IntervalSet.empty(),
                             Long.MIN_VALUE,
                             Long.MAX_VALUE,
                             Integer.MAX_VALUE,
                             Integer.MAX_VALUE,
                             0,
                             Integer.MAX_VALUE,
                             NO_COMPRESSION_RATIO,
                             defaultTombstoneDropTimeHistogram(),
                             0,
                             Collections.<ByteBuffer>emptyList(),
                             Collections.<ByteBuffer>emptyList(),
                             true,
                             ActiveRepairService.UNREPAIRED_SSTABLE,
                             -1,
                             -1);
}
Example 10: testNoDifference

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

/**
 * When there is no difference between the two trees, LocalSyncTask should return stats with 0 differences.
 */
@Test
public void testNoDifference() throws Throwable
{
    final InetAddress ep1 = InetAddress.getByName("127.0.0.1");
    final InetAddress ep2 = InetAddress.getByName("127.0.0.1");
    Range<Token> range = new Range<>(partirioner.getMinimumToken(), partirioner.getRandomToken());
    RepairJobDesc desc = new RepairJobDesc(UUID.randomUUID(), UUID.randomUUID(), KEYSPACE1, "Standard1", Arrays.asList(range));
    MerkleTrees tree1 = createInitialTree(desc);
    MerkleTrees tree2 = createInitialTree(desc);
    // difference the trees
    // note: we reuse the same endpoint which is bogus in theory but fine here
    TreeResponse r1 = new TreeResponse(ep1, tree1);
    TreeResponse r2 = new TreeResponse(ep2, tree2);
    LocalSyncTask task = new LocalSyncTask(desc, r1, r2, ActiveRepairService.UNREPAIRED_SSTABLE);
    task.run();
    assertEquals(0, task.get().numberOfDifferences);
}
Example 11: write

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

public Collection<SSTableReader> write(int expectedSize, Appender appender) throws IOException
{
    File datafile = (dest == null) ? tempSSTableFile(ksname, cfname, generation) : new File(dest.filenameFor(Component.DATA));
    CFMetaData cfm = Schema.instance.getCFMetaData(ksname, cfname);
    ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(cfm.cfId);
    SerializationHeader header = appender.header();
    SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, datafile.getAbsolutePath(), expectedSize, ActiveRepairService.UNREPAIRED_SSTABLE, 0, header);
    while (appender.append(writer)) { /* pass */ }
    Collection<SSTableReader> readers = writer.finish(true);
    // mark all components for removal
    if (cleanup)
        for (SSTableReader reader : readers)
            for (Component component : reader.components)
                new File(reader.descriptor.filenameFor(component)).deleteOnExit();
    return readers;
}
Example 12: initiateStreaming

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

private void initiateStreaming()
{
    long repairedAt = ActiveRepairService.UNREPAIRED_SSTABLE;
    if (desc.parentSessionId != null && ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId) != null)
        repairedAt = ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId).repairedAt;
    logger.info(String.format("[streaming task #%s] Performing streaming repair of %d ranges with %s", desc.sessionId, request.ranges.size(), request.dst));
    StreamResultFuture op = new StreamPlan("Repair", repairedAt, 1)
            .flushBeforeTransfer(true)
            // request ranges from the remote node
            .requestRanges(request.dst, desc.keyspace, request.ranges, desc.columnFamily)
            // send ranges to the remote node
            .transferRanges(request.dst, desc.keyspace, request.ranges, desc.columnFamily)
            .execute();
    op.addEventListener(this);
}
Example 13: testAsciiKeyValidator

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

@Test
public void testAsciiKeyValidator() throws IOException, ParseException
{
    File tempSS = tempSSTableFile(KEYSPACE1, "AsciiKeys");
    ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create(KEYSPACE1, "AsciiKeys");
    SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);
    // Add a row
    cfamily.addColumn(column("column", "value", 1L));
    writer.append(Util.dk("key", AsciiType.instance), cfamily);
    SSTableReader reader = writer.closeAndOpenReader();
    // Export to JSON and verify
    File tempJson = File.createTempFile("CFWithAsciiKeys", ".json");
    SSTableExport.export(reader,
                         new PrintStream(tempJson.getPath()),
                         new String[0],
                         CFMetaData.sparseCFMetaData(KEYSPACE1, "AsciiKeys", BytesType.instance));
    JSONArray json = (JSONArray) JSONValue.parseWithException(new FileReader(tempJson));
    assertEquals(1, json.size());
    JSONObject row = (JSONObject) json.get(0);
    // check row key
    assertEquals("key", row.get("key"));
}
Example 14: initiateStreaming

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

private void initiateStreaming()
{
    long repairedAt = ActiveRepairService.UNREPAIRED_SSTABLE;
    if (desc.parentSessionId != null && ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId) != null)
        repairedAt = ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId).repairedAt;
    logger.info(String.format("[streaming task #%s] Performing streaming repair of %d ranges with %s", desc.sessionId, request.ranges.size(), request.dst));
    StreamResultFuture op = new StreamPlan("Repair", repairedAt)
            .flushBeforeTransfer(true)
            // request ranges from the remote node
            .requestRanges(request.dst, desc.keyspace, request.ranges, desc.columnFamily)
            // send ranges to the remote node
            .transferRanges(request.dst, desc.keyspace, request.ranges, desc.columnFamily)
            .execute();
    op.addEventListener(this);
}
Example 15: addTransferFiles

import org.apache.cassandra.service.ActiveRepairService; // import the required package/class

/**
 * Set up transfer of the specific SSTables.
 * {@code sstables} must be marked as referenced so that they do not get deleted until the transfer completes.
 *
 * @param ranges Transfer ranges
 * @param sstables Transfer files
 * @param overriddenRepairedAt use this repairedAt time, for use in repair.
 */
public void addTransferFiles(Collection<Range<Token>> ranges, Collection<SSTableReader> sstables, long overriddenRepairedAt)
{
    List<SSTableStreamingSections> sstableDetails = new ArrayList<>(sstables.size());
    for (SSTableReader sstable : sstables)
    {
        long repairedAt = overriddenRepairedAt;
        if (overriddenRepairedAt == ActiveRepairService.UNREPAIRED_SSTABLE)
            repairedAt = sstable.getSSTableMetadata().repairedAt;
        sstableDetails.add(new SSTableStreamingSections(sstable,
                                                        sstable.getPositionsForRanges(ranges),
                                                        sstable.estimatedKeysForRanges(ranges),
                                                        repairedAt));
    }
    addTransferFiles(sstableDetails);
}