本文整理汇总了Java中com.netflix.astyanax.MutationBatch.isEmpty方法的典型用法代码示例。如果您正苦于以下问题:Java MutationBatch.isEmpty方法的具体用法?Java MutationBatch.isEmpty怎么用?Java MutationBatch.isEmpty使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.netflix.astyanax.MutationBatch
的用法示例。
在下文中一共展示了MutationBatch.isEmpty方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: purge
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Deletes every row belonging to the given storage: scans all shard keys and queues a
 * delete for the delta, blocked-delta and audit column families of each row, flushing
 * the mutation batch every 100 rows so it never grows unbounded.
 */
@Override
public void purge(AstyanaxStorage storage, Runnable progress) {
    DeltaPlacement placement = (DeltaPlacement) storage.getPlacement();
    CassandraKeyspace keyspace = placement.getKeyspace();
    MutationBatch batch = keyspace.prepareMutationBatch(SorConsistencies.toAstyanax(WriteConsistency.STRONG));
    // Scan all the shards and delete all the rows we find.
    for (Iterator<String> keys = _keyScanner.scanKeys(storage, ReadConsistency.STRONG); keys.hasNext(); ) {
        ByteBuffer row = storage.getRowKey(keys.next());
        batch.withRow(placement.getDeltaColumnFamily(), row).delete();
        batch.withRow(placement.getBlockedDeltaColumnFamily(), row).delete();
        batch.withRow(placement.getAuditColumnFamily(), row).delete();
        // Periodic flush: report progress, execute the accumulated deletes, then reuse the batch.
        if (batch.getRowCount() >= 100) {
            progress.run();
            execute(batch, "purge %d records from placement %s", batch.getRowCount(), placement.getName());
            batch.discardMutations();
        }
    }
    // Flush whatever remains from the final partial batch.
    if (!batch.isEmpty()) {
        progress.run();
        execute(batch, "purge %d records from placement %s", batch.getRowCount(), placement.getName());
    }
}
示例2: doAnAction
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Executes the action described by the supplied JSON order.
 * <p>
 * Recognized "activity" values: "enable" / "disable" toggle this job creator, and
 * "createjobs" asks {@code createJobs} to queue jobs which are then persisted in a
 * single Cassandra mutation batch. Orders without an "activity" entry, or with an
 * unrecognized value, are silently ignored (same as the original behavior).
 *
 * @param order JSON order; only its "activity" member is inspected here
 * @throws Exception propagated from enabling/disabling or job creation
 */
public void doAnAction(JsonObject order) throws Exception {
    // Guard clause: nothing to do when no activity is requested.
    if (!order.has("activity")) {
        return;
    }
    // Hoist the repeated order.get("activity").getAsString() lookup so the JSON
    // value is extracted exactly once instead of up to three times.
    String activity = order.get("activity").getAsString();
    switch (activity) {
    case "enable":
        setEnabled(true);
        Loggers.Manager.info("Enable job creator:\t" + toString());
        break;
    case "disable":
        setEnabled(false);
        Loggers.Manager.info("Disable job creator:\t" + toString());
        break;
    case "createjobs":
        try {
            MutationBatch mutator = CassandraDb.prepareMutationBatch();
            createJobs(mutator);
            // Skip the Cassandra round-trip when createJobs queued nothing.
            if (!mutator.isEmpty()) {
                mutator.execute();
                Loggers.Manager.info("Create jobs:\t" + toString());
            }
        } catch (ConnectionException e) {
            manager.getServiceException().onCassandraError(e);
        }
        break;
    default:
        // Unknown activities were ignored by the original if/else chain; keep that behavior.
        break;
    }
}
示例3: updateWorkerStatus
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Serializes the current status of every worker and writes it to the CF_WORKERS
 * column family in one mutation batch. Cassandra connection errors are routed to
 * the manager's service-exception handler; the elapsed time is always logged.
 *
 * @param workers workers whose exporter state should be persisted; must not be null
 * @param manager used to report Cassandra connection errors
 * @throws NullPointerException if {@code workers} is null
 */
static void updateWorkerStatus(List<WorkerNG> workers, AppManager manager) {
    if (workers == null) {
        // Fixed grammar of the original message ("can't to be null").
        throw new NullPointerException("\"workers\" can't be null");
    }
    long start_time = System.currentTimeMillis();
    try {
        MutationBatch mutator = CassandraDb.prepareMutationBatch();
        // Enhanced-for removes the manual index bookkeeping, and the exporter
        // variable is now scoped to a single iteration instead of the whole method.
        for (WorkerNG worker : workers) {
            WorkerExporter we = worker.getExporter();
            we.update();
            mutator.withRow(CF_WORKERS, we.reference_key).putColumn("source", MyDMAM.gson_kit.getGson().toJson(we), InstanceStatus.TTL);
            Loggers.Manager.trace("Update worker status [" + we.reference_key + "], " + we.worker_class);
        }
        // An empty worker list queues nothing; avoid a pointless execute call.
        if (!mutator.isEmpty()) {
            mutator.execute();
        }
    } catch (ConnectionException e) {
        manager.getServiceException().onCassandraError(e);
    }
    Loggers.Manager.debug("Update all workers status, took " + (System.currentTimeMillis() - start_time) + " ms");
}
示例4: purge
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Deletes every live blob row in the given storage: range-scans all shards, queues a
 * row delete for each non-empty row, and flushes the mutation batch every 100 rows.
 */
@Override
public void purge(AstyanaxStorage storage, Runnable progress) {
    BlobPlacement placement = (BlobPlacement) storage.getPlacement();
    CassandraKeyspace keyspace = placement.getKeyspace();
    ColumnFamily<ByteBuffer, ?> cf = placement.getBlobColumnFamily();
    // Limit the query to a single column since we mainly just want the row keys (but not zero columns because
    // then we couldn't distinguish a live row from a row that has been deleted already).
    ByteBufferRange oneColumn = new RangeBuilder().setLimit(1).build();
    MutationBatch batch = keyspace.prepareMutationBatch(CONSISTENCY_STRONG);
    LimitCounter noLimit = LimitCounter.max();
    // Range query all the shards and delete all the rows we find.
    Iterator<ByteBufferRange> shards = storage.scanIterator(null);
    while (shards.hasNext()) {
        ByteBufferRange shard = shards.next();
        Iterator<Row<ByteBuffer, Composite>> rows = scanInternal(placement, shard, oneColumn, noLimit);
        while (rows.hasNext()) {
            Row<ByteBuffer, Composite> row = rows.next();
            // Zero columns means the row is a range ghost (already deleted) — skip it.
            if (!row.getColumns().isEmpty()) {
                batch.withRow(cf, row.getKey()).delete();
                // Periodic flush keeps the batch bounded; progress is reported before each execute.
                if (batch.getRowCount() >= 100) {
                    progress.run();
                    execute(batch);
                    batch.discardMutations();
                }
            }
        }
    }
    // Flush any deletes left in the final partial batch.
    if (!batch.isEmpty()) {
        progress.run();
        execute(batch);
    }
}
示例5: putShardState
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Persists the rollup slot states for a shard: one column per (granularity, slot)
 * pair, written in a single mutation batch under the shard's row key.
 * <p>
 * Fix applied: the {@code if (!mutationBatch.isEmpty())} guard previously wrapped a
 * multi-line try/catch without braces — legal but an easy source of future edit
 * mistakes; it is now braced. Behavior is unchanged.
 *
 * @param shard     shard whose state row is updated
 * @param slotTimes per-granularity map of slot number to its update stamp
 * @throws IOException wrapping any Cassandra connection failure
 */
@Override
public void putShardState(int shard, Map<Granularity, Map<Integer, UpdateStamp>> slotTimes) throws IOException {
    AstyanaxIO astyanaxIO = AstyanaxIO.singleton();
    Timer.Context ctx = Instrumentation.getWriteTimerContext(CassandraModel.CF_METRICS_STATE_NAME);
    try {
        MutationBatch mutationBatch = astyanaxIO.getKeyspace().prepareMutationBatch();
        ColumnListMutation<SlotState> mutation = mutationBatch.withRow(CassandraModel.CF_METRICS_STATE, (long) shard);
        for (Map.Entry<Granularity, Map<Integer, UpdateStamp>> granEntry : slotTimes.entrySet()) {
            Granularity g = granEntry.getKey();
            for (Map.Entry<Integer, UpdateStamp> entry : granEntry.getValue().entrySet()) {
                // Column name encodes granularity,slot,state; column value is the stamp's timestamp.
                SlotState slotState = new SlotState(g, entry.getKey(), entry.getValue().getState());
                mutation.putColumn(slotState, entry.getValue().getTimestamp());
                /*
                Note: this method used to set the timestamp of the Cassandra column to entry.getValue().getTimestamp() * 1000, i.e. the collection time.
                That implementation was changed because it could cause delayed metrics not to rollup.
                Consider you are getting out of order metrics M1 and M2, with collection times T1 and T2 with T2>T1, belonging to same slot
                Assume M2 arrives first. The slot gets marked active and rolled up and the state is set as Rolled. Now, assume M1 arrives. We update the slot state to active,
                set the slot timestamp to T1, and while persisting we set it, we set the column timestamp to be T1*1000, but because the T1 < T2, Cassandra wasn't updating it.
                */
            }
        }
        // An empty slotTimes map queues nothing; skip the execute in that case.
        if (!mutationBatch.isEmpty()) {
            try {
                mutationBatch.execute();
            } catch (ConnectionException e) {
                Instrumentation.markWriteError(e);
                LOG.error("Error persisting shard state", e);
                throw new IOException(e);
            }
        }
    } finally {
        // Always stop the write timer, even when the write fails.
        ctx.stop();
    }
}
示例6: performInstanceActions
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Reads all pending instance actions from the CF_ACTION column family and dispatches
 * each one to the matching receiver (matched by target class simple name and reference
 * key, both case-insensitively). Every action found is deleted from the database via
 * the mutation batch regardless of whether a receiver handled it here; receiver
 * failures are logged and do not abort the remaining actions.
 *
 * @param all_receviers candidate receivers on this instance (parameter name kept for
 *                      source compatibility despite the typo)
 * @return false when no action was pending, true otherwise
 * @throws Exception propagated from the Cassandra all-rows read or the final execute
 */
static boolean performInstanceActions(ArrayList<InstanceActionReceiver> all_receviers) throws Exception {
    final List<InstanceAction> pending_actions = new ArrayList<InstanceAction>(1);
    CassandraDb.allRowsReader(CF_ACTION, new AllRowsFoundRow() {
        public void onFoundRow(Row<String, String> row) throws Exception {
            pending_actions.add(MyDMAM.gson_kit.getGson().fromJson(row.getColumns().getColumnByName("source").getStringValue(), InstanceAction.class));
        }
    });
    if (pending_actions.isEmpty()) {
        return false;
    }
    MutationBatch mutator = CassandraDb.prepareMutationBatch();
    for (InstanceAction current_instance_action : pending_actions) {
        String target_class_name = current_instance_action.target_class_name;
        // Find the receiver matching both the target class name and the reference key.
        // (Local renamed from the original misspelled "recevier".)
        InstanceActionReceiver receiver = null;
        for (InstanceActionReceiver candidate : all_receviers) {
            if (candidate.getClassToCallback().getSimpleName().equalsIgnoreCase(target_class_name)
                    && candidate.getReferenceKey().equalsIgnoreCase(current_instance_action.target_reference_key)) {
                receiver = candidate;
                break;
            }
        }
        if (receiver == null) {
            if (Loggers.Manager.isDebugEnabled()) {
                Loggers.Manager.debug("An instance action exists in database but this Instance is not concerned " + current_instance_action);
            }
            continue;
        }
        Loggers.Manager.info("Do an Instance Action " + current_instance_action);
        // Queue the deletion first so the action is consumed even if the receiver fails.
        current_instance_action.delete(mutator);
        try {
            receiver.doAnAction(current_instance_action.order);
        } catch (Exception e) {
            Loggers.Manager.error("Problem with an Instance Action " + current_instance_action, e);
        }
    }
    if (!mutator.isEmpty()) {
        mutator.execute();
    }
    return true;
}
示例7: run
import com.netflix.astyanax.MutationBatch; //导入方法依赖的package包/类
/**
 * Watch-folder supervision loop: every 10 seconds, loads all in-process founded files,
 * looks up the status of every job they reference, and switches to ERROR any file with
 * a missing job status or a job in a terminal failure state.
 */
public void run() {
    while (isWantToRun()) {
        try {
            ArrayList<AbstractFoundedFile> active_list = WatchFolderDB.getAllInProcess();
            if (active_list.isEmpty()) {
                stoppableSleep(10000);
                continue;
            }
            // Collect every job key referenced by the active files; files without
            // job targets contribute nothing.
            List<String> job_list = active_list.stream()
                    .filter(founded -> founded.map_job_target != null && !founded.map_job_target.isEmpty())
                    .flatMap(founded -> founded.map_job_target.keySet().stream())
                    .collect(Collectors.toList());
            if (job_list.isEmpty()) {
                stoppableSleep(10000);
                continue;
            }
            LinkedHashMap<String, JobStatus> job_statuses = JobNG.Utility.getJobsStatusByKeys(job_list);
            MutationBatch mutator = CassandraDb.prepareMutationBatch();
            active_list.stream()
                    // Bug fix: the original re-streamed the unfiltered active_list here, so an
                    // entry with map_job_target == null would NPE at map_job_target.keySet()
                    // — and that NPE escaped the ConnectionException-only catch, killing the
                    // loop. Filtering null/empty maps first preserves the original results
                    // (an empty map never matched anyway) while removing the crash.
                    .filter(founded -> founded.map_job_target != null && !founded.map_job_target.isEmpty())
                    .filter(founded -> founded.map_job_target.keySet().stream().anyMatch(job_key -> {
                        // A missing status counts as a failure, as does any terminal error state.
                        if (!job_statuses.containsKey(job_key)) {
                            return true;
                        }
                        return job_statuses.get(job_key).isInThisStatus(JobStatus.ERROR, JobStatus.CANCELED, JobStatus.STOPPED, JobStatus.TOO_LONG_DURATION, JobStatus.TOO_OLD);
                    }))
                    .forEach(founded -> {
                        Loggers.Transcode_WatchFolder.warn("Detected a failed operation for founded file " + founded.toString() + "; switch it to error");
                        founded.status = Status.ERROR;
                        founded.saveToCassandra(mutator, true);
                    });
            // Only hit Cassandra when at least one file was switched to error.
            if (!mutator.isEmpty()) {
                mutator.execute();
            }
        } catch (ConnectionException e) {
            Loggers.Transcode.error("Loose Cassandra connection", e);
        }
        stoppableSleep(10000);
    }
}