This article collects typical usage examples of the Java method com.netflix.astyanax.MutationBatch.mergeShallow. If you are unsure how MutationBatch.mergeShallow is used, or are looking for real-world examples of it, the curated code samples below may help. You can also explore further usage examples of its enclosing class, com.netflix.astyanax.MutationBatch.
The following presents 15 code examples of the MutationBatch.mergeShallow method, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better Java code samples.
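Before the project examples, here is a minimal, self-contained sketch of the common mergeShallow pattern: prepare several MutationBatch instances independently, merge them shallowly into one batch, and call execute() once. The keyspace usage, column families, and key/column values in this sketch are hypothetical placeholders for illustration and are not taken from any of the examples that follow.

import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.StringSerializer;

public class MergeShallowSketch {

    // Hypothetical column families, used only for this illustration
    private static final ColumnFamily<Long, String> MAIN_CF =
            ColumnFamily.newColumnFamily("main", LongSerializer.get(), StringSerializer.get());
    private static final ColumnFamily<String, String> INDEX_CF =
            ColumnFamily.newColumnFamily("index", StringSerializer.get(), StringSerializer.get());

    public void writeBoth(Keyspace keyspace, long id, String alias) throws ConnectionException {
        // Each component prepares its own batch against the same keyspace
        MutationBatch main = keyspace.prepareMutationBatch();
        main.withRow(MAIN_CF, id).putColumn("value", "payload");

        MutationBatch index = keyspace.prepareMutationBatch();
        index.withRow(INDEX_CF, alias).putColumn("id", id);

        // mergeShallow pulls the row mutations of 'index' into 'main',
        // so a single execute() submits both sets of mutations together
        main.mergeShallow(index);
        main.execute();
    }
}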
Example 1: doWriteContent
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
protected void doWriteContent(Content content, @Nullable Content previous) {
    try {
        long id = content.getId().longValue();
        MutationBatch batch = keyspace.prepareMutationBatch();
        batch.setConsistencyLevel(writeConsistency);
        marshaller.marshallInto(
                content.getId(),
                batch.withRow(mainCf, id),
                content,
                java.util.Optional.ofNullable(previous),
                true
        );
        batch.mergeShallow(aliasIndex.mutateAliases(content, previous));
        batch.execute();
        log.trace("Written content id " + id);
    } catch (Exception e) {
        throw new CassandraPersistenceException(content.toString(), e);
    }
}
Example 2: doWrite
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
protected void doWrite(Topic topic, @Nullable Topic previous) {
    // Allow migration of owl content that has changed source
    //checkArgument(previous == null || topic.getSource().equals(previous.getSource()));
    try {
        long id = topic.getId().longValue();
        MutationBatch batch = keyspace.prepareMutationBatch();
        batch.setConsistencyLevel(writeConsistency);
        batch.withRow(mainCf, id)
                .putColumn(valueColumn, topicSerializer.serialize(topic));
        batch.mergeShallow(aliasIndex.mutateAliases(topic, previous));
        batch.execute();
    } catch (Exception e) {
        throw new CassandraPersistenceException(topic.toString(), e);
    }
}
Example 3: removeEdgeTypeFromSource
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch removeEdgeTypeFromSource( final ApplicationScope scope, final Id sourceNode, final String type,
                                               final long timestamp ) {
    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    // While a data migration is in flight, write through both the old and the new
    // serialization by merging their batches; otherwise only the target serialization is used.
    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.removeEdgeTypeFromSource( scope, sourceNode, type, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeEdgeTypeFromSource( scope, sourceNode, type, timestamp ) );
        return aggregateBatch;
    }

    return migration.to.removeEdgeTypeFromSource( scope, sourceNode, type, timestamp );
}
Example 4: removeIdTypeFromSource
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch removeIdTypeFromSource( final ApplicationScope scope, final Id sourceNode, final String type,
                                             final String idType, final long timestamp ) {
    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch
            .mergeShallow( migration.from.removeIdTypeFromSource( scope, sourceNode, type, idType, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeIdTypeFromSource( scope, sourceNode, type, idType, timestamp ) );
        return aggregateBatch;
    }

    return migration.to.removeIdTypeFromSource( scope, sourceNode, type, idType, timestamp );
}
Example 5: removeIdTypeToTarget
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch removeIdTypeToTarget( final ApplicationScope scope, final Id targetNode, final String type,
                                           final String idType, final long timestamp ) {
    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.removeIdTypeToTarget( scope, targetNode, type, idType, timestamp ) );
        aggregateBatch.mergeShallow( migration.to.removeIdTypeToTarget( scope, targetNode, type, idType, timestamp ) );
        return aggregateBatch;
    }

    return migration.to.removeIdTypeToTarget( scope, targetNode, type, idType, timestamp );
}
Example 6: write
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch write( final ApplicationScope applicationScope, final MvccLogEntry entry ) {
    final MigrationRelationship<MvccLogEntrySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.write( applicationScope, entry ) );
        aggregateBatch.mergeShallow( migration.to.write( applicationScope, entry ) );
        return aggregateBatch;
    }

    return migration.to.write( applicationScope, entry );
}
Example 7: delete
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch delete( final ApplicationScope applicationScope, final Id entityId, final UUID version ) {
    final MigrationRelationship<MvccLogEntrySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.delete( applicationScope, entityId, version ) );
        aggregateBatch.mergeShallow( migration.to.delete( applicationScope, entityId, version ) );
        return aggregateBatch;
    }

    return migration.to.delete( applicationScope, entityId, version );
}
Example 8: write
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch write( final ApplicationScope context, final MvccEntity entity ) {
    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.write( context, entity ) );
        aggregateBatch.mergeShallow( migration.to.write( context, entity ) );
        return aggregateBatch;
    }

    return migration.to.write( context, entity );
}
Example 9: mark
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch mark( final ApplicationScope context, final Id entityId, final UUID version ) {
    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.mark( context, entityId, version ) );
        aggregateBatch.mergeShallow( migration.to.mark( context, entityId, version ) );
        return aggregateBatch;
    }

    return migration.to.mark( context, entityId, version );
}
Example 10: delete
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch delete( final ApplicationScope context, final Id entityId, final UUID version ) {
    final MigrationRelationship<MvccEntitySerializationStrategy> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.delete( context, entityId, version ) );
        aggregateBatch.mergeShallow( migration.to.delete( context, entityId, version ) );
        return aggregateBatch;
    }

    return migration.to.delete( context, entityId, version );
}
Example 11: call
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public void call( final CollectionIoEvent<MvccEntity> idIoEvent ) {
    final MvccEntity entity = idIoEvent.getEvent();
    MvccValidationUtils.verifyMvccEntityOptionalEntity( entity );

    final Id entityId = entity.getId();
    final UUID version = entity.getVersion();
    final ApplicationScope applicationScope = idIoEvent.getEntityCollection();

    logger.debug("Inserting tombstone for entity {} at version {}", entityId, version );

    final MvccLogEntry startEntry =
        new MvccLogEntryImpl( entityId, version, Stage.COMMITTED, MvccLogEntry.State.DELETED );
    final MutationBatch entityStateBatch = logStrat.write( applicationScope, startEntry );

    // insert a "cleared" value into the versions. Post processing should actually delete
    try {
        final MutationBatch entityBatch = entityStrat.mark( applicationScope, entityId, version );
        entityStateBatch.mergeShallow( entityBatch );
        entityStateBatch.execute();
    }
    catch ( ConnectionException e ) {
        throw new RuntimeException( "Unable to mark entry as deleted" );
    }

    // TODO: do we need this or can we rely on UniqueCleanup + Cassandra replication?
    //
    // // actorSystemFig may be null in testing
    // if ( actorSystemFig != null && actorSystemFig.getEnabled() ) {
    //
    //     String region = idIoEvent.getAuthoritativeRegion();
    //     if ( region == null ) {
    //         region = uniqueValuesFig.getAuthoritativeRegion();
    //     }
    //     if ( region == null ) {
    //         region = actorSystemFig.getRegionLocal();
    //     }
    //
    //     try {
    //         uniqueValuesService.releaseUniqueValues( applicationScope, entityId, version, region );
    //     } catch (UniqueValueException e) {
    //         throw new RuntimeException( "Unable to release unique values for entity " + entityId );
    //     }
    // }
}
Example 12: confirmUniqueFields
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
private CollectionIoEvent<MvccEntity> confirmUniqueFields(CollectionIoEvent<MvccEntity> ioEvent) {
    final MvccEntity mvccEntity = ioEvent.getEvent();
    MvccValidationUtils.verifyMvccEntityWithEntity( mvccEntity );

    final Id entityId = mvccEntity.getId();
    final UUID version = mvccEntity.getVersion();
    final ApplicationScope applicationScope = ioEvent.getEntityCollection();

    // set the version into the entity
    final Entity entity = mvccEntity.getEntity().get();
    EntityUtils.setVersion( entity, version );

    MvccValidationUtils.verifyMvccEntityWithEntity( ioEvent.getEvent() );
    ValidationUtils.verifyTimeUuid( version, "version" );

    final MvccLogEntry startEntry =
        new MvccLogEntryImpl( entityId, version, Stage.COMMITTED, MvccLogEntry.State.COMPLETE );
    MutationBatch logMutation = logEntryStrat.write( applicationScope, startEntry );

    // now get our actual insert into the entity data
    MutationBatch entityMutation = entityStrat.write( applicationScope, mvccEntity );

    // merge the 2 into 1 mutation
    logMutation.mergeShallow( entityMutation );

    // akkaFig may be null when this is called from JUnit tests
    if ( actorSystemFig != null && actorSystemFig.getEnabled() && uniqueValuesFig.getUnqiueValueViaCluster() ) {
        String authoritativeRegion = ioEvent.getAuthoritativeRegion();
        if ( StringUtils.isEmpty(authoritativeRegion) ) {
            authoritativeRegion = uniqueValuesFig.getAuthoritativeRegion();
        }
        if ( StringUtils.isEmpty(authoritativeRegion) ) {
            authoritativeRegion = actorSystemFig.getRegionLocal();
        }
        confirmUniqueFieldsAkka( mvccEntity, version, applicationScope, authoritativeRegion );
    } else {
        confirmUniqueFields( mvccEntity, version, applicationScope, logMutation );
    }

    try {
        logMutation.execute();
    }
    catch ( ConnectionException e ) {
        logger.error( "Failed to execute write asynchronously ", e );
        throw new WriteCommitException( mvccEntity, applicationScope,
            "Failed to execute write asynchronously ", e );
    }

    return ioEvent;
}
Example 13: createBatch
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
/**
 * Create a mutation batch
 */
public MutationBatch createBatch( final ApplicationScope scope, final Collection<Shard> shards,
                                  final UUID opTimestamp ) {
    final MutationBatch batch =
        keyspace.prepareMutationBatch().withConsistencyLevel( cassandraConfig.getWriteCL() )
                .withTimestamp( opTimestamp.timestamp() );

    final C column = getDirectedEdge();
    final MultiTenantColumnFamily<ScopedRowKey<R>, C> columnFamily = getColumnFamily();
    final boolean isDeleted = isDeleted();

    for ( Shard shard : shards ) {
        final R rowKey = getRowKey( shard );
        writeEdge( batch, columnFamily, scope, rowKey, column, shard, isDeleted );

        if ( logger.isTraceEnabled() && getDirectedEdge() instanceof DirectedEdge ) {
            DirectedEdge directedEdge = (DirectedEdge) getDirectedEdge();
            if ( shard != null && shard.getShardEnd().isPresent()
                    && directedEdge.timestamp > shard.getShardEnd().get().timestamp ) {
                logger.trace("Writing edge past shard end for edge: {}, shard: {}", directedEdge, shard );
            }
        }

        // if an edge is being written to this shard, un-delete it in case it was previously marked
        // don't un-delete if the edge write is to actually remove an edge
        // Usergrid allows entities to be written with a UUID generated from the past (time)
        if ( shard.isDeleted() && !isDeleted ) {
            logger.info("Shard is deleted. Un-deleting as new data is being written to the shard - {}", shard);
            shard.setDeleted(false);
            batch.mergeShallow(edgeShardSerialization.writeShardMeta(scope, shard, getDirectedEdgeMeta()));
        }
    }

    return batch;
}
Example 14: removeEdgeTypeToTarget
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Override
public MutationBatch removeEdgeTypeToTarget( final ApplicationScope scope, final Edge edge ) {
    final MigrationRelationship<EdgeMetadataSerialization> migration = getMigrationRelationShip();

    if ( migration.needsMigration() ) {
        final MutationBatch aggregateBatch = keyspace.prepareMutationBatch();
        aggregateBatch.mergeShallow( migration.from.removeEdgeTypeToTarget( scope, edge ) );
        aggregateBatch.mergeShallow( migration.to.removeEdgeTypeToTarget( scope, edge ) );
        return aggregateBatch;
    }

    return migration.to.removeEdgeTypeToTarget( scope, edge );
}
Example 15: testShardDelete
import com.netflix.astyanax.MutationBatch; // import the package/class the method depends on
@Test
public void testShardDelete() throws ConnectionException {
    final Id now = IdGenerator.createId( "test" );

    final long timestamp = 2000L;

    final Shard shard1 = new Shard( 1000L, timestamp, false );
    final Shard shard2 = new Shard( shard1.getShardIndex(), timestamp * 2, true );
    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp * 3, true );

    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );

    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );
    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );
    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );
    batch.execute();

    Iterator<Shard> results =
        edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    // Latest timestamp comes first
    assertEquals( shard3, results.next() );

    // This should now not remove anything
    edgeShardSerialization.removeShardMeta( scope, shard3, sourceEdgeMeta ).execute();

    // Get iterator again
    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    // We should still have shard2 stored
    assertEquals( shard2, results.next() );
}