本文整理汇总了Java中com.datastax.driver.mapping.MappingManager.mapper方法的典型用法代码示例。如果您正苦于以下问题:Java MappingManager.mapper方法的具体用法?Java MappingManager.mapper怎么用?Java MappingManager.mapper使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类com.datastax.driver.mapping.MappingManager
的用法示例。
在下文中一共展示了MappingManager.mapper方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: MyInterceptor
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Creates an interceptor whose target DataStax {@code Mapper} is built lazily
 * on first use via {@code targetInitializer}.
 *
 * @param entityCls the annotated entity class the mapper will handle
 * @param singleton whether one shared mapper instance is cached ({@code Lazy})
 *                  or a fresh one is produced per invocation
 */
MyInterceptor(Class<?> entityCls, boolean singleton) {
    super();
    this.entityCls = entityCls;
    this.targetInitializer = () -> {
        if (singleton) {
            log.info("Creating actual mapper for entity: " + entityCls.getName());
        }
        // Prefer the session configured on the enclosing scanner;
        // otherwise fall back to the Session bean from the main context.
        Session resolved = MapperScannerConfigurer.this.session != null
                ? MapperScannerConfigurer.this.session
                : mainContext.getBean(Session.class);
        return new MappingManager(resolved).mapper(entityCls);
    };
    this.singleton = singleton;
    this.singletonTarget = singleton ? new Lazy<>(targetInitializer) : null;
}
示例2: init
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Connects to the configured Cassandra cluster and initializes the mapper
 * for the given entity class.
 *
 * @param tClass the annotated entity class to create a {@code Mapper} for
 * @throws RuntimeException any initialization failure is logged (with cause)
 *                          and rethrown unchanged
 */
public void init(Class<T> tClass) {
    try {
        Cluster.Builder builder = Cluster.builder();
        for (String node : nodes.split(",")) {
            builder.addContactPoint(node).withPort(Integer.parseInt(port));
            LOGGER.info(String.format("Added cassandra node : %s:%s", node, port));
        }
        cluster = builder.build();
        // Connect to the explicit keyspace when one is configured.
        session = keyspace != null ? cluster.connect(keyspace) : cluster.connect();
        MappingManager mappingManager = new MappingManager(session);
        mapper = mappingManager.mapper(tClass);
    } catch (Exception e) {
        // Fixed: log the exception itself, not just a bare message,
        // so the stack trace is preserved in the log before rethrowing.
        LOGGER.error("Error initializing CassandraDao", e);
        throw e;
    }
}
示例3: NativeSerializer
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Builds a serializer backed by the DataStax object mapper: analyzes the
 * persistent class, creates the schema, and applies any configured
 * read/write consistency levels as the mapper's default options.
 *
 * @param cassandraClient provides the session and consistency configuration
 * @param keyClass        key class of the persistent entity
 * @param persistentClass the entity class to map
 * @param mapping         Gora-side Cassandra mapping metadata
 * @throws RuntimeException if analyzing the persistent class fails (cause attached)
 */
NativeSerializer(CassandraClient cassandraClient, Class<K> keyClass, Class<T> persistentClass, CassandraMapping mapping) {
    super(cassandraClient, keyClass, persistentClass, mapping);
    try {
        analyzePersistent();
    } catch (Exception e) {
        // Fixed: keep the original exception as the cause instead of discarding it.
        throw new RuntimeException("Error occurred while analyzing the persistent class, :" + e.getMessage(), e);
    }
    this.createSchema();
    MappingManager mappingManager = new MappingManager(cassandraClient.getSession());
    mapper = mappingManager.mapper(persistentClass);
    String writeLevel = cassandraClient.getWriteConsistencyLevel();
    if (writeLevel != null) {
        // Build the option once; it applies to both save and delete.
        Mapper.Option writeOption = Mapper.Option.consistencyLevel(ConsistencyLevel.valueOf(writeLevel));
        mapper.setDefaultDeleteOptions(writeOption);
        mapper.setDefaultSaveOptions(writeOption);
    }
    String readLevel = cassandraClient.getReadConsistencyLevel();
    if (readLevel != null) {
        mapper.setDefaultGetOptions(Mapper.Option.consistencyLevel(ConsistencyLevel.valueOf(readLevel)));
    }
}
示例4: ClusteredLoader
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Prepares the full family of range/point queries for a clustered table (or
 * materialized view): bounded/unbounded selects in both sort orders, point
 * lookups and deletes keyed by the last clustering column.
 *
 * @param mapper    mapper for the base entity; supplies session and metadata
 * @param dataClass entity class rows are mapped to
 * @param ckeyClass type of the clustering-key property
 * @param tableName table or materialized view to query
 * @throws IllegalArgumentException if no table or materialized view with the
 *                                  given name exists in the keyspace
 */
public ClusteredLoader(Mapper<Data> mapper, Class<Data> dataClass, Class<CKey> ckeyClass, String tableName) {
    MappingManager manager = mapper.getManager();
    session = manager.getSession();
    this.mapper = manager.mapper(dataClass);
    String keyspace = mapper.getTableMetadata().getKeyspace().getName();
    // A materialized view takes precedence over a plain table of the same name.
    MaterializedViewMetadata mv = mapper.getTableMetadata().getKeyspace().getMaterializedView(tableName);
    AbstractTableMetadata tableMetadata = mv == null ? mapper.getTableMetadata().getKeyspace().getTable(tableName) : mv;
    if (tableMetadata == null) {
        // Fixed: original message was missing the space before "found".
        throw new IllegalArgumentException("No table or materialized view " + keyspace + "." + tableName + " found");
    }
    List<ColumnMetadata> primaryKey = tableMetadata.getPrimaryKey();
    // Equality conjunction over all primary-key columns except the last clustering column.
    String pkEq = exceptLast(primaryKey).stream()
            .map(c -> c.getName() + "=?")
            .collect(Collectors.joining(" and "));
    List<ColumnMetadata> clusteringColumns = tableMetadata.getClusteringColumns();
    String orderByDesc = orderBy(clusteringColumns, "DESC");
    String orderByAsc = orderBy(clusteringColumns, "ASC");
    // The last clustering column is the range/point index for the queries below.
    String indexColumn = clusteringColumns.get(clusteringColumns.size() - 1).getName();
    indexAccessor = CassandraUtil.findProperty(dataClass, ckeyClass, indexColumn);
    // Shared fragments; pkEq is passed as a format argument (never re-formatted).
    String from = keyspace + "." + tableName;
    selectUnbounded = prepare(String.format("select * from %s where %s order by %s limit ?", from, pkEq, orderByDesc));
    selectBefore = prepare(String.format("select * from %s where %s and %s < ? order by %s limit ?", from, pkEq, indexColumn, orderByDesc));
    selectAfter = prepare(String.format("select * from %s where %s and %s > ? order by %s limit ?", from, pkEq, indexColumn, orderByDesc));
    selectBeforeAfter = prepare(String.format("select * from %s where %s and %s < ? and %s > ? order by %s limit ?", from, pkEq, indexColumn, indexColumn, orderByDesc));
    selectUnboundedAsc = prepare(String.format("select * from %s where %s order by %s limit ?", from, pkEq, orderByAsc));
    selectBeforeAsc = prepare(String.format("select * from %s where %s and %s < ? order by %s limit ?", from, pkEq, indexColumn, orderByAsc));
    selectAfterAsc = prepare(String.format("select * from %s where %s and %s > ? order by %s limit ?", from, pkEq, indexColumn, orderByAsc));
    selectBeforeAfterAsc = prepare(String.format("select * from %s where %s and %s < ? and %s > ? order by %s limit ?", from, pkEq, indexColumn, indexColumn, orderByAsc));
    selectByIdKey = prepare(String.format("select * from %s where %s and %s=?", from, pkEq, indexColumn));
    deleteByIdKey = prepare(String.format("delete from %s where %s and %s=?", from, pkEq, indexColumn));
    selectAllById = prepare(String.format("select * from %s where %s", from, pkEq));
    deleteAllById = prepare(String.format("delete from %s where %s", from, pkEq));
}
示例5: CassandraTokenStore
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Creates a token store backed by Cassandra.
 *
 * @param session    live Cassandra session; must not be {@code null}
 * @param serializer serializer for token payloads; defaults to {@code XStreamSerializer}
 * @param schema     token table schema; defaults to {@code TokenSchema.builder().build()}
 * @throws IllegalArgumentException if {@code session} is {@code null}
 */
public CassandraTokenStore(Session session, Serializer serializer, TokenSchema schema) {
    if (session == null) {
        throw new IllegalArgumentException("Parameter 'session' cannot be null");
    }
    this.serializer = getOrDefault(serializer, XStreamSerializer::new);
    this.schema = getOrDefault(schema, TokenSchema.builder().build());
    this.tokenMapper = new MappingManager(session).mapper(TokenEntry.class);
}
示例6: start
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Opens the Cassandra connection, executes this split's query, and positions
 * the reader on the first record.
 *
 * @return {@code true} if a first record is available
 * @throws IOException per the reader contract (not thrown directly here)
 */
@Override
public boolean start() throws IOException {
    LOG.debug("Starting Cassandra reader");
    cluster = getCluster(source.spec.hosts(), source.spec.port(), source.spec.username(),
        source.spec.password(), source.spec.localDc(), source.spec.consistencyLevel());
    session = cluster.connect();
    // Fixed: parameterized logging avoids building the message string when
    // debug is disabled. NOTE(review): assumes LOG is an SLF4J logger — confirm.
    LOG.debug("Query: {}", source.splitQuery);
    resultSet = session.execute(source.splitQuery);
    final MappingManager mappingManager = new MappingManager(session);
    // Raw Mapper kept: the entity type is only known at runtime via source.spec.entity().
    Mapper mapper = mappingManager.mapper(source.spec.entity());
    iterator = mapper.map(resultSet).iterator();
    return advance();
}
示例7: setUp
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Benchmark setup: starts the embedded Cassandra helper and builds mappers
 * for the 4-field and 16-field test entities.
 */
@Setup
public void setUp() throws InterruptedException, TTransportException, ConfigurationException, IOException {
    datastaxHelper = new DatastaxHelper();
    MappingManager manager = new MappingManager(datastaxHelper.session);
    mapper4 = manager.mapper(Object4Fields.class);
    mapper16 = manager.mapper(Object16Fields.class);
}
示例8: init
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Wires up the picture accessors, the {@code Picture} mapper, and the
 * reusable prepared statements after dependency injection completes.
 */
@PostConstruct
public void init() {
    // Prepared statements are independent of the mapping manager.
    findByIds = session.prepare("SELECT * from picture WHERE id IN ?;");
    truncateStmt = session.prepare("TRUNCATE picture");
    MappingManager mappingManager = new MappingManager(session);
    pictureAccesor = mappingManager.createAccessor(PictureAccessor.class);
    generalCounterAccessor = mappingManager.createAccessor(GeneralCounterAccessor.class);
    userCounterAccessor = mappingManager.createAccessor(UserCounterAccessor.class);
    mapper = mappingManager.mapper(Picture.class);
}
示例9: init
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Wires up the picture-search accessors, the {@code PictureSearch} mapper,
 * and the reusable prepared statements after dependency injection completes.
 */
@PostConstruct
public void init() {
    // Prepared statements are independent of the mapping manager.
    findAllStmt = session.prepare("SELECT * FROM pictureSearch");
    truncateStmt = session.prepare("TRUNCATE pictureSearch");
    MappingManager mappingManager = new MappingManager(session);
    pictureSearchAccesor = mappingManager.createAccessor(PictureSearchAccesor.class);
    generalCounterAccessor = mappingManager.createAccessor(GeneralCounterAccessor.class);
    pictureSearchCounterAccessor = mappingManager.createAccessor(PictureSearchCounterAccessor.class);
    mapper = mappingManager.mapper(PictureSearch.class);
}
示例10: init
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Wires up the picture-found accessors, the {@code PictureFound} mapper,
 * and the reusable prepared statements after dependency injection completes.
 */
@PostConstruct
public void init() {
    // Prepared statements are independent of the mapping manager.
    findAllStmt = session.prepare("SELECT * FROM pictureFound");
    findAllByPictureStmt = session.prepare("SELECT * FROM pictureFound WHERE picture_id = ?");
    truncateStmt = session.prepare("TRUNCATE pictureFound");
    MappingManager mappingManager = new MappingManager(session);
    pictureFoundAccessor = mappingManager.createAccessor(PictureFoundAccessor.class);
    pictureSearchCounterAccessor = mappingManager.createAccessor(PictureSearchCounterAccessor.class);
    mapper = mappingManager.mapper(PictureFound.class);
}
示例11: init
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Wires up the metadata accessors, the {@code Metadata} mapper, and the
 * reusable prepared statements after dependency injection completes.
 */
@PostConstruct
public void init() {
    // Prepared statements are independent of the mapping manager.
    findAllStmt = session.prepare("SELECT * FROM metadata");
    truncateStmt = session.prepare("TRUNCATE metadata");
    MappingManager mappingManager = new MappingManager(session);
    metadataAccessor = mappingManager.createAccessor(MetadataAccessor.class);
    generalCounterAccessor = mappingManager.createAccessor(GeneralCounterAccessor.class);
    mapper = mappingManager.mapper(Metadata.class);
}
示例12: CqlContentStore
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
 * Builds the content store: registers UDT codecs (driver 3.1.0 workaround),
 * creates the content mapper/accessor with the configured consistency levels,
 * and captures the collaborators and metric name prefixes from the builder.
 *
 * @param builder carries all required collaborators; id generator, session,
 *                clock, hasher and graph store must be non-null
 */
protected CqlContentStore(Builder builder) {
    this.idGenerator = checkNotNull(builder.idGenerator);
    this.session = checkNotNull(builder.session);
    this.clock = checkNotNull(builder.clock);
    MappingManager mappingManager = new MappingManager(session);
    // TODO: bug in driver 3.1.0 prompting this hackaround. Remove when it's fixed. MBST-16715
    mappingManager.udtCodec(Description.class);
    mappingManager.udtCodec(org.atlasapi.content.v2.model.udt.BroadcastRef.class);
    // Fixed: LocationSummary was registered twice in the original; dropped the duplicate.
    mappingManager.udtCodec(org.atlasapi.content.v2.model.udt.LocationSummary.class);
    mappingManager.udtCodec(org.atlasapi.content.v2.model.udt.ItemSummary.class);
    this.mapper = mappingManager.mapper(org.atlasapi.content.v2.model.Content.class);
    this.accessor = mappingManager.createAccessor(ContentAccessor.class);
    mapper.setDefaultGetOptions(Mapper.Option.consistencyLevel(builder.readConsistency));
    mapper.setDefaultSaveOptions(Mapper.Option.consistencyLevel(builder.writeConsistency));
    mapper.setDefaultDeleteOptions(Mapper.Option.consistencyLevel(builder.writeConsistency));
    this.sender = builder.sender;
    this.hasher = checkNotNull(builder.hasher);
    this.graphStore = checkNotNull(builder.graphStore);
    writeContent = builder.metricPrefix + "writeContent.";
    writeBroadcast = builder.metricPrefix + "writeBroadcast.";
    this.metricRegistry = builder.metricRegistry;
}
示例13: CassandraReadOnlyEventStorageEngine
import com.datastax.driver.mapping.MappingManager; //导入方法依赖的package包/类
/**
* Initializes an EventStorageEngine with given {@code serializer}, {@code upcasterChain} and {@code
* persistenceExceptionResolver}.
*
* @param serializer Used to serialize and deserialize event payload and metadata. If {@code null}
* an {@link XStreamSerializer} is used.
* @param upcasterChain Allows older revisions of serialized objects to be deserialized. If {@code
* null} a {@link org.axonframework.serialization.upcasting.event.NoOpEventUpcaster} is used.
* @param persistenceExceptionResolver Detects concurrency exceptions from the backing database. If {@code null}
* persistence exceptions are not explicitly resolved.
* @param batchSize The number of events that should be read at each database access. When more
* than this number of events must be read to rebuild an aggregate's state, the
* events are read in batches of this size. If {@code null} a batch size of 100
* is used. Tip: if you use a snapshotter, make sure to choose snapshot trigger
* and batch size such that a single batch will generally retrieve all events
*/
public CassandraReadOnlyEventStorageEngine(Serializer serializer, EventUpcaster upcasterChain, PersistenceExceptionResolver persistenceExceptionResolver, Integer batchSize, Session session, EventSchema schema) {
super(serializer, upcasterChain, persistenceExceptionResolver, batchSize);
if (session == null) {
throw new IllegalArgumentException("Parameter 'session' cannot be null");
}
MappingManager mappingManager = new MappingManager(session);
this.session = session;
this.schema = getOrDefault(schema, () -> EventSchema.builder().build());
this.eventMapper = mappingManager.mapper(DomainEventEntry.class);
this.snapshotMapper = mappingManager.mapper(SnapshotEventEntry.class);
this.eventLogMapper = mappingManager.mapper(EventLogEntry.class);
}