This article collects typical usage examples of the Scala class com.datastax.driver.core.Session. If you are wondering what the Session class does, how to use it, or where it is used in practice, the hand-picked examples below may help.
The sections below show 15 code examples of the Session class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code samples.
Example 1: CassandraSink
// Package declaration and imported dependencies
package akka.stream.alpakka.cassandra.scaladsl
import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import com.datastax.driver.core.{BoundStatement, PreparedStatement, Session}
import scala.concurrent.{ExecutionContext, Future}
import akka.stream.alpakka.cassandra.GuavaFutures._
object CassandraSink {
def apply[T](
parallelism: Int,
statement: PreparedStatement,
statementBinder: (T, PreparedStatement) => BoundStatement
)(implicit session: Session, ex: ExecutionContext): Sink[T, Future[Done]] =
Flow[T]
.mapAsyncUnordered(parallelism)(t => session.executeAsync(statementBinder(t, statement)).asScala())
.toMat(Sink.ignore)(Keep.right)
}
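A minimal usage sketch of the sink defined above. The contact point, the table test.numbers(id int PRIMARY KEY) and all value names are assumptions made for illustration; they are not part of the original example.
// Usage sketch: stream the numbers 1..100 into a hypothetical test.numbers table
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.stream.alpakka.cassandra.scaladsl.CassandraSink
import com.datastax.driver.core.{Cluster, PreparedStatement}

object CassandraSinkUsage extends App {
  implicit val system = ActorSystem("cassandra-sink-demo")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  implicit val session = Cluster.builder().addContactPoint("127.0.0.1").build().connect()

  val preparedStatement = session.prepare("INSERT INTO test.numbers(id) VALUES (?)")
  val statementBinder = (n: Int, stmt: PreparedStatement) => stmt.bind(Int.box(n))

  Source(1 to 100)
    .runWith(CassandraSink[Int](2, preparedStatement, statementBinder))
    .onComplete { _ =>
      session.close()
      session.getCluster.close()
      system.terminate()
    }
}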
Example 2: Connector
// Package declaration and imported dependencies
package connector
import com.datastax.driver.core.{Session, Cluster}
import com.websudos.phantom.connectors.{KeySpace, SessionProvider}
trait CassandraConnector extends SessionProvider {
implicit val space: KeySpace = Connector.keySpace
val cluster = Connector.cluster
override implicit lazy val session = Connector.session
}
object Connector {
val keySpace = KeySpace("MovieLens")
val cluster = Cluster.builder().addContactPoint("192.168.99.100").build()
val session = cluster.connect(keySpace.name)
}
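A quick way to exercise the connector above, assuming the contact point is reachable and the MovieLens keyspace already exists; the query only reads the built-in system tables.
object ConnectorUsage extends App {
  // Query a system table so the sketch does not depend on any application schema
  val versionRow = Connector.session.execute("SELECT release_version FROM system.local").one()
  println(s"Connected to Cassandra ${versionRow.getString("release_version")}")
  Connector.session.close()
  Connector.cluster.close()
}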
Example 3: CassandraSink
// Package declaration and imported dependencies
package akka.stream.alpakka.cassandra.javadsl
import java.util.concurrent.CompletionStage
import java.util.function.BiFunction
import akka.Done
import akka.stream.javadsl.Sink
import com.datastax.driver.core.{BoundStatement, PreparedStatement, Session}
import akka.stream.alpakka.cassandra.scaladsl.{CassandraSink => ScalaCSink}
import scala.compat.java8.FutureConverters._
import scala.concurrent.ExecutionContext
object CassandraSink {
def create[T](parallelism: Int,
statement: PreparedStatement,
statementBinder: BiFunction[T, PreparedStatement, BoundStatement],
session: Session,
executionContext: ExecutionContext): Sink[T, CompletionStage[Done]] = {
val sink =
ScalaCSink.apply[T](parallelism, statement, (t, p) => statementBinder.apply(t, p))(session, executionContext)
sink.mapMaterializedValue(_.toJava).asJava
}
}
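Note that this Java DSL is a thin adapter: it delegates to the Scala CassandraSink above, wraps the BiFunction binder in a Scala function, and converts the materialized Future[Done] into a CompletionStage[Done] via scala-java8-compat.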
Example 4: beforeAll
// Package declaration and imported dependencies
package com.github.jparkie.spark.cassandra
import java.net.{ InetAddress, InetSocketAddress }
import com.datastax.driver.core.Session
import com.datastax.spark.connector.cql.CassandraConnector
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.{ BeforeAndAfterAll, Suite }
trait CassandraServerSpecLike extends BeforeAndAfterAll { this: Suite =>
// Remove protected modifier because of SharedSparkContext.
override def beforeAll(): Unit = {
super.beforeAll()
EmbeddedCassandraServerHelper.startEmbeddedCassandra()
}
// Remove protected modifier because of SharedSparkContext.
override def afterAll(): Unit = {
EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
super.afterAll()
}
def getClusterName: String = {
EmbeddedCassandraServerHelper.getClusterName
}
def getHosts: Set[InetAddress] = {
val temporaryAddress =
new InetSocketAddress(EmbeddedCassandraServerHelper.getHost, EmbeddedCassandraServerHelper.getNativeTransportPort)
.getAddress
Set(temporaryAddress)
}
def getNativeTransportPort: Int = {
EmbeddedCassandraServerHelper.getNativeTransportPort
}
def getRpcPort: Int = {
EmbeddedCassandraServerHelper.getRpcPort
}
def getCassandraConnector: CassandraConnector = {
CassandraConnector(hosts = getHosts, port = getNativeTransportPort)
}
def createKeyspace(session: Session, keyspace: String): Unit = {
session.execute(
s"""CREATE KEYSPACE "$keyspace"
|WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
""".stripMargin
)
}
}
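A hypothetical test that mixes in the trait above might look as follows; the suite name and the example_ks keyspace are invented for illustration.
import org.scalatest.FunSuite

class EmbeddedCassandraSpec extends FunSuite with CassandraServerSpecLike {
  test("createKeyspace registers the keyspace in the cluster metadata") {
    // withSessionDo borrows a Session from the connector for the duration of the block
    getCassandraConnector.withSessionDo { session =>
      createKeyspace(session, "example_ks")
      assert(session.getCluster.getMetadata.getKeyspace("example_ks") != null)
    }
  }
}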
Example 5: CassandraAsyncContextImpl
// Package declaration and imported dependencies
package data
import javax.inject.{Inject, Singleton}
import com.datastax.driver.core.{Cluster, Session}
import com.typesafe.config.Config
import io.getquill.{CassandraAsyncContext, SnakeCase}
import play.api.inject.ApplicationLifecycle
import play.api.{Configuration, Environment, Logger}
import util.FutureUtils.toFutureUnit
import scala.concurrent.ExecutionContext
@Singleton
class CassandraAsyncContextImpl @Inject()(cassandra: CassandraCluster, conf: CassandraConfig)
extends CassandraAsyncContext[SnakeCase](cassandra.cluster, conf.keySpace, 100L)
@Singleton
class CassandraCluster @Inject()(conf: CassandraConfig, appLifecycle: ApplicationLifecycle)(implicit executor: ExecutionContext) {
import conf._
private[data] val cluster =
Cluster.builder()
.addContactPoints(hosts: _*)
.withPort(port)
.build()
private[data] def noKeySpaceSession: Session = cluster.connect()
private[data] def stop() = toFutureUnit(cluster.closeAsync())
Logger.info(s"Cassandra host to be used: '${hosts.mkString(",")}' with port:$port")
appLifecycle.addStopHook(() => stop())
}
@Singleton
class CassandraConfig @Inject()(configuration: Configuration, environment: Environment) {
val config: Config = configuration.underlying
val keySpace = config.getString("devgym.db.cassandra.keyspace")
val port = config.getInt("devgym.db.cassandra.port")
val hosts: Seq[String] =
configuration.getStringSeq("devgym.db.cassandra.hosts").get
}
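A sketch of how the injected Quill context could be consumed by a repository; the Task case class and its table are purely illustrative and not part of the original example.
import javax.inject.Inject
import scala.concurrent.{ExecutionContext, Future}

// Hypothetical model mapped by Quill (SnakeCase naming maps it to a table named task)
case class Task(id: java.util.UUID, name: String)

class TaskRepository @Inject()(ctx: CassandraAsyncContextImpl)(implicit ec: ExecutionContext) {
  import ctx._

  def findAll: Future[List[Task]] = run(query[Task])
}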
Example 6: CassandraSpec
// Package declaration and imported dependencies
package hmda.query.cassandra
import com.datastax.driver.core.{ Cluster, Session }
import org.scalatest.{ BeforeAndAfterAll, MustMatchers, WordSpec }
import org.cassandraunit.CQLDataLoader
import org.cassandraunit.dataset.cql.ClassPathCQLDataSet
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
class CassandraSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {
var cluster: Cluster = _
var session: Session = _
override def beforeAll(): Unit = {
EmbeddedCassandraServerHelper.startEmbeddedCassandra(60000L)
cluster = EmbeddedCassandraServerHelper.getCluster
session = cluster.connect()
loadData()
}
override def afterAll(): Unit = {
EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
}
def loadData(): Unit = {
val dataLoader = new CQLDataLoader(session)
dataLoader.load(new ClassPathCQLDataSet("simple.cql", "hmda_query"))
}
"Cassandra" must {
"Select from table" in {
val resultSet = session.execute("select * from myTable where id = 'myKey01'")
resultSet.iterator().next().getString("value") mustBe "myValue01"
}
}
}
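For the assertion above to pass, the simple.cql dataset loaded into the hmda_query keyspace must create myTable and insert a row with id 'myKey01' and value 'myValue01'; the dataset file itself is not shown in this example.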
Example 7: UserTypeCache
// Package declaration and imported dependencies
package net.reactivecore.cca.utils
import com.datastax.driver.core.DataType.CollectionType
import com.datastax.driver.core.{ Session, UserType }
import net.reactivecore.cca.EncodingException
import scala.collection.mutable
import scala.collection.JavaConverters._
private[cca] class UserTypeCache(tableName: String) {
object lock
var cache: mutable.Map[String, UserType] = mutable.Map.empty
def getUserType(columnName: String, session: Session): UserType = {
val candidate = lock.synchronized(cache.get(columnName))
candidate match {
case Some(v) => v
case None =>
val userType = fetchUserType(columnName, session)
lock.synchronized {
cache.put(columnName, userType)
}
userType
}
}
private def fetchUserType(columnName: String, session: Session): UserType = {
val meta = session.getCluster.getMetadata
val column = (for {
keyspace <- Option(meta.getKeyspace(session.getLoggedKeyspace))
table <- Option(keyspace.getTable(tableName))
column <- Option(table.getColumn(columnName))
} yield column).getOrElse {
throw new EncodingException(s"Could not find type for column ${columnName} in ${tableName}")
}
column.getType match {
case user: UserType => user
case collectionType: CollectionType =>
val subType = collectionType.getTypeArguments.asScala
subType match {
case Seq(one: UserType) =>
one
case _ => throw new EncodingException(s"Not supported complex list types of more than one value")
}
case somethingElse => throw new EncodingException(s"Expected UserType for ${columnName} in ${tableName}, found ${somethingElse}")
}
}
}
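A hypothetical call site for the cache; the users table, the address column and its street field are invented for illustration, and a Session must already be available in scope.
val userTypeCache = new UserTypeCache("users")
val addressType = userTypeCache.getUserType("address", session)
// Build a value of the resolved UDT (field names are assumptions)
val address = addressType.newValue().setString("street", "Main St 1")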
Example 8: environment
// Package declaration and imported dependencies
package components
import cassandra.{CassandraConnector, CassandraConnectionUri}
import com.datastax.driver.core.Session
import models.ProductModel
import play.api.inject.ApplicationLifecycle
import play.api.{Configuration, Environment, Mode}
import repositories.{Repository, ProductsRepository}
import scala.concurrent.Future
trait CassandraRepositoryComponents {
// These will be filled by Play's built-in components; should be `def` to avoid initialization problems
def environment: Environment
def configuration: Configuration
def applicationLifecycle: ApplicationLifecycle
lazy private val cassandraSession: Session = {
val uriString = environment.mode match {
case Mode.Prod => "cassandra://localhost:9042/prod"
case _ => "cassandra://localhost:9042/test"
}
val session: Session = CassandraConnector.createSessionAndInitKeyspace(
CassandraConnectionUri(uriString)
)
// Shutdown the client when the app is stopped or reloaded
applicationLifecycle.addStopHook(() => Future.successful(session.close()))
session
}
lazy val productsRepository: Repository[ProductModel, Int] = {
new ProductsRepository(cassandraSession)
}
}
Author: manuelkiessling, Project: play2-compiletime-cassandra-di, Lines: 35, Source file: CassandraRepositoryComponents.scala
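Since the trait expects Play's built-in components, a compile-time DI wiring might look like the sketch below; the components class name is invented and Router.empty stands in for real routes.
import play.api.ApplicationLoader.Context
import play.api.BuiltInComponentsFromContext
import play.api.routing.Router

class AppComponents(context: Context)
  extends BuiltInComponentsFromContext(context)
  with CassandraRepositoryComponents {

  // A real application would provide its generated router here
  lazy val router: Router = Router.empty
}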
Example 9: CassandraRepository
// Package declaration and imported dependencies
package repositories
import com.datastax.driver.core.querybuilder.QueryBuilder
import com.datastax.driver.core.querybuilder.QueryBuilder._
import com.datastax.driver.core.{Row, Session}
abstract class CassandraRepository[M, I](session: Session, tablename: String, partitionKeyName: String)
extends Repository[M, I] {
def rowToModel(row: Row): M
def getOneRowBySinglePartitionKeyId(partitionKeyValue: I): Row = {
val selectStmt =
select()
.from(tablename)
.where(QueryBuilder.eq(partitionKeyName, partitionKeyValue))
.limit(1)
val resultSet = session.execute(selectStmt)
val row = resultSet.one()
row
}
override def getOneById(id: I): M = {
val row = getOneRowBySinglePartitionKeyId(id)
rowToModel(row)
}
}
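A concrete repository built on the abstract class above might look like this; the ProductModel fields and the products table layout are assumptions, since the real model is not shown in this example.
case class ProductModel(id: Int, name: String) // hypothetical field layout

class ProductsRepository(session: Session)
  extends CassandraRepository[ProductModel, Int](session, "products", "id") {

  override def rowToModel(row: Row): ProductModel =
    ProductModel(row.getInt("id"), row.getString("name"))
}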
Example 10: Launcher
// Package declaration and imported dependencies
package com.knoldus
import com.datastax.driver.core.{Cluster, ResultSet, Session}
import com.knoldus.utils.ConfigReader
import scala.collection.JavaConversions
import scala.util.{Failure, Success, Try}
object Launcher extends App {
Try {
CassandraConnector.getCasssandraBuilder
} match {
case Success(cluster) =>
val session = getCassandraSession(ConfigReader.getKeyspaceName, cluster)
QueryHandler.runQuery(session)
session.close()
cluster.close()
case Failure(exception) => println("Unable to Connect to Cassandra" + exception)
}
private def getCassandraSession(keyspace: String, cluster: Cluster): Session = {
Try{
CassandraConnector.getSession(keyspace, cluster)
} match {
case Success(session) => session
case Failure(exception) => throw new Exception("Unable to connect to keyspace" + exception)
}
}
}
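The CassandraConnector helper used by Launcher is not shown in this example; a minimal sketch consistent with the calls above could be the following (the contact point is hard-coded here only for illustration, while the real project presumably reads it from ConfigReader).
object CassandraConnector {
  def getCasssandraBuilder: Cluster =
    Cluster.builder().addContactPoint("127.0.0.1").build()

  def getSession(keyspace: String, cluster: Cluster): Session =
    cluster.connect(keyspace)
}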
Example 11: apply
// Package declaration and imported dependencies
package akka.persistence.cassandra
import akka.Done
import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.event.Logging
import akka.persistence.cassandra.session.CassandraSessionSettings
import akka.persistence.cassandra.session.scaladsl.CassandraSession
import com.datastax.driver.core.Session
import scala.concurrent.{ ExecutionContext, Future }
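// NOTE: the enclosing object declaration is omitted from this excerpt, which is why the closing brace at the end appears unmatched.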
def apply(system: ActorSystem, metricsCategory: String, init: Session => Future[Unit])(
implicit executionContext: ExecutionContext
): CassandraSession = {
val log = Logging(system, classOf[CassandraSession])
val provider = SessionProvider(
system.asInstanceOf[ExtendedActorSystem],
system.settings.config.getConfig("cassandra-journal")
)
val settings = CassandraSessionSettings(system.settings.config.getConfig("cassandra-journal"))
new CassandraSession(
system,
provider,
settings,
executionContext,
log,
metricsCategory,
init.andThen(_.map(_ => Done))
)
}
}
Example 12: hashtagLoad
// Package declaration and imported dependencies
package controllers
import javax.inject.Inject
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.Flow
import com.datastax.driver.core.Session
import controllers.stages.LoadStage
import play.api.libs.EventSource
import play.api.mvc.{Action, Controller}
import services.utils.CassandraHelper._
import services.utils.{CassandraHelper, CassandraUri}
import services.{Hashtag, MyTwitterListener, Tweet}
import scala.concurrent.ExecutionContext
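// NOTE: the enclosing controller class (including the injected twitterListener and the implicit materializer) is omitted from this excerpt.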
def hashtagLoad = Action {
val source = twitterListener.hashtagSource
val loadFlow = LoadStage.getFlow(loadHashtags)
//Flow stage using take, as the limit function throws an exception when threshold is exceeded
val filter = Flow[Set[Hashtag]].take(MAX_TWEETS)
val hashtagSource = source.via(filter).via(loadFlow)
Ok.chunked(hashtagSource.map(set => set.mkString) via EventSource.flow)
}
private def loadHashtags(s: Session, hashtags: Set[Hashtag]) = {
for (hashTag <- hashtags) {
s.execute(s"UPDATE $HASHTAG_TABLE SET hashtag_count = hashtag_count + 1 WHERE hashtag='" + hashTag.name.replace("'", "") + "';")
}
}
}
Example 13: DatabaseUtil
// Package declaration and imported dependencies
package com.yper.utils
import com.datastax.driver.core.{Cluster, Session}
object DatabaseUtil {
val keyspace = "yper"
def getSession: Session = DatabaseUtil.synchronized {
try {
val builder = Cluster
.builder()
.addContactPoint("localhost")
.withPort(9042)
.build()
builder.connect(keyspace)
} catch {
case e: Exception => {
println(e.getMessage)
throw e
}
}
}
}
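A minimal usage sketch of the helper above; the query reads a system table, so it only assumes that the yper keyspace exists on localhost:9042.
object DatabaseUtilUsage extends App {
  val session = DatabaseUtil.getSession
  val row = session.execute("SELECT release_version FROM system.local").one()
  println(s"Cassandra version: ${row.getString("release_version")}")
  session.close()
  session.getCluster.close()
}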
Example 14: CassandraFactory
// Package declaration and imported dependencies
package com.bwsw.sj.common.utils
import java.net.InetSocketAddress
import com.bwsw.tstreams.common.{CassandraConnectorConf, CassandraHelper}
import com.bwsw.tstreams.data.cassandra.Factory
import com.bwsw.tstreams.metadata.MetadataStorageFactory
import com.datastax.driver.core.{Cluster, Session}
class CassandraFactory {
import scala.collection.JavaConverters._
private var cluster: Cluster = null
private var session: Session = null
private var cassandraConnectorConf: CassandraConnectorConf = null
private val metadataStorageFactory = new MetadataStorageFactory()
private val dataStorageFactory = new Factory()
def open(hosts: Set[(String, Int)]) = {
val cassandraHosts = hosts.map(s => new InetSocketAddress(s._1, s._2))
cluster = Cluster.builder().addContactPointsWithPorts(cassandraHosts.toList.asJava).build()
session = cluster.connect()
cassandraConnectorConf = CassandraConnectorConf.apply(cassandraHosts)
}
def getDataStorage(keyspace: String) = {
dataStorageFactory.getInstance(
cassandraConnectorConf,
keyspace = keyspace)
}
def getMetadataStorage(keyspace: String) = {
metadataStorageFactory.getInstance(
cassandraConnectorConf,
keyspace = keyspace)
}
def createKeyspace(keyspace: String) = {
CassandraHelper.createKeyspace(session, keyspace)
}
def createDataTable(keyspace: String) = {
CassandraHelper.createDataTable(session, keyspace)
}
def createMetadataTables(keyspace: String) = {
CassandraHelper.createMetadataTables(session, keyspace)
}
def dropKeyspace(keyspace: String) = {
CassandraHelper.dropKeyspace(session, keyspace)
}
def close() = {
metadataStorageFactory.closeFactory()
session.close()
cluster.close()
}
}
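A hedged usage sketch of the factory above; the host and the sj_demo keyspace are illustrative, and every call relies only on methods defined in the class itself.
val factory = new CassandraFactory
factory.open(Set(("127.0.0.1", 9042)))
factory.createKeyspace("sj_demo")
factory.createDataTable("sj_demo")
factory.createMetadataTables("sj_demo")
val metadataStorage = factory.getMetadataStorage("sj_demo")
val dataStorage = factory.getDataStorage("sj_demo")
factory.close()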
Example 15: Post
// Package declaration and imported dependencies
package demo6
import java.util.UUID
import com.datastax.driver.core.{Row, Cluster, Session}
import troy.dsl._
import troy.driver.DSL._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
case class Post(id: UUID, title: String)
object Main extends App {
import scala.concurrent.ExecutionContext.Implicits.global
val port: Int = 9042
val host: String = "127.0.0.1"
private val cluster =
new Cluster.Builder().addContactPoints(host).withPort(port).build()
implicit val session: Session = cluster.connect()
val create = withSchema {
(authorId: String, title: String) =>
cql"""
INSERT INTO test.posts (author_id , post_id , post_title )
VALUES ( $authorId, now(), $title);
""".prepared.executeAsync
}
val listByAuthor = withSchema {
(authorId: String) =>
cql"""
SELECT post_id, post_title
FROM test.posts
WHERE author_id = $authorId
"""
.prepared
.executeAsync
.as(Post)
}
println(Await.result(create("test", "title"), Duration(1, "second")))
println(Await.result(listByAuthor("test"), Duration(1, "second")))
session.close()
cluster.close()
}
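Note that the withSchema blocks above are validated at compile time: troy needs the schema of test.posts to be available to its macro (typically as a CQL file on the classpath), so this example will not compile without that schema definition.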