This article collects typical usage examples of the redis.clients.jedis.Jedis class in Scala. If you are wondering what the Jedis class is for, or how to use it from Scala, the curated examples below may help.
Eight code examples of the Jedis class are shown below, sorted by popularity by default.
Example 1: set
// Package declaration and imported dependencies
package walfie.gbf.raidfinder.server.persistence

import com.trueaccord.scalapb.{GeneratedMessage, GeneratedMessageCompanion, Message}
import java.net.URI
import redis.clients.jedis.{BinaryJedis, Jedis}

trait ProtobufStorage {
  type CacheItem[T] = GeneratedMessage with Message[T]

  def set[T <: CacheItem[T]](key: String, value: T): Unit

  def get[T <: CacheItem[T]](
    key: String
  )(implicit companion: GeneratedMessageCompanion[T]): Option[T]

  def close(): Unit
}

object ProtobufStorage {
  def redis(uri: URI): RedisProtobufStorage =
    new RedisProtobufStorage(new BinaryJedis(uri))
}

// TODO: Write integration test
class RedisProtobufStorage(redis: BinaryJedis) extends ProtobufStorage {
  def set[T <: CacheItem[T]](key: String, value: T): Unit = {
    redis.set(key.getBytes, value.toByteArray)
  }

  def get[T <: CacheItem[T]](
    key: String
  )(implicit companion: GeneratedMessageCompanion[T]): Option[T] = {
    // A missing key yields null; validate parses the bytes back into a message
    Option(redis.get(key.getBytes)).flatMap { bytes =>
      companion.validate(bytes).toOption
    }
  }

  def close(): Unit = redis.close()
}

// No-op implementation for running without a Redis backend
object NoOpProtobufStorage extends ProtobufStorage {
  def set[T <: CacheItem[T]](key: String, value: T): Unit = ()

  def get[T <: CacheItem[T]](
    key: String
  )(implicit companion: GeneratedMessageCompanion[T]): Option[T] = None

  def close(): Unit = ()
}
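For reference, a minimal usage sketch of the storage above. RaidBoss and its field are hypothetical stand-ins for a ScalaPB-generated message; they are not part of the original example.

// Sketch only: RaidBoss is an assumed ScalaPB-generated message type.
import java.net.URI
import walfie.gbf.raidfinder.server.persistence.ProtobufStorage

val storage = ProtobufStorage.redis(new URI("redis://localhost:6379"))
storage.set("boss:123", RaidBoss(name = "Lvl 60 Yggdrasil")) // hypothetical message
val cached: Option[RaidBoss] = storage.get[RaidBoss]("boss:123")
storage.close()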
Example 2: ConnectionPool
// Package declaration and imported dependencies
package com.redislabs.provider.redis

import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}
import redis.clients.jedis.exceptions.JedisConnectionException
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._

object ConnectionPool {
  @transient private lazy val pools: ConcurrentHashMap[RedisEndpoint, JedisPool] =
    new ConcurrentHashMap[RedisEndpoint, JedisPool]()

  def connect(re: RedisEndpoint): Jedis = {
    val pool = pools.getOrElseUpdate(re, {
      val poolConfig: JedisPoolConfig = new JedisPoolConfig()
      poolConfig.setMaxTotal(250)
      poolConfig.setMaxIdle(32)
      poolConfig.setTestOnBorrow(false)
      poolConfig.setTestOnReturn(false)
      poolConfig.setTestWhileIdle(false)
      poolConfig.setMinEvictableIdleTimeMillis(60000)
      poolConfig.setTimeBetweenEvictionRunsMillis(30000)
      poolConfig.setNumTestsPerEvictionRun(-1)
      new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum)
    })
    // Retry while the server reports its client limit, doubling the wait
    // (up to ~0.5 s) between attempts.
    var sleepTime: Int = 4
    var conn: Jedis = null
    while (conn == null) {
      try {
        conn = pool.getResource
      } catch {
        case e: JedisConnectionException if String.valueOf(e.getCause)
            .contains("ERR max number of clients reached") =>
          if (sleepTime < 500) sleepTime *= 2
          Thread.sleep(sleepTime)
        case e: Exception => throw e
      }
    }
    conn
  }
}
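A minimal sketch of borrowing and returning a connection. RedisEndpoint is defined elsewhere in the project and not shown here, so its constructor signature below is an assumption:

// Sketch only: RedisEndpoint's field order (host, port, auth, dbNum, timeout) is assumed.
val endpoint = RedisEndpoint("localhost", 6379, null, 0, 2000)
val jedis = ConnectionPool.connect(endpoint)
try jedis.set("greeting", "hello")
finally jedis.close() // a pooled Jedis is returned to its pool on close in recent Jedis versions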
Example 3: Pug
// Package declaration and imported dependencies
package com.mine.pug

import javax.net.ssl.{HostnameVerifier, SSLParameters, SSLSocketFactory}
import redis.clients.jedis.Jedis
import redis.clients.util.Pool
import scala.concurrent.{ExecutionContext, Future}

class Pug(host: String, port: Int, connectionTimeout: Int, soTimeout: Int,
          ssl: Boolean, sSLSocketFactory: SSLSocketFactory,
          sSLParameters: SSLParameters,
          hostnameVerifier: HostnameVerifier)
  extends Jedis(host, port, connectionTimeout, soTimeout, ssl,
    sSLSocketFactory, sSLParameters, hostnameVerifier) with AsyncPug {

  var ds: Pool[Pug] = _

  // Return the connection to its pool when one is attached; otherwise close it.
  override def close(): Unit = {
    if (ds != null) {
      if (client.isBroken) ds.returnBrokenResource(this)
      else ds.returnResource(this)
    } else {
      client.close()
    }
  }

  override def aset(key: String, value: String)(implicit ec: ExecutionContext): Future[String] =
    Future {
      set(key, value)
    }

  override def aget(key: String)(implicit ec: ExecutionContext): Future[String] =
    Future {
      get(key)
    }
}
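A usage sketch of the asynchronous wrappers, assuming AsyncPug merely declares aset and aget (the trait is not shown in this example):

// Sketch only: constructor arguments and the AsyncPug trait are assumed.
import scala.concurrent.ExecutionContext.Implicits.global

val pug = new Pug("localhost", 6379, 2000, 2000, false, null, null, null)
val roundTrip = pug.aset("k", "v").flatMap(_ => pug.aget("k"))
roundTrip.foreach(value => println(s"got $value"))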
Example 4:
// Package declaration and imported dependencies
import org.apache.spark.ml.regression.LinearRegression
import com.redislabs.client.redisml.MLClient
import redis.clients.jedis.{Jedis, _}

// Load training data and train
val training = spark.read.format("libsvm").load("data/mllib/sample_linear_regression_data.txt")
val lr = new LinearRegression().setMaxIter(10).setRegParam(0.3).setElasticNetParam(0.8)
val lrModel = lr.fit(training)
println(s"Coefficients: ${lrModel.coefficients} Intercept: ${lrModel.intercept}")

// Connect to Redis
val jedis = new Jedis("localhost")

// Load the model into Redis (distinct val names avoid redefining cmd outside the REPL)
val setCmd = "my_lr_model" +: lrModel.intercept.toString +: lrModel.coefficients.toArray.mkString(",").split(",")
jedis.getClient.sendCommand(MLClient.ModuleCommand.LINREG_SET, setCmd: _*)
jedis.getClient.getStatusCodeReply

// Perform a prediction with Redis
val predictCmd = Array("my_lr_model", "1", "2", "5")
jedis.getClient.sendCommand(MLClient.ModuleCommand.LINREG_PREDICT, predictCmd: _*)
jedis.getClient.getStatusCodeReply
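The snippet above discards the module's replies. A small sketch that captures the prediction and releases the connection; the reply type of LINREG_PREDICT is assumed to be a status/bulk string here:

// Sketch only: redis-ml's reply handling is assumed.
val prediction =
  try {
    jedis.getClient.sendCommand(MLClient.ModuleCommand.LINREG_PREDICT, "my_lr_model", "1", "2", "5")
    jedis.getClient.getStatusCodeReply
  } finally jedis.close()
println(s"Predicted value: $prediction")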
Example 5: Forest
// Package declaration and imported dependencies
package com.redislabs.provider.redis.ml

import org.apache.spark.ml.tree
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import redis.clients.jedis.Protocol.Command
import redis.clients.jedis.{Jedis, _}
import com.redislabs.client.redisml.MLClient
import org.apache.spark.ml.tree.{CategoricalSplit, ContinuousSplit, InternalNode}

class Forest(trees: Array[DecisionTreeClassificationModel]) {

  // Serializes a subtree into the comma-separated node format consumed by the
  // redis-ml FOREST_ADD command; `path` encodes the node's position in the
  // tree ("l" = left child, "r" = right child).
  private def subtreeToRedisString(n: org.apache.spark.ml.tree.Node, path: String = "."): String = {
    val prefix: String = s",${path},"
    n.getClass.getSimpleName match {
      case "InternalNode" =>
        val in = n.asInstanceOf[InternalNode]
        val splitStr = in.split match {
          case contSplit: ContinuousSplit =>
            s"numeric,${in.split.featureIndex},${contSplit.threshold}"
          case catSplit: CategoricalSplit =>
            s"categoric,${in.split.featureIndex}," + catSplit.leftCategories.mkString(":")
        }
        prefix + splitStr + subtreeToRedisString(in.leftChild, path + "l") +
          subtreeToRedisString(in.rightChild, path + "r")
      case "LeafNode" =>
        prefix + s"leaf,${n.prediction}" +
          s",stats,${n.getImpurityStats.mkString(":")}"
    }
  }

  // One line per tree, prefixed by the tree index; the fold seeds an empty
  // first line, which callers drop.
  private def toRedisString: String = {
    trees.zipWithIndex.map { case (tree, treeIndex) =>
      s"${treeIndex}" + subtreeToRedisString(tree.rootNode, ".")
    }.fold("") { (a, b) => a + "\n" + b }
  }

  def toDebugArray: Array[String] = {
    toRedisString.split("\n").drop(1)
  }

  // Loads every tree into Redis inside a MULTI/EXEC transaction.
  def loadToRedis(forestId: String = "test_forest", host: String = "localhost"): Unit = {
    val jedis = new Jedis(host)
    val commands = toRedisString.split("\n").drop(1)
    jedis.getClient.sendCommand(Command.MULTI)
    jedis.getClient.getStatusCodeReply
    for (cmd <- commands) {
      val cmdArray = forestId +: cmd.split(",")
      jedis.getClient.sendCommand(MLClient.ModuleCommand.FOREST_ADD, cmdArray: _*)
      jedis.getClient.getStatusCodeReply
    }
    jedis.getClient.sendCommand(Command.EXEC)
    jedis.getClient.getMultiBulkReply
  }
}
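A sketch of feeding a trained Spark random forest into the loader above, in a spark-shell session; the dataset path, tree count, and forest id are placeholders:

// Sketch only: dataset path and model parameters are placeholders.
import org.apache.spark.ml.classification.RandomForestClassifier

val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
val model = new RandomForestClassifier().setNumTrees(10).fit(data)
new Forest(model.trees).loadToRedis("my_forest", "localhost")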
Example 6: ConnectionPool
// Package declaration and imported dependencies
package com.redislabs.provider.redis

import redis.clients.jedis.{Jedis, JedisPool, JedisPoolConfig}
import redis.clients.jedis.exceptions.JedisConnectionException
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._

object ConnectionPool {
  @transient private lazy val pools: ConcurrentHashMap[RedisEndpoint, JedisPool] =
    new ConcurrentHashMap[RedisEndpoint, JedisPool]()

  def connect(re: RedisEndpoint): Jedis = {
    val pool = pools.getOrElseUpdate(re, {
      val poolConfig: JedisPoolConfig = new JedisPoolConfig()
      poolConfig.setMaxTotal(500)
      poolConfig.setMaxIdle(32)
      poolConfig.setTestOnBorrow(false)
      poolConfig.setTestOnReturn(false)
      poolConfig.setTestWhileIdle(false)
      poolConfig.setMinEvictableIdleTimeMillis(60000)
      poolConfig.setTimeBetweenEvictionRunsMillis(30000)
      poolConfig.setNumTestsPerEvictionRun(-1)
      new JedisPool(poolConfig, re.host, re.port, re.timeout, re.auth, re.dbNum)
    })
    // Retry while the server reports its client limit, doubling the wait
    // (up to ~0.5 s) between attempts.
    var sleepTime: Int = 4
    var conn: Jedis = null
    while (conn == null) {
      try {
        conn = pool.getResource
      } catch {
        case e: JedisConnectionException if String.valueOf(e.getCause)
            .contains("ERR max number of clients reached") =>
          if (sleepTime < 500) sleepTime *= 2
          Thread.sleep(sleepTime)
        case e: Exception => throw e
      }
    }
    conn
  }
}
Example 7: RedisShuffleWriter
// Package declaration and imported dependencies
package org.apache.spark.shuffle.redis

import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.{ShuffleHandle, ShuffleWriter}
import org.apache.spark.{SparkEnv, TaskContext}
import redis.clients.jedis.{Jedis, JedisPool}

// Note: the write(records) method required by ShuffleWriter is omitted in this excerpt.
class RedisShuffleWriter[K, V](
  handle: ShuffleHandle,
  mapId: Int,
  context: TaskContext)
  extends ShuffleWriter[K, V] with Logging {

  private val dep = handle.asInstanceOf[RedisShuffleHandle[Any, Any, Any]].dependency
  private val blockManager = SparkEnv.get.blockManager
  private val jedisPool = new JedisPool()
  private var sorter: RedisSorter[Any, Any, Any] = null

  // Are we in the process of stopping? Because map tasks can call stop() with success = true
  // and then call stop() with success = false if they get an exception, we want to make sure
  // we don't try deleting files, etc twice.
  private var stopping = false
  private var mapStatus: MapStatus = null

  override def stop(success: Boolean): Option[MapStatus] = {
    try {
      if (stopping) {
        return None
      }
      stopping = true
      if (success) {
        Option(mapStatus)
      } else {
        if (sorter != null) {
          sorter.clean()
          sorter = null
        }
        None
      }
    } finally {
      jedisPool.close()
    }
  }
}
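For reference, a minimal sketch of the borrow/return discipline the writer's jedisPool implies; the host, port, and key names are placeholders:

// Sketch only: host, port, and key names are placeholders.
val pool = new JedisPool("localhost", 6379)
val jedis = pool.getResource
try jedis.rpush("shuffle:partition:0", "serialized-record")
finally jedis.close() // a pooled Jedis is returned to the pool on close
pool.close()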
Example 8: redis
// Package declaration and imported dependencies
package org.sierra

import redis.clients.jedis.Jedis

trait RedisSetup {
  // Runs `f` against a freshly flushed local Redis, serialized through a
  // global lock so concurrent tests do not interfere; the connection is
  // closed even if `f` throws.
  def redis[A](f: Jedis => A): A = RedisSetupLock.synchronized {
    val jedis = new Jedis("localhost")
    try {
      jedis.flushAll()
      f(jedis)
    } finally {
      jedis.close()
    }
  }
}

object RedisSetupLock
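A sketch of using the helper from a test; the spec class and assertion below are hypothetical, not part of the original project:

// Sketch only: a hypothetical test mixing in RedisSetup.
class CounterSpec extends RedisSetup {
  def incrementsTwice(): Unit = redis { jedis =>
    jedis.incr("counter")
    jedis.incr("counter")
    assert(jedis.get("counter") == "2")
  }
}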