This article collects typical usage examples of the Scala class org.apache.spark.SparkEnv. If you are wondering what SparkEnv is for, how it is used, or where to find examples of it in real code, the examples selected here may help.
Three code examples of the SparkEnv class are shown below, ordered by popularity by default.
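SparkEnv bundles the per-JVM runtime services of a running Spark application (the block manager, serializer, shuffle manager, the SparkConf, and so on), and SparkEnv.get returns the instance for the current process. As a quick orientation before the examples, here is a minimal sketch of reading a few of those handles once a SparkContext exists; the local-mode setup and application name are illustrative only:

import org.apache.spark.{SparkConf, SparkContext, SparkEnv}

object SparkEnvPeek {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("sparkenv-peek").setMaster("local[*]"))
    val env = SparkEnv.get
    // A few of the services SparkEnv wires together:
    println(env.conf.get("spark.app.name")) // the application's SparkConf
    println(env.blockManager)               // block storage for this JVM
    println(env.serializer)                 // the configured serializer
    sc.stop()
  }
}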
Example 1: Main
// Package declaration and imported dependencies
package onextent.demo

import org.apache.spark.SparkEnv
import org.rogach.scallop.ScallopConf

object Main {

  def main(args: Array[String]): Unit = {

    // Command-line options parsed with Scallop; --message defaults to "world".
    object Args extends ScallopConf(args) {
      val msg = opt[String]("message", descr = "say something", default = Some("world"))
    }

    // Returns the SparkEnv of this JVM; it is null here because no SparkContext has been created.
    SparkEnv.get

    Args.verify()
    println(s"Hello ${Args.msg.getOrElse("what?")}")
  }
}
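This example only touches SparkEnv through the bare SparkEnv.get call; the rest is Scallop argument parsing. One possible way to exercise it, assuming the object is invoked from a test or another main (the argument values are illustrative):

// With the declared default:
Main.main(Array.empty[String])          // prints "Hello world"
// Overriding it via Scallop's long-form option:
Main.main(Array("--message", "spark"))  // prints "Hello spark"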
Example 2: RedisShuffleWriter
// Package declaration and imported dependencies
package org.apache.spark.shuffle.redis

import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.{ShuffleHandle, ShuffleWriter}
import org.apache.spark.{SparkEnv, TaskContext}
import redis.clients.jedis.{Jedis, JedisPool}

class RedisShuffleWriter[K, V](
    handle: ShuffleHandle,
    mapId: Int,
    context: TaskContext)
  extends ShuffleWriter[K, V] with Logging {

  private val dep = handle.asInstanceOf[RedisShuffleHandle[Any, Any, Any]].dependency

  // The executor's block manager, obtained from the per-JVM SparkEnv.
  private val blockManager = SparkEnv.get.blockManager

  private val jedisPool = new JedisPool()

  private var sorter: RedisSorter[Any, Any, Any] = null

  // Are we in the process of stopping? Because map tasks can call stop() with success = true
  // and then call stop() with success = false if they get an exception, we want to make sure
  // we don't try deleting files, etc twice.
  private var stopping = false

  private var mapStatus: MapStatus = null

  // Note: the write() method required by ShuffleWriter, and the RedisShuffleHandle / RedisSorter
  // types it relies on, are not shown in this excerpt.

  override def stop(success: Boolean): Option[MapStatus] = {
    try {
      if (stopping) {
        return None
      }
      stopping = true
      if (success) {
        return Option(mapStatus)
      } else {
        if (sorter != null) {
          sorter.clean()
          sorter = null
        }
        return None
      }
    } finally {
      jedisPool.close()
    }
  }
}
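A ShuffleWriter like this is normally handed out by a matching ShuffleManager, which Spark instantiates from the spark.shuffle.manager setting. A minimal sketch of wiring that up, assuming the companion manager is called org.apache.spark.shuffle.redis.RedisShuffleManager (that class name is an assumption, not something shown in the excerpt):

import org.apache.spark.{SparkConf, SparkContext}

object RedisShuffleDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("redis-shuffle-demo")
      .setMaster("local[*]")
      // spark.shuffle.manager accepts a fully qualified ShuffleManager class name.
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.redis.RedisShuffleManager") // assumed name

    val sc = new SparkContext(conf)
    // Any shuffle-producing operation (reduceByKey here) would now go through the Redis-backed writer.
    sc.parallelize(1 to 100).map(i => (i % 10, i)).reduceByKey(_ + _).collect().foreach(println)
    sc.stop()
  }
}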
Example 3: SharkCLIService
// Package declaration and imported dependencies
package shark.server

import org.apache.hive.service.cli.CLIService
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.shims.ShimLoader
import org.apache.hive.service.auth.HiveAuthFactory
import java.io.IOException
import org.apache.hive.service.ServiceException
import javax.security.auth.login.LoginException
import org.apache.spark.SparkEnv
import shark.{SharkServer, Utils}

class SharkCLIService extends CLIService {

  override def init(hiveConf: HiveConf): Unit = {
    this.synchronized {
      Utils.setSuperField("hiveConf", hiveConf, this)

      // Swap Hive's default session manager for Shark's implementation.
      val sharkSM = new SharkSessionManager
      Utils.setSuperField("sessionManager", sharkSM, this)
      addService(sharkSM)

      try {
        // Log in through Kerberos and record the server user name on the parent class.
        HiveAuthFactory.loginFromKeytab(hiveConf)
        val serverUserName = ShimLoader.getHadoopShims
          .getShortUserName(ShimLoader.getHadoopShims.getUGIForConf(hiveConf))
        Utils.setSuperField("serverUserName", serverUserName, this)
      } catch {
        case e: IOException =>
          throw new ServiceException("Unable to login to kerberos with given principal/keytab", e)
        case e: LoginException =>
          throw new ServiceException("Unable to login to kerberos with given principal/keytab", e)
      }

      // Make sure the ThreadLocal SparkEnv reference is the same for all threads.
      SparkEnv.set(SharkServer.sparkEnv)

      sharkInit(hiveConf)
    }
  }
}
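The SparkEnv.set call near the end is the part of this example that exercises SparkEnv: in the early Spark releases Shark targeted, the environment was held in a ThreadLocal, so threads created outside Spark (such as the Hive service threads here) had to install the driver's environment themselves before touching Spark internals. In later Spark releases SparkEnv.set is no longer public, so this pattern only applies to those legacy versions. A minimal sketch of the idea, assuming such an old API:

import org.apache.spark.SparkEnv

// Capture the environment on the thread that owns the SparkContext...
val driverEnv = SparkEnv.get

new Thread(new Runnable {
  override def run(): Unit = {
    // ...and re-install it on the foreign thread before any code that relies on SparkEnv.get.
    SparkEnv.set(driverEnv)
    // work that calls into Spark internals goes here
  }
}).start()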