

Scala TaskContext Class Code Examples

This article collects typical usage examples of the Scala class org.apache.spark.TaskContext. If you have been wondering what the TaskContext class is for and how to use it, the curated class examples below may help.


Eleven code examples of the TaskContext class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Scala code examples.
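
Before diving in, here is a minimal, self-contained sketch (not from any of the projects below) of the TaskContext operations these examples rely on: fetching the context of the currently running task, reading its stage/partition/attempt IDs, and registering a completion callback. It assumes a local Spark build where the addTaskCompletionListener(f: TaskContext => Unit) overload used throughout this page is available; app name and master are placeholders.

import org.apache.spark.{SparkConf, SparkContext, TaskContext}

object TaskContextDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("tc-demo").setMaster("local[2]"))
    sc.parallelize(1 to 10, numSlices = 2).foreachPartition { iter =>
      val tc = TaskContext.get // context of the task executing this partition
      // register a callback that fires when the task finishes, as examples 1, 2 and 5 do
      tc.addTaskCompletionListener(ctx => println(s"partition ${ctx.partitionId} done"))
      println(s"stage=${tc.stageId} partition=${tc.partitionId} attempt=${tc.taskAttemptId}")
      iter.foreach(_ => ()) // consume the iterator
    }
    sc.stop()
  }
}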

Example 1: StudyRDD

//Package declaration and imported dependencies
package com.study.spark.datasource

import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{Row, SQLContext}


class StudyRDD(sqlContext: SQLContext, schema: StructType) extends RDD[Row](sqlContext.sparkContext, deps=Nil) {
  @DeveloperApi
  override def compute(split: Partition, context: TaskContext): Iterator[Row] = new StudyReader(context, schema, split)

  // This test data source always reports exactly two partitions.
  // Each partition is computed by its own task, so at most two executors work on this RDD at once.
  override protected def getPartitions: Array[Partition] = {
    val arr: Array[Partition] = new Array[Partition](2)
    arr.update(0, new Partition() {
      override def index: Int = 0
    })
    arr.update(1, new Partition() {
      override def index: Int = 1
    })
    arr
  }
} 
Developer ID: hackpupu, Project: LML, Lines of code: 27, Source file: StudyRDD.scala

Example 2: StudyReader

//Package declaration and imported dependencies
package com.study.spark.datasource

import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType


class StudyReader(context: TaskContext, schema: StructType, split: Partition) extends Iterator[Row] {
  private[this] var counter: Int = 0

  // Register a completion listener so that close() is called when the task finishes.
  if(context != null) {
    context.addTaskCompletionListener(context => close())
  }

  // Supplies at most 100 rows.
  override def hasNext: Boolean = counter < 100

  // Produces one Row at a time.
  override def next(): Row = {
    if(!hasNext) {
      throw new NoSuchElementException("End of stream")
    }
    counter += 1
    Row(split.index + " field1 " + counter, "field2 " + counter, "field3: " + counter)
  }

  // close() would normally release resources; this example just logs the call.
  def close() = println("closed")
} 
Developer ID: hackpupu, Project: LML, Lines of code: 37, Source file: StudyReader.scala
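
A hypothetical driver program tying examples 1 and 2 together; the three StringType fields match what StudyReader produces, but the app name and master are placeholders.

import com.study.spark.datasource.StudyRDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object StudyRDDDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("study-rdd").setMaster("local[2]"))
    val sqlContext = new SQLContext(sc)
    val schema = StructType(Seq(
      StructField("field1", StringType),
      StructField("field2", StringType),
      StructField("field3", StringType)))
    val rows = new StudyRDD(sqlContext, schema).collect()
    println(rows.length) // 200: two partitions x 100 rows from each StudyReader
    sc.stop()
  }
}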

Example 3: WithCalcTransactionLogging

//Package declaration and imported dependencies
package biz.meetmatch.decorators

import biz.meetmatch.logging.BusinessLogger
import org.apache.spark.TaskContext

object WithCalcTransactionLogging {
  def apply[B](category: String, id: String, message: String = "")(f: => B)(implicit module: Class[_]): B = {
    val businessLogger = new BusinessLogger(module.getName)

    val taskContext = TaskContext.get
    businessLogger.transactionStarted(category, id, taskContext.stageId, taskContext.partitionId, taskContext.taskAttemptId, message)
    val result = f
    businessLogger.transactionStopped(category, id)

    result
  }
} 
Developer ID: tolomaus, Project: languagedetector, Lines of code: 18, Source file: WithCalcTransactionLogging.scala
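
Note that TaskContext.get returns null on the driver, so this decorator only works inside a running task. A hedged usage sketch, with the category string and the wrapped computation invented for illustration:

import biz.meetmatch.decorators.WithCalcTransactionLogging
import org.apache.spark.{SparkConf, SparkContext}

object TransactionLoggingDemo {
  def main(args: Array[String]): Unit = {
    implicit val module: Class[_] = getClass
    val sc = new SparkContext(new SparkConf().setAppName("txn-demo").setMaster("local[2]"))
    val lengths = sc.parallelize(Seq("some text", "more text"), 2).map { text =>
      // runs on an executor, where TaskContext.get is non-null
      WithCalcTransactionLogging("detect", text.hashCode.toString) {
        text.length // stand-in for the real computation being logged
      }
    }.collect()
    println(lengths.toSeq)
    sc.stop()
  }
}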

Example 4: UnsafeUtils

//Package declaration and imported dependencies
package com.microsoft.spark.perf.core

import scala.collection.concurrent.TrieMap

import sun.misc.Unsafe

import org.apache.spark.TaskContext

private[core] object UnsafeUtils {
  private val unsafeMap = new TrieMap[Int, Unsafe]

  private[core] def getUnsafeInstance: Unsafe = {
    val partitionId = TaskContext.getPartitionId()
    unsafeMap.getOrElseUpdate(partitionId, {
      val unsafeField = classOf[sun.misc.Unsafe].getDeclaredField("theUnsafe")
      unsafeField.setAccessible(true)
      unsafeField.get(null).asInstanceOf[sun.misc.Unsafe]}
    )
  }
} 
Developer ID: hdinsight, Project: SparkPerf, Lines of code: 21, Source file: UnsafeUtils.scala
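
The reflection trick above is the standard way to obtain sun.misc.Unsafe, since Unsafe.getUnsafe() throws a SecurityException for classes not loaded by the bootstrap class loader; the TrieMap merely caches the instance per partition ID. The same technique outside Spark, as a standalone sketch:

import sun.misc.Unsafe

object UnsafeDemo {
  def main(args: Array[String]): Unit = {
    // pull the singleton out of the private static field "theUnsafe"
    val field = classOf[Unsafe].getDeclaredField("theUnsafe")
    field.setAccessible(true)
    val unsafe = field.get(null).asInstanceOf[Unsafe]

    // use it for a small off-heap allocation
    val addr = unsafe.allocateMemory(8)
    unsafe.putLong(addr, 42L)
    println(unsafe.getLong(addr)) // 42
    unsafe.freeMemory(addr)
  }
}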

Example 5: GDBRDD

//Package declaration and imported dependencies
package com.esri.gdb

import org.apache.hadoop.conf.Configuration
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}


case class GDBRDD(@transient sc: SparkContext, gdbPath: String, gdbName: String, numPartitions: Int) extends RDD[Row](sc, Nil) with Logging {

  @DeveloperApi
  override def compute(partition: Partition, context: TaskContext): Iterator[Row] = {
    val part = partition.asInstanceOf[GDBPartition]
    val hadoopConf = if (sc == null) new Configuration() else sc.hadoopConfiguration
    val index = GDBIndex(gdbPath, part.hexName, hadoopConf)
    val table = GDBTable(gdbPath, part.hexName, hadoopConf)
    context.addTaskCompletionListener(context => {
      table.close()
      index.close()
    })
    table.rowIterator(index, part.startAtRow, part.numRowsToRead)
  }

  override protected def getPartitions: Array[Partition] = {
    val hadoopConf = if (sc == null) new Configuration() else sc.hadoopConfiguration
    GDBTable.findTable(gdbPath, gdbName, hadoopConf) match {
      case Some(catTab) => {
        val index = GDBIndex(gdbPath, catTab.hexName, hadoopConf)
        try {
          val numRows = index.numRows
          val numRowsPerPartition = (numRows.toDouble / numPartitions).ceil.toInt
          var startAtRow = 0
          (0 until numPartitions).map(i => {
            val endAtRow = startAtRow + numRowsPerPartition
            val numRowsToRead = if (endAtRow <= numRows) numRowsPerPartition else numRows - startAtRow
            val gdbPartition = GDBPartition(i, catTab.hexName, startAtRow, numRowsToRead)
            startAtRow += numRowsToRead
            gdbPartition
          }).toArray
        } finally {
          index.close()
        }
      }
      case _ => {
        log.error(s"Cannot find '$gdbName' in $gdbPath, creating an empty array of Partitions !")
        Array.empty[Partition]
      }
    }
  }
}

private[gdb] case class GDBPartition(m_index: Int,
                                     hexName: String,
                                     startAtRow: Int,
                                     numRowsToRead: Int
                                    ) extends Partition {
  override def index = m_index
} 
Developer ID: mraad, Project: spark-gdb, Lines of code: 60, Source file: GDBRDD.scala
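
The row-splitting arithmetic in getPartitions deserves a worked example: with numRows = 10 and numPartitions = 3, numRowsPerPartition = ceil(10/3) = 4, so the partitions read 4, 4, and 2 rows. A standalone sketch of just that logic:

object RowSplitDemo {
  def main(args: Array[String]): Unit = {
    val numRows = 10
    val numPartitions = 3
    val numRowsPerPartition = (numRows.toDouble / numPartitions).ceil.toInt // 4
    var startAtRow = 0
    for (i <- 0 until numPartitions) {
      val endAtRow = startAtRow + numRowsPerPartition
      // the last partition reads only what is left
      val numRowsToRead = if (endAtRow <= numRows) numRowsPerPartition else numRows - startAtRow
      println(s"partition $i: startAtRow=$startAtRow numRowsToRead=$numRowsToRead")
      startAtRow += numRowsToRead
    }
    // prints partitions of 4, 4 and 2 rows starting at 0, 4 and 8
  }
}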

Example 6: MongoRDD

//Package declaration and imported dependencies
package nsmc.rdd

import com.mongodb.BasicDBObject
import nsmc.Logging
import nsmc.mongo.{CollectionConfig, MongoConnector}

import nsmc.rdd.partitioner.{MongoRDDPartition, MongoRDDPartitioner}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}


import scala.language.existentials
import scala.reflect.ClassTag

class MongoRDD[R] private[nsmc] (@transient sc: SparkContext,
                                 val collectionConfig: CollectionConfig)
                                (implicit ct : ClassTag[R])
  extends RDD[R](sc, Seq.empty) with Logging {

  private val proxy = new CollectionProxy(collectionConfig)

  // make sure we inherit logging from the right place: our own Logging trait rather than RDD's
  override def log = super[Logging].log
  override def logName = super[Logging].logName
  override def logInfo(msg: => String) = super[Logging].logInfo(msg)
  override def logDebug(msg: => String) = super[Logging].logDebug(msg)
  override def logTrace(msg: => String) = super[Logging].logTrace(msg)
  override def logWarning(msg: => String) = super[Logging].logWarning(msg)
  override def logError(msg: => String) = super[Logging].logError(msg)
  override def logInfo(msg: => String, throwable: Throwable) = super[Logging].logInfo(msg, throwable)
  override def logDebug(msg: => String, throwable: Throwable) = super[Logging].logDebug(msg, throwable)
  override def logTrace(msg: => String, throwable: Throwable) = super[Logging].logTrace(msg, throwable)
  override def logWarning(msg: => String, throwable: Throwable) = super[Logging].logWarning(msg, throwable)
  override def logError(msg: => String, throwable: Throwable) = super[Logging].logError(msg, throwable)
  override def isTraceEnabled() = super[Logging].isTraceEnabled()

  override def getPartitions: Array[Partition] = {
    proxy.getPartitions
  }

  override def compute(split: Partition, context: TaskContext): Iterator[R] = {
    proxy.getPartitionIterator(split, context, new BasicDBObject(), new BasicDBObject()).asInstanceOf[Iterator[R]]
  }

} 
Developer ID: baank, Project: spark-mongodb-connector, Lines of code: 46, Source file: MongoRDD.scala
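
The wall of log* overrides exists because RDD and the connector's own Logging trait both provide concrete logging members, and Scala requires the subclass to resolve the conflict explicitly; super[Logging] pins the connector's implementations. A minimal, Spark-free sketch of the same super[Trait] disambiguation:

trait A { def greet: String = "from A" }
trait B { def greet: String = "from B" }

// C inherits conflicting concrete members and must override explicitly;
// super[B] selects B's implementation, as MongoRDD does with super[Logging]
class C extends A with B {
  override def greet: String = super[B].greet
}

object SuperTraitDemo {
  def main(args: Array[String]): Unit = {
    println(new C().greet) // prints "from B"
  }
}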

Example 7: SQLMongoRDD

//Package declaration and imported dependencies
package nsmc.rdd

import com.mongodb.DBObject
import nsmc.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}

import scala.language.existentials

// For use only in creating an RDD to return for SQL integration
class SQLMongoRDD private[nsmc] (@transient sc: SparkContext,
                                 proxy: CollectionProxy,
                                 filter: DBObject,
                                 projection: DBObject)
  extends RDD[DBObject](sc, Seq.empty) with Logging {

  // make sure we inherit logging from the right place: our own Logging trait rather than RDD's
  override def log = super[Logging].log
  override def logName = super[Logging].logName
  override def logInfo(msg: => String) = super[Logging].logInfo(msg)
  override def logDebug(msg: => String) = super[Logging].logDebug(msg)
  override def logTrace(msg: => String) = super[Logging].logTrace(msg)
  override def logWarning(msg: => String) = super[Logging].logWarning(msg)
  override def logError(msg: => String) = super[Logging].logError(msg)
  override def logInfo(msg: => String, throwable: Throwable) = super[Logging].logInfo(msg, throwable)
  override def logDebug(msg: => String, throwable: Throwable) = super[Logging].logDebug(msg, throwable)
  override def logTrace(msg: => String, throwable: Throwable) = super[Logging].logTrace(msg, throwable)
  override def logWarning(msg: => String, throwable: Throwable) = super[Logging].logWarning(msg, throwable)
  override def logError(msg: => String, throwable: Throwable) = super[Logging].logError(msg, throwable)
  override def isTraceEnabled() = super[Logging].isTraceEnabled()

  override def getPartitions: Array[Partition] = {
    proxy.getPartitions
  }

  override def compute(split: Partition, context: TaskContext): Iterator[DBObject] = {
    proxy.getPartitionIterator(split, context, filter, projection)
  }

} 
Developer ID: baank, Project: spark-mongodb-connector, Lines of code: 41, Source file: SQLMongoRDD.scala

Example 8: RedisShuffleWriter

//Package declaration and imported dependencies
package org.apache.spark.shuffle.redis

import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.{ShuffleHandle, ShuffleWriter}
import org.apache.spark.{SparkEnv, TaskContext}
import redis.clients.jedis.{Jedis, JedisPool}

class RedisShuffleWriter[K, V](
  handle: ShuffleHandle,
  mapId: Int,
  context: TaskContext)
  extends ShuffleWriter[K, V] with Logging {

  private val dep = handle.asInstanceOf[RedisShuffleHandle[Any, Any, Any]].dependency

  private val blockManager = SparkEnv.get.blockManager

  private val jedisPool = new JedisPool()

  private var sorter: RedisSorter[Any, Any, Any] = null

  // Are we in the process of stopping? Because map tasks can call stop() with success = true
  // and then call stop() with success = false if they get an exception, we want to make sure
  // we don't try deleting files, etc twice.
  private var stopping = false

  private var mapStatus: MapStatus = null

  
  override def stop(success: Boolean): Option[MapStatus] = {
    try {
      if (stopping) {
        return None
      }
      stopping = true
      if (success) {
        return Option(mapStatus)
      } else {
        if (sorter != null) {
          sorter.clean()
          sorter = null
        }
        return None
      }
    } finally {
      jedisPool.close()
    }
  }
} 
Developer ID: ambling, Project: RedisShuffleManager, Lines of code: 51, Source file: RedisShuffleWriter.scala
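
The ShuffleWriter contract also requires a write(records) method, which is elided from this excerpt. Purely to illustrate the shape such a method must take, and not the project's actual RedisSorter-based implementation, a hypothetical minimal body could push each record to a Redis list keyed by shuffle, map, and reduce partition IDs:

  // Hypothetical sketch only; the project's real write is not shown above.
  override def write(records: Iterator[Product2[K, V]]): Unit = {
    val jedis = jedisPool.getResource
    try {
      records.foreach { rec =>
        val reduceId = dep.partitioner.getPartition(rec._1)
        jedis.rpush(s"shuffle:${dep.shuffleId}:$mapId:$reduceId", s"${rec._1},${rec._2}")
      }
    } finally {
      jedis.close() // returns the connection to the pool
    }
    // a real implementation would also populate mapStatus for stop(success = true)
  }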

Example 9: UnencryptedRDD

//Package declaration and imported dependencies
package org.apache.datacommons.protectr.rdds

import com.n1analytics.paillier.{PaillierContext, PaillierPublicKey}
import org.apache.datacommons.protectr.encryptors.EncryptionKeyPair
import org.apache.datacommons.protectr.types.{CSV, FileType}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, TaskContext}


class UnencryptedRDD(parent: RDD[String], fileType: FileType = CSV)
  extends RDD[String](parent) {

  def encryptHomomorphically(keyPair: EncryptionKeyPair, columnIndex: Int)
  : HomomorphicallyEncryptedRDD = {
    val publicKey: PaillierPublicKey = keyPair.getPublicKey
    val signedContext: PaillierContext = publicKey.createSignedContext
    val encryptedRDD = this.map(row => {
      val values: Array[String] = fileType.parseRecord(row)
      val numericValue: String = values(columnIndex)
      values(columnIndex) = signedContext.encrypt(numericValue.toDouble).toString
      fileType.join(values)
    })
    new HomomorphicallyEncryptedRDD(encryptedRDD, keyPair, fileType)
  }

  @DeveloperApi
  override def compute(split: Partition, context: TaskContext): Iterator[String] = {
    parent.compute(split, context)
  }

  override protected def getPartitions: Array[Partition] = parent.partitions
} 
Developer ID: data-commons, Project: protectr, Lines of code: 34, Source file: UnencryptedRDD.scala
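
A hypothetical end-to-end use of the RDD above. The EncryptionKeyPair constructor is assumed here (it is project-internal and not shown), as are the CSV contents:

import org.apache.datacommons.protectr.encryptors.EncryptionKeyPair
import org.apache.datacommons.protectr.rdds.UnencryptedRDD
import org.apache.spark.{SparkConf, SparkContext}

object ProtectrDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("protectr").setMaster("local[2]"))
    val raw = sc.parallelize(Seq("alice,10", "bob,32"))
    val keyPair = new EncryptionKeyPair(1024) // assumed signature: key size in bits
    val encrypted = new UnencryptedRDD(raw).encryptHomomorphically(keyPair, columnIndex = 1)
    println(encrypted.sum(1)) // 42, summed under encryption and then decrypted
    sc.stop()
  }
}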

Example 10: HomomorphicallyEncryptedRDD

//Package declaration and imported dependencies
package org.apache.datacommons.protectr.rdds

import java.math.BigInteger

import com.n1analytics.paillier.{PaillierPrivateKey, EncryptedNumber}
import org.apache.datacommons.protectr.encryptors.EncryptionKeyPair
import org.apache.datacommons.protectr.types.FileType
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, TaskContext}

class HomomorphicallyEncryptedRDD
(parent: RDD[String], keyPair: EncryptionKeyPair, fileType: FileType) extends RDD[String](parent) {

  def sum(columnIndex: Int): BigInteger = {
    val finalRecord = this.reduce((firstRow, secondRow) => {
      val firstRecord: Array[String] = fileType.parseRecord(firstRow)
      val secondRecord: Array[String] = fileType.parseRecord(secondRow)
      val firstNumber: EncryptedNumber = EncryptedNumber.create(
        firstRecord(columnIndex), keyPair.getPrivateKey)
      val secondNumber: EncryptedNumber = EncryptedNumber.create(
        secondRecord(columnIndex), keyPair.getPrivateKey)
      firstRecord(columnIndex) = firstNumber.add(secondNumber).toString
      fileType.join(firstRecord)
    })
    val sum: String = fileType.parseRecord(finalRecord)(columnIndex)
    val result: EncryptedNumber = EncryptedNumber.create(sum, keyPair.getPrivateKey)
    result.decrypt(keyPair.getPrivateKey).decodeApproximateBigInteger
  }

  def decrypt(columnIndex: Int): UnencryptedRDD = {
    val privateKey: PaillierPrivateKey = keyPair.getPrivateKey
    val decryptedRDD = this.map(row => {
        val values: Array[String] = fileType.parseRecord(row)
        val encryptedNumber: EncryptedNumber = EncryptedNumber.create(
          values(columnIndex), keyPair.getPrivateKey)
        val bigInteger: BigInteger = privateKey.decrypt(encryptedNumber).decodeApproximateBigInteger
        values(columnIndex) = bigInteger.toString
        fileType.join(values)
      })
    new UnencryptedRDD(decryptedRDD, fileType)
  }

  override protected def getPartitions = parent.partitions

  @DeveloperApi
  override def compute(split: Partition, context: TaskContext): Iterator[String] = {
    parent.compute(split, context)
  }
} 
Developer ID: data-commons, Project: protectr, Lines of code: 51, Source file: HomomorphicallyEncryptedRDD.scala
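
Both RDDs rely on the additive homomorphism of the Paillier cryptosystem: combining two ciphertexts yields a ciphertext of the sum of the plaintexts, which is what EncryptedNumber.add does above. A minimal sketch of that property using the javallier library directly, with no Spark involved; PaillierPrivateKey.create and decodeDouble are assumed from the javallier API, while the other calls mirror the two examples above:

import com.n1analytics.paillier.PaillierPrivateKey

object PaillierDemo {
  def main(args: Array[String]): Unit = {
    val privateKey = PaillierPrivateKey.create(1024) // fresh 1024-bit keypair
    val context = privateKey.getPublicKey.createSignedContext
    val a = context.encrypt(20.0)
    val b = context.encrypt(22.0)
    val sum = a.add(b) // homomorphic addition on ciphertexts
    println(privateKey.decrypt(sum).decodeDouble()) // 42.0
  }
}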

Example 11: TitanRDD

//Package declaration and imported dependencies
package com.goyeau.spark.titan.connector

import com.goyeau.spark.gremlin.connector.Workaround._
import com.thinkaurelius.titan.core.{TitanFactory, TitanGraph}
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal

import scala.collection.JavaConversions._
import scala.reflect.ClassTag


class TitanRDD[E: ClassTag](sc: SparkContext,
                            traversal: GraphTraversal[_, E],
                            numSlices: Int = 0) extends RDD[E](sc, Nil) {
  private val defaultParallelism = sc.defaultParallelism
  private val readConf = GraphConf(traversal.asAdmin.getGraph.get.asInstanceOf[TitanGraph].configuration)
  
  override def compute(split: Partition, context: TaskContext): Iterator[E] = {
    val partition = split.asInstanceOf[TitanPartition[E]]
    val partitionTraversal = partition.traversal.asAdmin
    val graph = TitanFactory.open(readConf.toTitanConf)
    partitionTraversal.setGraph(graph)
    partitionTraversal.toList().toIterator
  }

  override protected def getPartitions: Array[Partition] = {
    val numElement = cloneTraversal(traversal).count().toList().head
    val numPartitions =
      if (numSlices > 0 && numElement >= numSlices) numSlices
      else if (numElement >= defaultParallelism) defaultParallelism
      else numElement.toInt
    val partitionSize = numElement / numPartitions
    
    (0 until numPartitions).toArray map { i =>
      val from = partitionSize * i
      // let the last partition absorb the remainder when numElement
      // is not evenly divisible by numPartitions
      val to = if (i == numPartitions - 1) numElement.toLong else partitionSize * (i + 1)
      val partitionTraversal = cloneTraversal(traversal).range(from, to)
      TitanPartition(i, partitionTraversal)
    }
  }
} 
Developer ID: joan38, Project: spark-titan-connector, Lines of code: 43, Source file: TitanRDD.scala
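
A hypothetical driver snippet for the connector above, assuming a reachable Titan instance; the configuration path and property key are invented:

import com.goyeau.spark.titan.connector.TitanRDD
import com.thinkaurelius.titan.core.TitanFactory
import org.apache.spark.{SparkConf, SparkContext}

object TitanRDDDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("titan-demo").setMaster("local[2]"))
    val graph = TitanFactory.open("conf/titan-cassandra.properties") // assumed config
    val traversal = graph.traversal().V().values[String]("name")     // assumed property
    val names = new TitanRDD(sc, traversal).collect()
    names.foreach(println)
    sc.stop()
  }
}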


Note: The org.apache.spark.TaskContext class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.