This article collects typical usage examples of the org.apache.hadoop.mapreduce.TaskAttemptContext class in Scala. If you are wondering what TaskAttemptContext is for, or how it is used in practice, the selected class examples here may help.
Three code examples of the TaskAttemptContext class are shown below, ordered by popularity by default.
Example 1: ParquetOutputWriter
// Package declaration and imported dependencies
package com.newegg.eims.DataPorter.Parquet
import com.newegg.eims.DataPorter.Base.{DataSetSchema, IDataRow}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.{JobConf, TaskAttemptContextImpl}
import org.apache.hadoop.mapreduce.TaskAttemptContext
import parquet.hadoop.ParquetOutputFormat
import parquet.hadoop.api.WriteSupport
class ParquetOutputWriter(dataSetSchema: DataSetSchema, path: Path, conf: JobConf) {

  // Output format that always uses the supplied write support and writes to a fixed file path,
  // instead of letting ParquetOutputFormat derive them from the configuration.
  class IDataRowParquetOutputFormat(support: ParquetWriteSupport, filePath: Path)
    extends ParquetOutputFormat[IDataRow]() {

    override def getWriteSupport(configuration: Configuration): WriteSupport[IDataRow] = support

    override def getDefaultWorkFile(context: TaskAttemptContext, extension: String): Path = filePath
  }

  // A minimal task attempt context is sufficient here because the target file is fixed.
  private val context = new TaskAttemptContextImpl(conf, new org.apache.hadoop.mapred.TaskAttemptID())

  private val formatter = {
    val support = new ParquetOutputFormat[IDataRow]().getWriteSupport(conf).asInstanceOf[ParquetWriteSupport]
    support.setSchema(dataSetSchema)
    new IDataRowParquetOutputFormat(support, path)
  }

  private val recordWriter = formatter.getRecordWriter(context)

  // The key is ignored; only the IDataRow value is written.
  def write(row: IDataRow): Unit = recordWriter.write(null, row)

  def close(): Unit = recordWriter.close(context)
}
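A minimal usage sketch follows; it is not part of the original example, and `schema` (a DataSetSchema), `rows` (a collection of IDataRow), and the output path are placeholder names assumed for illustration:

// Hypothetical driver code: schema and rows are assumed to come from the DataPorter project.
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapred.JobConf

val conf = new JobConf()
val writer = new ParquetOutputWriter(schema, new Path("/tmp/example.parquet"), conf)
rows.foreach(writer.write) // write each IDataRow as a Parquet record
writer.close()             // close the underlying Parquet record writer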
Example 2: SuccinctHadoopMapReduceUtil
// Package declaration and imported dependencies
package edu.berkeley.cs.succinct.annot.serde
import java.lang.{Boolean => JBoolean, Integer => JInteger}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.{TaskAttemptContext, TaskAttemptID}
object SuccinctHadoopMapReduceUtil {

  private def firstAvailableClass(first: String, second: String): Class[_] = {
    try {
      Class.forName(first)
    } catch {
      case e: ClassNotFoundException =>
        Class.forName(second)
    }
  }

  def newTaskAttemptID(jtIdentifier: String, jobId: Int, isMap: Boolean, taskId: Int,
                       attemptId: Int): TaskAttemptID = {
    val klass = Class.forName("org.apache.hadoop.mapreduce.TaskAttemptID")
    try {
      // First, attempt to use the old-style constructor that takes a boolean isMap
      // (not available in YARN)
      val ctor = klass.getDeclaredConstructor(classOf[String], classOf[Int], classOf[Boolean],
        classOf[Int], classOf[Int])
      ctor.newInstance(jtIdentifier, new JInteger(jobId), new JBoolean(isMap), new JInteger(taskId),
        new JInteger(attemptId)).asInstanceOf[TaskAttemptID]
    } catch {
      case exc: NoSuchMethodException =>
        // If that failed, look for the new constructor that takes a TaskType (not available in 1.x)
        val taskTypeClass = Class.forName("org.apache.hadoop.mapreduce.TaskType")
          .asInstanceOf[Class[Enum[_]]]
        val taskType = taskTypeClass.getMethod("valueOf", classOf[String]).invoke(
          taskTypeClass, if (isMap) "MAP" else "REDUCE")
        val ctor = klass.getDeclaredConstructor(classOf[String], classOf[Int], taskTypeClass,
          classOf[Int], classOf[Int])
        ctor.newInstance(jtIdentifier, new JInteger(jobId), taskType, new JInteger(taskId),
          new JInteger(attemptId)).asInstanceOf[TaskAttemptID]
    }
  }

  def newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext = {
    val klass = firstAvailableClass(
      "org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl", // hadoop2, hadoop2-yarn
      "org.apache.hadoop.mapreduce.TaskAttemptContext")          // hadoop1
    val ctor = klass.getDeclaredConstructor(classOf[Configuration], classOf[TaskAttemptID])
    ctor.newInstance(conf, attemptId).asInstanceOf[TaskAttemptContext]
  }
}
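A brief usage sketch, assuming arbitrary illustrative values for the job tracker identifier and the ids (these are not taken from the original project):

// Hypothetical call site: build a map-side attempt id and a matching context.
import org.apache.hadoop.conf.Configuration

val conf = new Configuration()
// Arguments: (jtIdentifier, jobId, isMap, taskId, attemptId)
val attemptId = SuccinctHadoopMapReduceUtil.newTaskAttemptID("local", 0, true, 0, 0)
val context = SuccinctHadoopMapReduceUtil.newTaskAttemptContext(conf, attemptId)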
Example 3: SuccinctAnnotationOutputFormat
// Package declaration and imported dependencies
package edu.berkeley.cs.succinct.annot.serde
import java.io.File
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.{RecordWriter, TaskAttemptContext}
class SuccinctAnnotationOutputFormat
  extends FileOutputFormat[NullWritable, (Int, Iterator[(String, String, String)])] {

  override def getRecordWriter(job: TaskAttemptContext):
      RecordWriter[NullWritable, (Int, Iterator[(String, String, String)])] = {
    val conf = job.getConfiguration
    // Tunables read from the job configuration, with defaults.
    val ignoreParseErrors = conf.get("succinct.annotations.ignoreParseErrors", "true").toBoolean
    val serializeInMemory = conf.get("succinct.annotations.serializeInMemory", "true").toBoolean
    val dirs = conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")).split(",")
    println("ignoreParseErrors = " + ignoreParseErrors + " serializeInMemory = " + serializeInMemory
      + " Spark local dir = " + dirs(0) + " persistInMemory = false")
    val path = FileOutputFormat.getOutputPath(job)
    new SuccinctAnnotationRecordWriter(path, ignoreParseErrors, conf, (serializeInMemory, new File(dirs(0))))
  }
}
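A hedged setup sketch showing how such an output format is typically wired into a Hadoop job; the output path and configuration value below are placeholders, not taken from the original project:

// Hypothetical job setup: register the custom output format and choose an output directory.
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat

val job = Job.getInstance(new Configuration())
job.getConfiguration.set("succinct.annotations.ignoreParseErrors", "true")
job.setOutputFormatClass(classOf[SuccinctAnnotationOutputFormat])
FileOutputFormat.setOutputPath(job, new Path("/tmp/succinct-annotations-out"))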