本文整理汇总了Scala中org.apache.hadoop.mapred.FileInputFormat类的典型用法代码示例。如果您正苦于以下问题:Scala FileInputFormat类的具体用法?Scala FileInputFormat怎么用?Scala FileInputFormat使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了FileInputFormat类的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Scala代码示例。
示例1: WikiCounter
// Package declaration and imports of the required dependency classes
package main
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.{FileInputFormat, JobConf}
import org.apache.hadoop.streaming.StreamXmlRecordReader
import org.apache.hadoop.util.StringUtils
import org.apache.spark.{SparkContext, SparkConf}
import scala.xml.XML
object WikiCounter {

  // Defaults preserve the original hard-coded paths when no CLI args are given.
  private val DefaultInputPath =
    "file:///Users/di/Books/enwiki-20160204-pages-articles1.xml-p000000010p000030302"
  private val DefaultOutputPath = "/tmp/wiki_pages"

  /**
   * Reads a Wikipedia XML dump via the Hadoop streaming XML record reader,
   * extracts each page's revision text, tokenizes it, and saves a 10% random
   * sample of the distinct tokens (length > 2), sorted by descending length.
   *
   * @param args optional overrides: args(0) = input path, args(1) = output path
   */
  def main(args: Array[String]): Unit = {
    val inputPath  = if (args.length > 0) args(0) else DefaultInputPath
    val outputPath = if (args.length > 1) args(1) else DefaultOutputPath

    val conf = new SparkConf().setAppName("Word Counter")
    val sc = new SparkContext(conf)
    try {
      // Configure the streaming record reader so that every <page>...</page>
      // element of the dump is delivered as a single record.
      val jobConf = new JobConf()
      jobConf.set("stream.recordreader.class",
        "org.apache.hadoop.streaming.StreamXmlRecordReader")
      jobConf.set("stream.recordreader.begin", "<page")
      jobConf.set("stream.recordreader.end", "</page>")
      FileInputFormat.addInputPaths(jobConf, inputPath)

      // Each record's key (_._1) holds the raw <page> XML; the value is unused.
      val wikiDocuments = sc.hadoopRDD(jobConf,
        classOf[org.apache.hadoop.streaming.StreamInputFormat],
        classOf[Text], classOf[Text])
        .map(_._1.toString)

      // Extract the wikitext body from each page's <revision><text> element.
      val rawWikiPages = wikiDocuments.map { wikiString =>
        val wikiXml = XML.loadString(wikiString)
        (wikiXml \ "revision" \ "text").text
      }

      // Replace punctuation, pipes and digits with spaces, then split into words.
      // NOTE: inside a character class '|' is a literal; the original regex
      // repeated it between every member — this is the identical character set.
      val tokenizedWikiData = rawWikiPages
        .map(_.replaceAll("[.,'\"?()_|0-9]", " ").trim)
        .flatMap(_.split("\\W+"))
        .filter(_.length > 2)

      // Deduplicate, order longest-first, and keep a 10% sample without replacement.
      val sortedByLength = tokenizedWikiData.distinct
        .sortBy(_.length, ascending = false)
        .sample(withReplacement = false, fraction = 0.1)

      sortedByLength.saveAsTextFile(outputPath)
    } finally {
      sc.stop() // always release the SparkContext, even if the job fails
    }
  }
}