

Scala BufferedInputStream Class Code Examples

This article collects typical usage examples of java.io.BufferedInputStream in Scala. If you have been wondering how the Scala BufferedInputStream class is used in practice, how to call it, or what real-world examples look like, the curated class code examples below may help.


The sections below present 15 code examples of the BufferedInputStream class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Scala code examples.
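Before the collected examples, here is a minimal, self-contained sketch of the most common pattern: wrapping a FileInputStream in a BufferedInputStream and reading it to the end. The file name data.bin is only a placeholder.

import java.io.{BufferedInputStream, FileInputStream}

object MinimalBufferedRead extends App {
  // Wrap the raw file stream so that single-byte reads go through an in-memory buffer.
  val in = new BufferedInputStream(new FileInputStream("data.bin"))
  try {
    var count = 0
    var byte = in.read()          // read() returns -1 at end of stream
    while (byte != -1) {
      count += 1
      byte = in.read()
    }
    println(s"Read $count bytes")
  } finally {
    in.close()                    // always release the underlying file handle
  }
}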

Example 1: HDFS

// Package declaration and imported dependencies
package org.mireynol.util

import java.io.{BufferedInputStream, OutputStreamWriter}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.mutable.ListBuffer
import scala.io.Source

object HDFS {

  def log : Logger = LoggerFactory.getLogger( HDFS.getClass )

  val hadoop : FileSystem = {
    val conf = new Configuration( )
    conf.set( "fs.defaultFS", "hdfs://localhost:9000" )
    FileSystem.get( conf )
  }

  def readAndMap( path : String, mapper : ( String ) => Unit ) = {
    if ( hadoop.exists( new Path( path ) ) ) {
      val is = new BufferedInputStream( hadoop.open( new Path( path ) ) )
      Source.fromInputStream( is ).getLines( ).foreach( mapper )
    }
    else {
      // TODO - error logic here
    }
  }

  def write( filename : String, content : Iterator[ String ] ) = {
    val path = new Path( filename )
    val out = new OutputStreamWriter( hadoop.create( path, false ) )
    content.foreach( str => out.write( str + "\n" ) )
    out.flush( )
    out.close( )
  }

  def ls( path : String ) : List[ String ] = {
    val files = hadoop.listFiles( new Path( path ), false )
    val filenames = ListBuffer[ String ]( )
    while ( files.hasNext ) filenames += files.next( ).getPath( ).toString( )
    filenames.toList
  }

  def rm( path : String, recursive : Boolean ) : Unit = {
    if ( hadoop.exists( new Path( path ) ) ) {
      println( "deleting file : " + path )
      hadoop.delete( new Path( path ), recursive )
    }
    else {
      println( "File/Directory" + path + " does not exist" )
      log.warn( "File/Directory" + path + " does not exist" )
    }
  }

  def cat( path : String ) = Source.fromInputStream( hadoop.open( new Path( path ) ) ).getLines( ).foreach( println )

} 
Developer ID: reynoldsm88, Project: spark-drools, Lines of code: 61, Source: HDFS.scala
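A minimal usage sketch for the HDFS helper above, assuming a namenode is actually listening on hdfs://localhost:9000; the /tmp/demo path is made up for illustration.

import org.mireynol.util.HDFS

object HdfsDemo extends App {
  // Write a few lines, list the directory, read the file back, then delete it.
  HDFS.write("/tmp/demo/lines.txt", Iterator("first", "second", "third"))
  HDFS.ls("/tmp/demo").foreach(println)
  HDFS.readAndMap("/tmp/demo/lines.txt", line => println(s"read: $line"))
  HDFS.rm("/tmp/demo/lines.txt", recursive = false)
}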

Example 2: loadFromFile

// Package declaration and imported dependencies
package org.edoardo.parser

import java.io.{BufferedInputStream, FileInputStream}

import org.edoardo.segmentation.SegmentationResult


	def loadFromFile(fileName: String, ipf: VolumeIPF): SegmentationResult = {
		implicit val in = new BufferedInputStream(new FileInputStream(fileName))
		checkLineIs("MFS Text 0")
		checkLineIs("{")
		checkLineIs("Level Set 0")
		checkLineIs("{")
		
		var done = false
		var regions: List[(Int, Int)] = List()
		while (!done) {
			val line: String = readLine
			if (line == "}")
				done = true
			else {
				val splitLine: Array[String] = line.split(",")
				regions ::= (splitLine(0).drop(1).toInt, splitLine(1).dropRight(1).toInt)
			}
		}
		
		checkLineIs("}")
		
		val result: Array[Array[Array[Boolean]]] = Array.fill[Boolean](ipf.width, ipf.height, ipf.depth)(false)
		for ((x, y, z) <- regions.flatMap(region => ipf.getRegionPixels(region._1, region._2)))
			result(x)(y)(z) = true
		new SegmentationResult(result)
	}
} 
Developer ID: EdoDodo, Project: rl-segmentation, Lines of code: 35, Source: MFS.scala

Example 3: TgzDecompressTest

// Package declaration and imported dependencies
package org.dele.misc

import java.io.{BufferedInputStream, BufferedOutputStream, FileInputStream, FileOutputStream}

import org.apache.commons.compress.archivers.tar.TarArchiveInputStream
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream
import org.apache.commons.io.IOUtils


object TgzDecompressTest extends App {
  val bfFileInputStream = new BufferedInputStream(new FileInputStream("E:\\VMShare\\malware-161126-12.tgz"))

  val tarIn = new TarArchiveInputStream(new GzipCompressorInputStream(bfFileInputStream))
  var tarEntry = tarIn.getNextEntry

  var tarEntryIdx = 0
  while (tarEntry != null) {
    val fileOrDir = if (tarEntry.isDirectory) "DIR" else "FILE"
    println(s"Extracting [${tarEntry.getName}]($fileOrDir)")

    if (!tarEntry.isDirectory) {
      val bfos = new BufferedOutputStream(new FileOutputStream(f"E:\\VMShare\\tmp\\$tarEntryIdx%04d.json"))
      val bufSize = 4096
      val buf = new Array[Byte](bufSize)
      var cnt = tarIn.read(buf, 0, bufSize)
      while (cnt != -1) {
        bfos.write(buf, 0, cnt)
        cnt = tarIn.read(buf, 0, bufSize)
      }
      bfos.close()
    }
    tarEntry = tarIn.getNextEntry
    tarEntryIdx = tarEntryIdx + 1
  }

  tarIn.close()
} 
Developer ID: new2scala, Project: text-util, Lines of code: 38, Source: TgzDecompressTest.scala

Example 4: getFileStream

// Package declaration and imported dependencies
package com.danylchuk.swiftlearner.hotels

import java.io.{BufferedInputStream, InputStream}
import java.util.zip.GZIPInputStream

import scala.collection.mutable.{Map => MutableMap}
import scala.io.Source



  private lazy val testDataIdMapped: Vector[SearchRecord] = {
    testDataTyped.map { record =>
      val userCity = cityIds.getOrElse(record.userCity, 0)
      val dest = destIds.getOrElse(record.dest, 0)
      SearchRecord(userCity, record.distance, dest)
    }.toVector
  }

  private lazy val trainDataTyped: Iterator[SearchRecord] = readData(trainDataFile)

  private lazy val testDataTyped: Iterator[SearchRecord] = readData(testDataFile)

  private lazy val trainDataFile = getFileStream("train-data.csv.gz")
  private lazy val trainLabelsFile = getFileStream("train-labels.csv.gz")
  private lazy val testDataFile = getFileStream("test-data.csv.gz")
  private lazy val testLabelsFile = getFileStream("test-labels.csv.gz")

  private def getFileStream(name: String): InputStream = {
    new BufferedInputStream(new GZIPInputStream(
      this.getClass.getClassLoader.getResourceAsStream(name)))
  }

  private def readLabels(stream: InputStream): Iterator[Int] =
    Source.fromInputStream(stream, "UTF8").getLines.map(_.toInt)

  private def readData(stream: InputStream): Iterator[SearchRecord] = {
    Source.fromInputStream(stream, "UTF8").getLines.map(SearchRecord.fromString)
  }
}

case class SearchRecord(userCity: Int, distance: Double, dest: Int)
object SearchRecord {
  def fromString(s: String) = {
    val fields = s.split(',')
    val userCity = fields(0).toInt
    val distance = fields(1).toDouble
    val dest = fields(2).toInt
    SearchRecord(userCity, distance, dest)
  }
} 
Developer ID: valdanylchuk, Project: swiftlearner, Lines of code: 51, Source: SearchData.scala

Example 5: read

// Package declaration and imported dependencies
package chapter10

import java.io.{BufferedInputStream, FileInputStream, InputStream}

trait BufferedInput {
  this: InputStream =>

  val size: Int = 100
  val buffer = new BufferedInputStream(this, size)
  override def read(): Int = buffer.read
}

object Test08 extends App {
  val txtFile = getClass.getResource("/sample.txt")
  val file = new FileInputStream(new java.io.File(txtFile.getPath)) with BufferedInput

  var byte = file.read()
  while (byte != -1){
    print(byte.toChar)
    byte = file.read()
  }

  // print(Iterator.continually(file.read).takeWhile(_ != -1).map(_.toChar).mkString)
} 
Developer ID: johnnyqian, Project: scala-for-the-impatient, Lines of code: 25, Source: 08.scala

Example 6: close

// Package declaration and imported dependencies
package org.argus.jc.incremental

import java.io.{BufferedInputStream, File, IOException, InputStream}
import java.net.URL
import java.util.Properties

import scala.language.implicitConversions


package object jawa {
  type Closeable = {
    def close()
  }

  def using[A <: Closeable, B](resource: A)(block: A => B): B = {
    import _root_.scala.language.reflectiveCalls

    try {
      block(resource)
    } finally {
      resource.close()
    }
  }

  def extractor[A, B](f: A => B) = new Extractor[A, B](f)

  class Extractor[A, B](f: A => B) {
    def unapply(a: A): Some[B] = Some(f(a))
  }

  implicit def toRightBiasedEither[A, B](either: Either[A, B]): Either.RightProjection[A, B] = either.right

  implicit class PipedObject[T](val v: T) extends AnyVal {
    def |>[R](f: T => R) = f(v)
  }

  def readProperty(classLoader: ClassLoader, resource: String, name: String): Option[String] = {
    Option(classLoader.getResourceAsStream(resource))
      .flatMap(it => using(new BufferedInputStream(it))(readProperty(_, name)))
  }

  def readProperty(file: File, resource: String, name: String): Option[String] = {
    try {
      val url = new URL("jar:%s!/%s".format(file.toURI.toString, resource))
      Option(url.openStream).flatMap(it => using(new BufferedInputStream(it))(readProperty(_, name)))
    } catch {
      case _: IOException => None
    }
  }

  private def readProperty(input: InputStream, name: String): Option[String] = {
    val properties = new Properties()
    properties.load(input)
    Option(properties.getProperty(name))
  }
} 
Developer ID: arguslab, Project: argus-cit-intellij, Lines of code: 57, Source: package.scala
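A short sketch of calling the loan-pattern helpers above directly; the resource name build.properties and the key version are hypothetical.

import org.argus.jc.incremental.jawa._

object ReadPropertyDemo extends App {
  // readProperty opens the classpath resource through a BufferedInputStream,
  // loads it as java.util.Properties, and closes the stream via `using`.
  val version: Option[String] =
    readProperty(getClass.getClassLoader, "build.properties", "version")
  println(version.getOrElse("property not found"))
}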

Example 7: Util

// Package declaration and imported dependencies
package trap.file
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
//import java.io.BufferedReader
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.FileWriter
import java.io.InputStream
//import java.io.InputStreamReader
import java.io.PrintWriter
import org.apache.commons.io.FileUtils
import trap.Util._

object Util {

  
  def appendToTextFile(fileName:String, textData:String) = using (new FileWriter(fileName, true)){
    fileWriter => using (new PrintWriter(fileWriter)) {
      printWriter => printWriter.println(textData)
    }
  }

  def readNthLine(srcFile:String, lineNo:Int) = try {
    using (scala.io.Source.fromFile(srcFile)){
      f => f.getLines.toArray.apply(lineNo-1)
//      f => f.getLine(lineNo)
    }
  } catch {
    case e:Throwable => ""
  }
  def readTextFileToString(fileName:String) = try {
    using (scala.io.Source.fromFile(fileName)) {
      x => x.mkString
    }
  } catch {
    case e:Throwable => ""
  }
  def readInputStreamToBytes(is:InputStream) =
    Stream.continually(is.read).takeWhile(-1 !=).map(_.toByte).toArray
  def readBinaryFileToBytes(fileName:String) = 
    using (new BufferedInputStream(new FileInputStream(fileName))) {
      bis => readInputStreamToBytes(bis) //Stream.continually(bis.read).takeWhile(-1 !=).map(_.toByte).toArray
    }
  
  def getAllFiles(dir:String, extensions:Array[String], recursive:Boolean) = try {
    FileUtils.listFiles(new File(dir), extensions, recursive).toArray.map(_.toString)
  } catch {
    case ex:Throwable => 
      ex.printStackTrace
      Array[String]()
  }
  def readBinaryFileToString(fileName:String) = new String(readBinaryFileToBytes(fileName))

} 
Developer ID: scalahub, Project: acc, Lines of code: 56, Source: Util.scala
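A brief usage sketch for the helpers above. Note that `using` comes from trap.Util, which is not shown in this excerpt; log.txt is a placeholder file name.

import trap.file.Util

object FileUtilDemo extends App {
  Util.appendToTextFile("log.txt", "hello buffered world")
  println(Util.readNthLine("log.txt", 1))            // prints the line just appended
  val bytes = Util.readBinaryFileToBytes("log.txt")
  println(s"log.txt is ${bytes.length} bytes long")
}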

Example 8: FileAssistant

// Package declaration and imported dependencies
package com.github.cuzfrog.utils

import java.io.FileInputStream
import java.io.BufferedInputStream
import java.io.FileOutputStream
import java.io.BufferedOutputStream

private[cuzfrog] object FileAssistant {
  def bytesFromFile(path: String): Array[Byte] = {
    val bis = new BufferedInputStream(new FileInputStream(path))
    val byteArray = Stream.continually(bis.read).takeWhile(_ != -1).map(_.toByte).toArray
    bis.close()
    byteArray
  }

  def bytesToFile(path: String, data: Array[Byte]): Unit = {
    val bos = new BufferedOutputStream(new FileOutputStream(path))
    Stream.continually(bos.write(data))
    bos.close()
  }

  
  def pathParse(path: String): (String, String, String) = {
    val PathParser = """(.*[\\/])(.*)(\.[\d\w]*)""".r
    path match {
      case PathParser(p, fn, s) => (p, fn, s)
      case _ => throw new IllegalArgumentException("Bad file path:"+path)
    }
  }
} 
Developer ID: cuzfrog, Project: excela, Lines of code: 31, Source: FileAssistant.scala

Example 9: FileAssistant

// Package declaration and imported dependencies
package com.github.cuzfrog.utils

import java.io.{BufferedInputStream, BufferedOutputStream, FileInputStream, FileOutputStream}

private[cuzfrog] object FileAssistant {
  def bytesFromFile(path: String): Array[Byte] = {
    val bis = new BufferedInputStream(new FileInputStream(path))
    val byteArray = Stream.continually(bis.read).takeWhile(_ != -1).map(_.toByte).toArray
    bis.close()
    byteArray
  }

  def bytesToFile(path: String, data: Array[Byte]): Unit = {
    val bos = new BufferedOutputStream(new FileOutputStream(path))
    Stream.continually(bos.write(data))
    bos.close()
  }

  
  def pathParse(path: String): (String, String, String) = {
    val PathParser = """(.*[\\/])(.*)(\.[\d\w]*)""".r
    path match {
      case PathParser(p, fn, s) => (p, fn, s)
      case _ => throw new IllegalArgumentException("Bad file path:"+path)
    }
  }
} 
Developer ID: cuzfrog, Project: excela, Lines of code: 28, Source: FileAssistant.scala
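Examples 8 and 9 are the same object from the excela project, differing only in import style. A hedged round-trip sketch follows; because FileAssistant is declared private[cuzfrog], the caller must live under the com.github.cuzfrog package, and /tmp/demo.bin is a placeholder path.

package com.github.cuzfrog.demo

import com.github.cuzfrog.utils.FileAssistant

object FileAssistantDemo extends App {
  val data = "buffered streams".getBytes("UTF-8")
  FileAssistant.bytesToFile("/tmp/demo.bin", data)               // write the bytes out
  val readBack = FileAssistant.bytesFromFile("/tmp/demo.bin")    // read them back
  println(readBack.sameElements(data))                           // true
  println(FileAssistant.pathParse("/tmp/demo.bin"))              // (/tmp/,demo,.bin)
}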

Example 10: obfuscate

// Package declaration and imported dependencies
package com.github.cuzfrog.utils

import java.io.{BufferedInputStream, BufferedOutputStream, FileInputStream, FileOutputStream}
import java.security.InvalidKeyException
import java.util.Random
import javax.crypto.{BadPaddingException, IllegalBlockSizeException}


  @deprecated("Drop this functionality", "0.2.0")
  def obfuscate(path: String, keys: List[Array[Byte]]): Array[Byte] = {

    val bis = new BufferedInputStream(new FileInputStream(path))
    val byteArray = Stream.continually(bis.read).takeWhile(_ != -1).map(_.toByte).toArray
    bis.close() //read the file
    val (didSucceed, decryptedString) = try {
        (true, decrypt(byteArray, keys))
      } catch {
        case e @ (_: InvalidKeyException | _: IllegalBlockSizeException) => (false, null)
      }
    val unencrypted = if (didSucceed) decryptedString //return the unencrypted data
    else {
      val encrypted = encrypt(byteArray, keys(new Random(System.currentTimeMillis()).nextInt(keys.size)))
      val bos = new BufferedOutputStream(new FileOutputStream(path))
      Stream.continually(bos.write(encrypted))
      bos.close() //obfuscate the file/path
      byteArray //return the unencrypted data
    }
    unencrypted
  }

  private def decrypt(encrypted: Array[Byte], keys: List[Array[Byte]]): Array[Byte] = {
    keys.foreach {
      key =>
        try {
          return EncryptTool.decrypt(encrypted, key)
        } catch {
          case _: InvalidKeyException | _: BadPaddingException => //try every key.
        }
    }
    throw new InvalidKeyException("All keys have been tried, decrypt failed.")
  }

  private def encrypt(input: Array[Byte], key: Array[Byte]): Array[Byte] = try {
    EncryptTool.encrypt(input, key)
  } catch {
    case e: Throwable => throw new AssertionError("Encrypt failed, cause:" + e.toString + "|" + e.getMessage)
  }
} 
Developer ID: cuzfrog, Project: maila, Lines of code: 49, Source: ObfuscateTool.scala

Example 11: Main

// Package declaration and imported dependencies
package mesosphere.crimedemo

import java.io.BufferedInputStream
import java.net.URI
import java.util.zip.GZIPInputStream

import org.tukaani.xz.XZInputStream

import scala.io.Source

object Main {

  lazy val log = org.slf4j.LoggerFactory.getLogger(getClass.getName)

  def main(args: Array[String]): Unit = {
    val conf = new Conf(args)
    val publisher = new KafkaPublisher(conf.brokers())
    val topic = conf.topic()
    val sleep = 1000L / conf.eventsPerSecond()
    val uri = new URI(conf.uri())
    val inputStream = new BufferedInputStream(uri.toURL.openStream())

    val wrappedStream = if (conf.uri().endsWith(".gz")) {
      new GZIPInputStream(inputStream)
    }
    else if (conf.uri().endsWith(".xz")) {
      new XZInputStream(inputStream)
    }
    else {
      inputStream
    }
    val source = Source.fromInputStream(wrappedStream)

    var done = 0

    log.info(s"Reading crime from ${conf.uri()} and publishing to ${conf.brokers()} every ${sleep}ms")

    source.getLines().foreach(line => {
      publisher.publishKafka(topic, line.getBytes)
      done += 1

      if (done % 1000 == 0) {
        log.info(s"$done lines done")
      }

      Thread.sleep(sleep)
    })

    log.info(s"$done lines done")
  }
} 
Developer ID: sorididim11, Project: dcos-iot-demo, Lines of code: 52, Source: Main.scala

Example 12: CommitLogFileIterator

// Package declaration and imported dependencies
package com.bwsw.commitlog.filesystem

import java.io.{BufferedInputStream, File, FileInputStream}
import java.util.Base64

import scala.collection.mutable.ArrayBuffer


class CommitLogFileIterator(path: String) extends Iterator[Array[Byte]] {
  private val fileInputStream = new FileInputStream(new File(path))
  private val stream = new BufferedInputStream(fileInputStream)
  require {
    val begin = stream.read()
    begin == (0: Byte) || begin == -1
  }

  override def hasNext(): Boolean = {
    if (stream.available() > 0) true
    else {
      close()
      false
    }
  }

  private var isNotClosed = true
  def close(): Unit = {
    if (isNotClosed) {
      stream.close()
      fileInputStream.close()
      isNotClosed = false
    }
  }

  override def next(): Array[Byte] = {
    if (!hasNext()) throw new NoSuchElementException

    val record = new ArrayBuffer[Byte]()
    var byte = -1
    while ( {
      byte = stream.read()
      byte != -1 && byte != 0
    }) {
      record += byte.toByte
    }
    Base64.getDecoder.decode(record.toArray)
  }
} 
Developer ID: bwsw, Project: journaled-commit-log, Lines of code: 48, Source: CommitLogFileIterator.scala
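A small sketch of draining a commit-log file with the iterator above. The path is hypothetical, and the file must follow the expected layout (records separated by zero bytes, each record Base64-encoded).

import com.bwsw.commitlog.filesystem.CommitLogFileIterator

object CommitLogDemo extends App {
  val records = new CommitLogFileIterator("/var/data/commitlog/0.dat")
  try {
    // The iterator yields each decoded record as an Array[Byte].
    records.foreach(bytes => println(s"record of ${bytes.length} bytes"))
  } finally {
    records.close()
  }
}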

Example 13: CommitLogIterator

// Package declaration and imported dependencies
package com.bwsw.commitlog.filesystem

import java.io.BufferedInputStream

import com.bwsw.commitlog.{CommitLogRecord, CommitLogRecordHeader}
import CommitLogIterator.EOF

abstract class CommitLogIterator extends Iterator[Either[NoSuchElementException, CommitLogRecord]] {
  protected val stream: BufferedInputStream

  override def hasNext(): Boolean = {
    if (stream.available() > 0) true
    else false
  }

  def close():Unit = {
    stream.close()
  }

  override def next(): Either[NoSuchElementException, CommitLogRecord] = {
    if (!hasNext()) Left(new NoSuchElementException("There is no next commit log record!"))
    else {
      val recordWithoutMessage = new Array[Byte](CommitLogRecord.headerSize)
      var byte = stream.read(recordWithoutMessage)
      if (byte != EOF && byte == CommitLogRecord.headerSize) {
        val header = CommitLogRecordHeader.fromByteArray(recordWithoutMessage)
        val message = new Array[Byte](header.messageLength)
        byte = stream.read(message)
        if (byte != EOF && byte == header.messageLength) {
          Right(CommitLogRecord(header.id, header.messageType, message, header.timestamp))
        } else {
          Left(new NoSuchElementException("There is no next commit log record!"))
        }
      } else {
        Left(new NoSuchElementException("There is no next commit log record!"))
      }
    }
  }
}

private object CommitLogIterator {
  val EOF:Int = -1
} 
Developer ID: bwsw, Project: tstreams-transaction-server, Lines of code: 44, Source: CommitLogIterator.scala

Example 14: TermIdf

// Package declaration and imported dependencies
package edu.emory.mathcs.ir.liveqa.util

import java.io.{BufferedInputStream, BufferedReader, FileInputStream, InputStreamReader}
import java.util.zip.GZIPInputStream

import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging


object TermIdf extends LazyLogging {
  val cfg = ConfigFactory.load()

  val termCounts = loadIdfData()
  assert(termCounts.contains(""))
  val docCount = termCounts.get("").get

  private def loadIdfData(): Map[String, Long] = {
    logger.info("Reading term document count information")
    val reader = scala.io.Source.fromInputStream(
      new GZIPInputStream(
        new BufferedInputStream(
          new FileInputStream(
            cfg.getString("qa.term_doccount_file")))))

    val data = reader.getLines.map { line =>
      val fields = line.split("\t")
      if (fields.length > 1)
        (fields(0).toLowerCase, fields(1).toLong)
      else
        ("", fields(0).toLong)
    }.toList
    reader.close()
    val termCounts = data.groupBy(_._1).mapValues(l => l.map(_._2).sum)
    logger.info("Done reading term counts")

    termCounts
  }

  def apply(term: String): Float = {
    val tf = termCounts.getOrElse(term, 1L)
    math.log((docCount - tf + 0.5f) / (tf + 0.5)).toFloat
  }
} 
Developer ID: emory-irlab, Project: liveqa, Lines of code: 44, Source: TermIdf.scala

Example 15: computeFeatures

// Package declaration and imported dependencies
package edu.emory.mathcs.ir.liveqa.scoring.features

import java.io.{BufferedInputStream, FileInputStream}
import java.util.zip.GZIPInputStream

import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
import edu.emory.mathcs.ir.liveqa.base.{AnswerCandidate, Question}
import edu.emory.mathcs.ir.liveqa.util.NlpUtils


  override def computeFeatures(question: Question, answer: AnswerCandidate): Map[String, Float] = {
    val titleTerms = NlpUtils.getLemmas(question.titleNlp).toSet
    val bodyTerms = NlpUtils.getLemmas(question.bodyNlp).toSet
    val answerTerms = NlpUtils.getLemmas(answer.textNlp).toSet

    val npmi = titleTerms
      .flatMap(titleTerm => answerTerms.map((titleTerm, _)))
      .filter(npmiDict.contains)
      .map(npmiDict.getOrElse(_, 0.0))

    val features1 = Map[String, Float](
      "NpmiPositive" -> npmi.count(_ > 0.0),
      "NpmiNegative" -> npmi.count(_ < 0.0),
      "Npmi>0.9" -> npmi.count(_ > 0.9),
      "Npmi>0.8" -> npmi.count(_ > 0.8),
      "Npmi>0.7" -> npmi.count(_ > 0.7),
      "Npmi>0.6" -> npmi.count(_ > 0.6),
      "Npmi>0.5" -> npmi.count(_ > 0.5),
      "Npmi>0.4" -> npmi.count(_ > 0.4),
      "Npmi>0.3" -> npmi.count(_ > 0.3),
      "Npmi>0.2" -> npmi.count(_ > 0.2),
      "Npmi>0.1" -> npmi.count(_ > 0.1)
    )

    val features2 = if (npmi.nonEmpty) Map(
      "MaxNpmi" -> npmi.max.toFloat,
      "MinNpmi" -> npmi.min.toFloat,
      "AverageNpmi" -> npmi.sum.toFloat / npmi.size
    )
    else Map[String, Float]()

    features1 ++ features2
  }

  def readDictionary(file: String): Map[(String, String), Double] = {
    logger.info("Reading npmi dictionary...")
    val res = scala.io.Source.fromInputStream(
      new GZIPInputStream(
        new BufferedInputStream(
          new FileInputStream(file))))
      .getLines().map(_.split("\t")).map(f => (f(0), f(1)) -> f(2).toDouble).toMap
    logger.info("Done reading npmi dictionary...")

    res
  }
} 
Developer ID: emory-irlab, Project: liveqa, Lines of code: 58, Source: NpmiDictFeatures.scala


Note: the java.io.BufferedInputStream class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; for distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.