

Scala BufferedWriter Class Code Examples

This article collects typical usage examples of the java.io.BufferedWriter class in Scala. If you are wondering what the BufferedWriter class is for, or how to use BufferedWriter from Scala, the curated examples here may help.

The following presents 15 code examples of the BufferedWriter class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Scala code examples.
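All the examples share one basic pattern: wrap a FileWriter in a BufferedWriter, write, and close. A minimal sketch of that pattern (the path and object name here are illustrative, not taken from any example below):

import java.io.{BufferedWriter, File, FileWriter}

object BufferedWriterSketch {
  def main(args: Array[String]): Unit = {
    val bw = new BufferedWriter(new FileWriter(new File("/tmp/sketch.txt")))
    try {
      bw.write("hello")
      bw.newLine() // platform-specific line separator
    } finally {
      bw.close() // close() flushes the buffer before releasing the file handle
    }
  }
}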

Example 1: copyDataStructures

// Set the package name and import the required classes
package ppl.delite.framework.codegen

import java.io.{BufferedWriter, FileWriter, File}


trait Utils {
  def copyDataStructures(from: String, to: String, dsmap: String => String = s => s) {
    val dsDir = new File(from)
    if (!dsDir.exists) return
    val outDir = new File(to)
    outDir.mkdirs()
    copyDirectory(dsDir)

    def copyDirectory(dir: File) {
      for (f <- dir.listFiles) {
        if (f.isDirectory) 
          copyDirectory(f)
        else {
          // Note: files from subdirectories are flattened into the single output directory.
          val outFile = to + File.separator + f.getName
          val out = new BufferedWriter(new FileWriter(outFile))
          val src = io.Source.fromFile(f)
          for (line <- src.getLines) {
            // Apply the caller-supplied remapping, then rewrite the datastruct package prefix.
            val remappedLine = dsmap(line).replaceAll("ppl.delite.framework.datastruct", "generated")
            out.write(remappedLine + System.getProperty("line.separator"))
          }
          src.close() // close the Source so the input file handle is released
          out.close()
        }
      }
    }
  }
} 
Author: leratojeffrey, Project: OptiSDR-Compiler, Lines: 32, Source: Utils.scala

Example 2: FileReaderTest4SourceAndClosing2

// Set the package name and import the required classes
import java.io.{BufferedWriter, FileWriter}

import org.scalatest._

import scala.io.Source

class FileReaderTest4SourceAndClosing2 extends FlatSpec with Matchers {
  "Hello" should "have tests" in {

    implicit def toClosingSource(source: Source) = new {
      val lines = source.getLines

      def getLinesAndClose() = new Iterator[String] {
        def hasNext = if (!lines.hasNext) {source.close; false} else true
        def next = lines.next
      }
    }

    val w = new BufferedWriter(new FileWriter("/tmp/csv13.txt"))

    (9 to 12).map(i => s"""/tmp/csv${i}.txt""").foreach(fn => {
        Source.fromFile(fn).getLinesAndClose().foreach(ln => {
          w.write(ln)
          w.write("\r\n")
        })
      }
    )

    w.close() // flush and close the writer; without this the buffered output may never reach disk


  }
} 
Author: ralreiroe, Project: embarcadero, Lines: 32, Source: FileReaderTest4SourceAndClosing2.scala
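The hand-rolled closing iterator above predates Scala 2.13. On 2.13 and later, the standard library's scala.util.Using gives the same resource safety; a sketch under that assumption (the path is illustrative):

import scala.io.Source
import scala.util.Using

object UsingSketch {
  def main(args: Array[String]): Unit =
    // Using.resource closes the Source when the block exits, even on an exception.
    Using.resource(Source.fromFile("/tmp/csv9.txt")) { source =>
      source.getLines().foreach(println)
    }
}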

Example 3: HTMLFile

// Set the package name and import the required classes
package ch.epfl.sbtplugin

import java.io.{BufferedWriter, File, FileWriter}

object HTMLFile {

  val content =
    """
      |<!DOCTYPE html>
      |<html>
      |<meta charset="UTF-8">
      |<title>Scala.js Call Graph Visualization</title>
      |<link rel="stylesheet" type="text/css" href="https://rawgit.com/lionelfleury/scala-js-call-graph/release/style.css">
      |<body>
      |<div id="header"><h1>Scala.js Call Graph Visualization</h1></div>
      |<div id="nav" style="overflow:auto"></div>
      |<div id="main" style="overflow:auto"></div>
      |<script type="text/javascript" src="https://rawgit.com/lionelfleury/scala-js-call-graph/release/scalajs-callgraph-opt.js"></script>
      |<script type="text/javascript" src="https://rawgit.com/lionelfleury/scala-js-call-graph/release/scalajs-callgraph-jsdeps.min.js"></script>
      |<script type="text/javascript" src="https://rawgit.com/lionelfleury/scala-js-call-graph/release/scalajs-callgraph-launcher.js"></script>
      |</body>
      |</html>
    """.stripMargin

  def writeToFile(file: File): Unit = {
    val bw = new BufferedWriter(new FileWriter(file))
    bw.write(content)
    bw.flush()
    bw.close()
  }

} 
Author: lionelfleury, Project: scala-js-call-graph, Lines: 33, Source: HTMLFile.scala
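A hypothetical invocation of the helper above (the target path is illustrative):

import java.io.File
import ch.epfl.sbtplugin.HTMLFile

object WritePage {
  def main(args: Array[String]): Unit =
    HTMLFile.writeToFile(new File("target/callgraph.html")) // writes the static visualization page
}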

Example 4: CSVWriteBufOwnTest

// Set the package name and import the required classes
import java.io.{PrintWriter, BufferedWriter, FileWriter}

import com.github.tototoshi.csv.{CSVWriter, DefaultCSVFormat}
import org.scalatest._

class CSVWriteBufOwnTest extends FlatSpec with Matchers {
  "Hello" should "have tests" in {

    val bw = new PrintWriter(new BufferedWriter(new FileWriter("/tmp/csv2.txt"), 8192*256*4))

    lazy val doit = {
      println("working")
      (1 to 1000000).foreach(i => {
        bw.print(List(1, 2, 3, 4, 5, 6).mkString(","))
        bw.print("\r\n")
      })
      println("finished")
    }

    def timeSpentDoing(f: => Unit) = {

      val start = System.currentTimeMillis
      println(start)
      f
      println(System.currentTimeMillis - start)
    }

    timeSpentDoing(doit)

    bw.close() // flush the 8 MB buffer; without this the buffered tail of the file is lost
  }
} 
Author: ralreiroe, Project: embarcadero, Lines: 32, Source: CSVWriteBufOwnTest.scala

Example 5: CSVWriteBufOwnParallelTest

// Set the package name and import the required classes
import java.io.{BufferedWriter, FileWriter, PrintWriter}
import java.util.concurrent.Executors

import org.scalatest._

import scala.concurrent.{ExecutionContext, Await, Future}
import scala.concurrent.duration.Duration
import scala.util.{Success, Failure}

class CSVWriteBufOwnParallelTest extends FlatSpec with Matchers {
  "Hello" should "have tests" in {


    lazy val doit: Int => Unit = i => {
      val bw = new PrintWriter(new BufferedWriter(new FileWriter(s"""/tmp/csv${i}.txt"""), 8192 * 256 * 4))
      (1 to 10000000).foreach(_ => { // underscore: a named counter here would shadow the outer file index i
        bw.print(List(1, 2, 3, 4, 5, 6).mkString(","))
        bw.print("\r\n")
      })
      bw.close()
    }

    def timeSpentDoing(f: => Unit) = {
      val start = System.currentTimeMillis
      println(start)
      f
      System.currentTimeMillis - start
    }


    implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(5))

    val futures = (9 to 12).map(n => Future{ timeSpentDoing( doit(n) ) })
    val combined = Future.sequence(futures)
    Await.ready(combined, Duration.Inf)
    combined onComplete {
      case Success(durations) => println(durations) // per-file elapsed times in milliseconds
      case Failure(ex) => println(ex)
    }
  }
} 
Author: ralreiroe, Project: embarcadero, Lines: 46, Source: CSVWriteBufOwnParallelTest.scala

Example 6: FileReaderTest3Iterator

// Set the package name and import the required classes
import java.io.{BufferedWriter, BufferedReader, FileReader, FileWriter}

import org.scalatest._

class FileReaderTest3Iterator extends FlatSpec with Matchers {
  "Hello" should "have tests" in {


    def getContents(fileName: String): Iterator[String] = {
      val fr = new BufferedReader(new FileReader(fileName))
      def iterator = new Iterator[String] {
        def hasNext = line != null

        def next = {
          val retVal = line
          line = getLine
          retVal
        }

        def getLine = {
          var line: String = null
          try {
            line = fr.readLine
          } catch {
            case _: Throwable => line = null; fr.close()
          }
          if (line == null) fr.close() // also close on normal end-of-file, not only on exception (close() is idempotent)
          line
        }

        var line = getLine
      }
      iterator
    }

    val w = new BufferedWriter(new FileWriter("/tmp/csv4.txt"))

    Seq("/tmp/csv.txt", "/tmp/csv2.txt").foreach(fn => {
      getContents(fn).foreach(ln => {
        w.write(ln)
        w.write("\r\n")
      })
    }
    )

    w.close() // flush and close the output file
  }
} 
Author: ralreiroe, Project: embarcadero, Lines: 48, Source: FileReaderTest3Iterator.scala

Example 7: HanaActivatorTester

// Set the package name and import the required classes
import java.io.{BufferedWriter, File, FileWriter}
import java.nio.file.{Files, Paths}


object HanaActivatorTester {
  private val extensions = List("xsapp",
    "xsprivileges",
    "hdbstructure",
    "hdbprocedure",
    "xsjslib",
    "html",
    "hdbrole",
    "xsaccess")

  def main(args: Array[String]): Unit = {
    val workPath = Paths.get(".").toAbsolutePath.normalize.toString
    val filename = "dummy"
    extensions.foreach {
      e =>
        val helloPath = workPath + File.separator + filename + "." + e
        val bw = new BufferedWriter(new FileWriter(new File(helloPath), true))
        bw.write(" ")
        bw.close()
    }
    val workDir = new File(workPath)
    // HanaExtensionComparator is defined elsewhere in this project.
    val orderedExtension = workDir.listFiles.toList.sorted(new HanaExtensionComparator)
    extensions.foreach { e =>
      val helloPath = workPath + File.separator + filename + "." + e
      Files.deleteIfExists(Paths.get(helloPath))
    }
  }
} 
Author: janosbinder, Project: hana-activator, Lines: 33, Source: HanaActivatorTester.scala

Example 8: LogAnalyzerExportSmallData

// Set the package name and import the required classes
package com.databricks.apps.logs.chapter3

import java.io.{BufferedWriter, FileWriter, PrintWriter}

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import com.databricks.apps.logs.{ApacheAccessLog, LogAnalyzerRDD}

/**
  * LogAnalyzerExportSmallData shows how to export data of small size to a file.
  *
  * Example command to run:
  * %  ${YOUR_SPARK_HOME}/bin/spark-submit
  * --class "com.databricks.apps.logs.chapter3.LogAnalyzerExportSmallData"
  * --master spark://YOUR_SPARK_MASTER
  * target/scala-2.11/spark-logs-analyzer_2.11-2.0.jar
  * ../../data/apache.access.log output.log
  */
object LogAnalyzerExportSmallData extends App {
  // Initialize SparkSession instance.
  val spark = SparkSession
    .builder()
    .appName("Log Analyzer SQL in Scala")
    .getOrCreate()

  val logFile = args(0)

  val accessLogs: RDD[ApacheAccessLog] = spark
    .sparkContext
    .textFile(logFile)
    .map(ApacheAccessLog.parseLogLine)

  val logAnalyzerRDD = LogAnalyzerRDD(spark)
  val logStatistics = logAnalyzerRDD.processRdd(accessLogs)

  val outputFile = args(1)

  val out = new PrintWriter(new BufferedWriter(new FileWriter(outputFile)))
  val contentSizeStats = logStatistics.contentSizeStats
  out.println("Content Size Avg: %s, Min: %s, Max: %s"
    .format(contentSizeStats._1 / contentSizeStats._2,
      contentSizeStats._3,
      contentSizeStats._4))
  out.println(s"""Response code counts: ${logStatistics.responseCodeToCount.mkString("[", ",", "]")}""")
  out.println(s"""IPAddresses > 10 times: ${logStatistics.ipAddresses.mkString("[", ",", "]")}""")
  out.println(s"""Top Endpoints: ${logStatistics.topEndpoints.mkString("[", ",", "]")}""")
  out.close()

  spark.stop()
} 
Author: krish121, Project: Spark-reference-applications, Lines: 52, Source: LogAnalyzerExportSmallData.scala

Example 9: VideoManager

// Set the package name and import the required classes
package fgc.formatter

import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter

import fgc.model.VideoData
import fgc.parser.YouTubeChannelParser
import fgc.normalizer.Normalizer
import fgc.logger.Logger

object VideoManager {
    private val DATA_FILE_PATH = "../data/formatted/video.json"

    def toFile(videoDatas: List[VideoData]): String = {
        val sortedVideos = (
            videoDatas
            .sortBy(r => (r.timestamp, r.id)).reverse
            .map(_.json)
        )
        val serialized = "[\n" + sortedVideos.mkString(",\n") + "\n]"
        val file = new File(DATA_FILE_PATH)
        val bw = new BufferedWriter(new FileWriter(file))
        bw.write(serialized)
        bw.close
        serialized
    }

    def loadVideos(): List[VideoData] = {
        (
            YouTubeChannelParser.Parsers
            .map(p => p.loadVideos)
            .flatten
            .map(_.trim.fixCharacters)
        )
    }

    def formatVideos(rawVideos: List[VideoData]): List[VideoData] = {
        Normalizer.normalize(rawVideos)
    }
}

object Formatter {
    def run(): Boolean = {
        println("running formatter")
        println("parsing videos")
        val videos = VideoManager.loadVideos
        println("normalizing videos")
        val formatted = VideoManager.formatVideos(videos)
        VideoManager.toFile(formatted)
        Logger.logParsing
        (videos != formatted)
    }

    def main(args: Array[String]) {
        println(run)
    }
} 
Author: mpaulweeks, Project: fgc-video, Lines: 59, Source: formatter.scala

Example 10: Logger

// Set the package name and import the required classes
package fgc.logger

import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter
import scala.collection.mutable

object Logger {
    private var parsingLogs = mutable.Map[String, mutable.ListBuffer[String]]()

    def parsingFailure(channelName: String, videoTitle: String): Unit = {
        val channelLog = parsingLogs.getOrElseUpdate(channelName, new mutable.ListBuffer)
        channelLog += videoTitle
    }

    def logParsing(): Unit = {
        parsingLogs.foreach { case (channelName, logs ) =>
            val file = new File(s"logs/parse-$channelName.txt")
            val bw = new BufferedWriter(new FileWriter(file))
            logs.foreach { line =>
                bw.write(line + "\n")
            }
            bw.close
        }
        parsingLogs = mutable.Map()
    }
} 
Author: mpaulweeks, Project: fgc-video, Lines: 28, Source: logger.scala

Example 11: withWriter

// Set the package name and import the required classes
package tutor.utils

import java.io.{BufferedWriter, File, FileWriter, Writer}

trait WriteSupport {

  def withWriter(path: String)(f: Writer => Unit): Unit ={
    var writer: Writer = null
    try {
      val file = new File(path)
      if (!file.exists()) file.createNewFile()
      writer = new BufferedWriter(new FileWriter(file))
      f(writer)
      writer.flush()
    } finally {
      if (writer != null) writer.close()
    }
  }
} 
Author: notyy, Project: CodeAnalyzerTutorial, Lines: 20, Source: WriteSupport.scala
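The trait above is the classic loan pattern: the caller borrows a Writer and the trait guarantees cleanup. A hypothetical use (the path and text are illustrative):

import tutor.utils.WriteSupport

object WriteSupportExample extends WriteSupport {
  def main(args: Array[String]): Unit = {
    withWriter("/tmp/report.txt") { writer =>
      writer.write("one line\n")
      writer.write("another line\n")
    } // the writer is flushed and closed even if the body throws
  }
}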

Example 12: SessionDataFileWriter

// Set the package name and import the required classes
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.FileWriter

object SessionDataFileWriter {
  
  val eol = System.getProperty("line.separator")
  
  def main(args: Array[String]) {
    if (args.length == 0) {
      println("SessionDataFileWriter {numberOfRecords} {outputFile}")
      return
    }
    
    val writer = new BufferedWriter(new FileWriter(args(1)))
    val loops = args(0).toInt
    
    for (i <- 1 to loops) {
      writer.write(SessionDataGenerator.getNextEvent + eol)
    }
    
    writer.close
  }
} 
Author: TedBear42, Project: spark_training, Lines: 26, Source: SessionDataFileWriter.scala

Example 13: SessionDataFileHDFSWriter

// Set the package name and import the required classes
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.FileWriter
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.conf.Configuration
import java.io.OutputStreamWriter
import org.apache.hadoop.fs.Path
import java.util.Random

object SessionDataFileHDFSWriter {
  
  val eol = System.getProperty("line.separator")
  
  def main(args: Array[String]) {
    if (args.length == 0) {
      println("SessionDataFileHDFSWriter {tempDir} {distDir} {numberOfFiles} {numberOfEventsPerFile} {waitBetweenFiles}")
      return
    }
    val conf = new Configuration
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"))
    
    val fs = FileSystem.get(conf) // use the configuration with the Hadoop resources added above
    val rootTempDir = args(0)
    val rootDistDir = args(1)
    val files = args(2).toInt
    val loops = args(3).toInt
    val waitBetweenFiles = args(4).toInt
    val r = new Random
    for (f <- 1 to files) {
      val rootName = "/weblog." + System.currentTimeMillis()
      val tmpPath = new Path(rootTempDir + rootName + ".tmp")
      val writer = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath)))
      
      print(f + ": [")
      
      val randomLoops = loops + r.nextInt(loops)
      
      for (i <- 1 to randomLoops) {
        writer.write(SessionDataGenerator.getNextEvent + eol)
        if (i%100 == 0) {
          print(".")
        }
      }
      println("]")
      writer.close
      
      val distPath = new Path(rootDistDir + rootName + ".dat")
      
      fs.rename(tmpPath, distPath)
      Thread.sleep(waitBetweenFiles)
    }
    println("Done")
  }
} 
Author: TedBear42, Project: spark_training, Lines: 58, Source: SessionDataFileHDFSWriter.scala

Example 14: name

// Set the package name and import the required classes
package mjis

import java.io.BufferedWriter


trait Phase[+O] {
  def name: String = getClass.getSimpleName.toLowerCase
  protected def getResult(): O
  def dumpResult(writer: BufferedWriter): Unit = {}
  lazy val result: O = getResult()
  def forceResult(): Unit = result
  def findings: List[Finding] = List()
}

trait AnalysisPhase[O] extends Phase[O] {
  def success: Boolean = findings.forall(_.severity != Severity.ERROR)
} 
Author: jspam, Project: mjis, Lines: 18, Source: Phase.scala
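A concrete phase only has to supply getResult; dumpResult and findings come with defaults, and the trait's lazy val computes the result once on first access. A hypothetical implementation (the class and field names are illustrative, not from mjis):

import java.io.BufferedWriter
import mjis.Phase

// Counts input lines; result is computed lazily via the trait's lazy val.
class LineCountPhase(input: Seq[String]) extends Phase[Int] {
  override protected def getResult(): Int = input.size
  override def dumpResult(writer: BufferedWriter): Unit = {
    writer.write(s"line count: $result")
    writer.newLine()
  }
}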

Example 15: GccRunner

// Set the package name and import the required classes
package mjis

import java.io.{InputStreamReader, BufferedReader, BufferedWriter}

import scala.collection.mutable.ListBuffer

class GccRunner(a: Unit, config: Config) extends Phase[Unit] {
  override protected def getResult(): Unit = {
    val gcc = Runtime.getRuntime.exec(s"gcc -m64 -Wl,-e,main -nostdlib -o ${config.outFile} ${config.asmOutFile}")
    val stderr = new BufferedReader(new InputStreamReader(gcc.getErrorStream))
    gcc.waitFor() // note: reading stderr only after waitFor can block if gcc emits very large output; diagnostics are normally small
    val stream = Stream.continually(stderr.readLine()).takeWhile(_ != null)
    if (gcc.exitValue() != 0 || stream.nonEmpty) {
      _findings += new Finding() {
        override def pos: Position = Position.NoPosition
        override def msg: String = s"GCC returned exit status ${gcc.exitValue}\n${stream.mkString("\n")}"
        override def severity: Severity = Severity.ERROR
      }
    }
  }

  val _findings = ListBuffer[Finding]()

  override def findings: List[Finding] = _findings.toList

  override def dumpResult(writer: BufferedWriter): Unit = {}
} 
Author: jspam, Project: mjis, Lines: 28, Source: GccRunner.scala


Note: The java.io.BufferedWriter class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.