This article collects typical usage examples of the org.slf4j.Logger class in Scala. If you have been wondering what the Logger class is for, how to use it, or what real-world usage looks like, the curated class code examples below should help.
Fifteen code examples of the Logger class follow, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
Example 1: HDFS
// Package declaration and imports for the dependent classes
package org.mireynol.util

import java.io.{BufferedInputStream, OutputStreamWriter}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.mutable.ListBuffer
import scala.io.Source

object HDFS {

  val log : Logger = LoggerFactory.getLogger( HDFS.getClass )

  val hadoop : FileSystem = {
    val conf = new Configuration( )
    conf.set( "fs.defaultFS", "hdfs://localhost:9000" )
    FileSystem.get( conf )
  }

  def readAndMap( path : String, mapper : ( String ) => Unit ) = {
    if ( hadoop.exists( new Path( path ) ) ) {
      val is = new BufferedInputStream( hadoop.open( new Path( path ) ) )
      Source.fromInputStream( is ).getLines( ).foreach( mapper )
    }
    else {
      // TODO - error logic here
    }
  }

  def write( filename : String, content : Iterator[ String ] ) = {
    val path = new Path( filename )
    val out = new OutputStreamWriter( hadoop.create( path, false ) )
    content.foreach( str => out.write( str + "\n" ) )
    out.flush( )
    out.close( )
  }

  def ls( path : String ) : List[ String ] = {
    val files = hadoop.listFiles( new Path( path ), false )
    val filenames = ListBuffer[ String ]( )
    while ( files.hasNext ) filenames += files.next( ).getPath( ).toString( )
    filenames.toList
  }

  def rm( path : String, recursive : Boolean ) : Unit = {
    if ( hadoop.exists( new Path( path ) ) ) {
      println( "deleting file : " + path )
      hadoop.delete( new Path( path ), recursive )
    }
    else {
      println( "File/Directory " + path + " does not exist" )
      log.warn( "File/Directory " + path + " does not exist" )
    }
  }

  def cat( path : String ) = Source.fromInputStream( hadoop.open( new Path( path ) ) ).getLines( ).foreach( println )

}
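For context, here is a minimal usage sketch of the object above. It is hypothetical and not part of the original source: it assumes the hard-coded NameNode at hdfs://localhost:9000 is reachable and that /tmp is writable, and the object name and paths are illustrative only.

object HdfsRoundTrip extends App {
  // Write two lines, read them back, list the directory, then clean up.
  HDFS.write( "/tmp/example.txt", Iterator( "line one", "line two" ) )
  HDFS.readAndMap( "/tmp/example.txt", line => println( s"read: $line" ) )
  HDFS.ls( "/tmp" ).foreach( println )
  HDFS.rm( "/tmp/example.txt", recursive = false )
}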
Example 2: DataUtilities
// Package declaration and imports for the dependent classes
package org.dl4scala.examples.utilities

import java.io._

import org.apache.commons.compress.archivers.tar.TarArchiveInputStream
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream
import org.slf4j.{Logger, LoggerFactory}

object DataUtilities {

  val logger: Logger = LoggerFactory.getLogger(DataUtilities.getClass)

  private val BUFFER_SIZE = 4096

  @throws(classOf[IOException])
  def extractTarGz(filePath: String, outputPath: String): Unit = {
    var fileCount = 0
    var dirCount = 0
    logger.info("Extracting files")
    val tais = new TarArchiveInputStream(new GzipCompressorInputStream(
      new BufferedInputStream(new FileInputStream(filePath))))
    // Read the tar entries using the getNextEntry method
    Stream.continually(tais.getNextTarEntry).takeWhile(_ != null).foreach { entry =>
      // Create directories as required
      if (entry.isDirectory) {
        new File(outputPath + "/" + entry.getName).mkdirs
        dirCount += 1
      } else {
        val data = new Array[Byte](BUFFER_SIZE)
        val fos = new FileOutputStream(outputPath + "/" + entry.getName)
        val dest = new BufferedOutputStream(fos, BUFFER_SIZE)
        Stream.continually(tais.read(data, 0, BUFFER_SIZE)).takeWhile(_ != -1).foreach { count =>
          dest.write(data, 0, count)
        }
        dest.close()
        fileCount = fileCount + 1
      }
      if (fileCount % 1000 == 0) logger.info(".")
    }
    tais.close()
  }
}
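A hypothetical invocation of the utility, assuming a gzip-compressed tar archive at the placeholder path below:

object ExtractExample extends App {
  // Unpacks /tmp/dataset.tar.gz into /tmp/dataset, creating directories as needed.
  DataUtilities.extractTarGz("/tmp/dataset.tar.gz", "/tmp/dataset")
}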
Example 3: EstonianTotalFinderTest
// Package declaration and imports for the dependent classes
package org.pdfextractor.algorithm.finder.et

import org.pdfextractor.algorithm.candidate.{IsDouble, HasEuroSign}
import org.pdfextractor.algorithm.finder.{AbstractFinderTest, AbstractInvoiceFileReader}
import org.pdfextractor.algorithm.io._
import org.slf4j.{Logger, LoggerFactory}
import org.springframework.beans.factory.annotation.Autowired
import org.pdfextractor.algorithm.parser.PDFFileParser
import org.pdfextractor.algorithm.phrase.PhraseTypesStore

class EstonianTotalFinderTest extends AbstractFinderTest {

  val log: Logger = LoggerFactory.getLogger(classOf[PhraseTypesStore])

  @Autowired var estonianTotalFinder: EstonianTotalFinder = _

  "Estonian total finder" should "find from real invoice and have additional info present" in {
    val inputStream = getInputStreamFromFile(AbstractInvoiceFileReader.Starman)
    val parseResult = PDFFileParser.parse(inputStream)
    val candidates = estonianTotalFinder.findCandidates(parseResult)
    assert(candidates.nonEmpty)
    val firstCandidate = candidates.head
    assert(Option(firstCandidate.value).isDefined)
    assert(Option(firstCandidate.x).isDefined)
    assert(Option(firstCandidate.y).isDefined)
    assert(firstCandidate.value == 16.87d)
    assert(35 == firstCandidate.x)
    assert(414 == firstCandidate.y)
    assert(firstCandidate.properties.get(IsDouble).get.asInstanceOf[Boolean])
    assert(!firstCandidate.properties.get(HasEuroSign).get.asInstanceOf[Boolean])
  }
}
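A small aside on the last two assertions: assuming properties is an ordinary Scala Map, Option.contains avoids the bare .get that throws when a key is missing. Note the semantics shift slightly, since the negated form also passes when the key is absent:

assert(firstCandidate.properties.get(IsDouble).contains(true))
assert(!firstCandidate.properties.get(HasEuroSign).contains(true))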
Example 4: BigQueryPartitionUtils
// Package declaration and imports for the dependent classes
package com.appsflyer.spark.utils

import com.appsflyer.spark.bigquery.BigQueryServiceFactory
import com.appsflyer.spark.bigquery.streaming.BigQueryStreamWriter
import com.google.api.client.googleapis.json.GoogleJsonResponseException
import com.google.api.services.bigquery.Bigquery
import com.google.api.services.bigquery.model.{Table, TableReference, TimePartitioning}
import org.slf4j.{Logger, LoggerFactory}

import scala.util.control.NonFatal

object BigQueryPartitionUtils {

  private val logger: Logger = LoggerFactory.getLogger(classOf[BigQueryStreamWriter])
  val DEFAULT_TABLE_EXPIRATION_MS = 259200000L
  val bqService = BigQueryServiceFactory.getService

  def createBigQueryPartitionedTable(targetTable: TableReference): Unit = {
    val datasetId = targetTable.getDatasetId
    val projectId: String = targetTable.getProjectId
    val tableName = targetTable.getTableId
    try {
      logger.info("Creating Time Partitioned Table")
      val table = new Table()
      table.setTableReference(targetTable)
      val timePartitioning = new TimePartitioning()
      timePartitioning.setType("DAY")
      timePartitioning.setExpirationMs(DEFAULT_TABLE_EXPIRATION_MS)
      table.setTimePartitioning(timePartitioning)
      bqService.tables().insert(projectId, datasetId, table).execute()
    } catch {
      case e: GoogleJsonResponseException if e.getStatusCode == 409 =>
        logger.info(s"$projectId:$datasetId.$tableName already exists")
      case NonFatal(e) => throw e
    }
  }
}
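A hypothetical call site. The fluent setters exist on the generated BigQuery model classes, but the project, dataset and table names are placeholders, and valid GCP credentials are assumed:

val targetTable = new TableReference()
  .setProjectId("my-project")
  .setDatasetId("my_dataset")
  .setTableId("events")

// Creates a DAY-partitioned table; logs and continues on HTTP 409 if it already exists.
BigQueryPartitionUtils.createBigQueryPartitionedTable(targetTable)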
Example 5: CollectTransformer
// Package declaration and imports for the dependent classes
package transformation

import org.apache.kafka.streams.KeyValue
import org.apache.kafka.streams.kstream.Transformer
import org.apache.kafka.streams.processor.ProcessorContext
import org.apache.kafka.streams.state.KeyValueStore
import org.slf4j.{Logger, LoggerFactory}

abstract class CollectTransformer[INPUT_K, INPUT_V, STORE_V, OUTPUT_K, OUTPUT_V](storeName: String)
  extends Transformer[INPUT_K, INPUT_V, KeyValue[OUTPUT_K, OUTPUT_V]] {

  val log: Logger = LoggerFactory.getLogger(this.getClass)

  var ctx: ProcessorContext = _
  var store: KeyValueStore[INPUT_K, STORE_V] = _

  override def init(context: ProcessorContext): Unit = {
    log.debug(s"Init ...")
    ctx = context
    store = ctx.getStateStore(storeName).asInstanceOf[KeyValueStore[INPUT_K, STORE_V]]
    ctx.schedule(100)
  }

  override def punctuate(timestamp: Long): KeyValue[OUTPUT_K, OUTPUT_V] = {
    log.debug(s"Punctuating ...")
    null
  }

  override def transform(key: INPUT_K, value: INPUT_V): KeyValue[OUTPUT_K, OUTPUT_V] = {
    log.debug(s"Transforming event : $value")
    val currentStoreValue = store.get(key)
    if (currentStoreValue != null && collectComplete(currentStoreValue, value)) {
      collectOutput(key, currentStoreValue, value)
    } else {
      store.put(key, appendToStore(currentStoreValue, value))
      null
    }
  }

  def appendToStore(storeValue: STORE_V, appendValue: INPUT_V): STORE_V

  def collectComplete(storeValue: STORE_V, appendValue: INPUT_V): Boolean

  def collectOutput(inputKey: INPUT_K, storeValue: STORE_V, mergeValue: INPUT_V): KeyValue[OUTPUT_K, OUTPUT_V]

  override def close(): Unit = {
    log.debug(s"Close ...")
  }
}
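To make the contract concrete, here is a hypothetical subclass that buffers one String event per key and emits a joined pair when the second event for that key arrives (the class name and separator are illustrative):

class PairCollector(storeName: String)
    extends CollectTransformer[String, String, String, String, String](storeName) {

  // Buffer the incoming event; a null store value means nothing is buffered yet.
  override def appendToStore(storeValue: String, appendValue: String): String = appendValue

  // A pair is complete as soon as any event is already buffered for the key.
  override def collectComplete(storeValue: String, appendValue: String): Boolean = true

  // Emit the buffered event and the incoming one as a single joined value.
  override def collectOutput(inputKey: String, storeValue: String, mergeValue: String): KeyValue[String, String] =
    new KeyValue(inputKey, s"$storeValue|$mergeValue")
}

One caveat inherited from the base class: transform never clears the store entry after emitting, so the third event for a key would pair with the stale first one. A real implementation would likely call store.delete(inputKey) inside collectOutput to reset the key.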
Example 6: ServiceNameServiceLocator
// Package declaration and imports for the dependent classes
package org.wex.cmsfs.lagom.service.discovery.name

import java.net.URI

import com.lightbend.lagom.internal.client.CircuitBreakers
import com.lightbend.lagom.scaladsl.api.Descriptor.Call
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.client.{CircuitBreakerComponents, CircuitBreakingServiceLocator}
import org.slf4j.{Logger, LoggerFactory}
import play.api.Configuration

import scala.concurrent.{ExecutionContext, Future}

trait ServiceNameServiceLocatorComponents extends CircuitBreakerComponents {
  lazy val serviceLocator: ServiceLocator = new ServiceNameServiceLocator(configuration, circuitBreakers)(executionContext)
}

class ServiceNameServiceLocator(configuration: Configuration, circuitBreakers: CircuitBreakers)(implicit ec: ExecutionContext)
  extends CircuitBreakingServiceLocator(circuitBreakers) {

  private final val logger: Logger = LoggerFactory.getLogger(this.getClass)

  private def getServicesByName(name: String): Option[URI] = {
    Some(URI.create(s"http://${name}.cmsfs.org:9000"))
  }

  override def locate(name: String, serviceCall: Call[_, _]): Future[Option[URI]] = {
    val uriOpt = getServicesByName(name)
    logger.info(s"request for '$name' resolved to $uriOpt")
    Future.successful(uriOpt)
  }
}
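A hypothetical wiring sketch: in Lagom's scaladsl runtime this locator would normally be mixed into the application cake through its components trait, roughly as below (names are placeholders, and the surrounding LagomApplication boilerplate is elided):

// abstract class MyApplication(context: LagomApplicationContext)
//   extends LagomApplication(context)
//   with ServiceNameServiceLocatorComponents {
//   // serviceLocator is now supplied by the trait above
// }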
Example 7: ConsulServiceLocator
// Package declaration and imports for the dependent classes
package org.wex.cmsfs.lagom.service.discovery.consul

import java.net.URI

import com.lightbend.lagom.internal.client.CircuitBreakers
import com.lightbend.lagom.scaladsl.api.Descriptor.Call
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.client.{CircuitBreakerComponents, CircuitBreakingServiceLocator}
import org.slf4j.{Logger, LoggerFactory}
import play.api.Configuration

import scala.concurrent.{ExecutionContext, Future}

trait ConsulServiceLocatorComponents extends CircuitBreakerComponents {
  lazy val serviceLocator: ServiceLocator = new ConsulServiceLocator(configuration, circuitBreakers)(executionContext)
}

class ConsulServiceLocator(configuration: Configuration, circuitBreakers: CircuitBreakers)(implicit ec: ExecutionContext)
  extends CircuitBreakingServiceLocator(circuitBreakers) {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)
  private val consulServiceExtract = new ConsulServiceExtract(configuration)

  override def locate(name: String, serviceCall: Call[_, _]): Future[Option[URI]] = Future {
    logger.debug(s"request Service Name: ${name}.")
    consulServiceExtract.getService(name)
  }
}
Example 8: decider
// Package declaration and imports for the dependent classes
package org.wex.cmsfs.common.core

import akka.stream.{ActorAttributes, Supervision}
import org.slf4j.Logger

trait CmsfsAkkaStream {

  val logger: Logger

  private def decider(f: (String) => String): Supervision.Decider = {
    case ex: Exception =>
      logger.error(f(ex.getMessage))
      Supervision.Resume
  }

  def supervisionStrategy(f: (String) => String) = {
    ActorAttributes.supervisionStrategy(decider(f))
  }

  def loggerFlow[T](elem: T, mess: String): T = {
    logger.info(mess)
    elem
  }
}
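A runnable sketch of the trait in use, assuming Akka 2.6 (where an implicit ActorSystem supplies the materializer). The failing element is logged through the decider and dropped, so the stream continues:

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import org.slf4j.{Logger, LoggerFactory}

object StreamExample extends App with CmsfsAkkaStream {
  override val logger: Logger = LoggerFactory.getLogger(getClass)
  implicit val system: ActorSystem = ActorSystem("demo")

  Source(List("1", "2", "oops", "4"))
    .map(_.toInt) // "oops" throws NumberFormatException; the decider logs and resumes
    .withAttributes(supervisionStrategy(msg => s"dropped element: $msg"))
    .runForeach(n => logger.info(s"got $n")) // logs 1, 2, 4
}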
Example 9: ParquetUtils
// Package declaration and imports for the dependent classes
package com.scalagen.util

import scala.collection.JavaConverters._

import com.scalagen.data._
import com.scalagen.data.api.Source
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.parquet.schema.OriginalType._
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName._
import org.apache.parquet.schema._
import org.slf4j.{Logger, LoggerFactory}

object ParquetUtils {

  private val logger: Logger = LoggerFactory.getLogger(getClass)

  private[scalagen] def makeSchema(s: String, sources: Seq[Source[_, _]], headers: Seq[String]): MessageType = {
    logger.debug(s"Making schema for ${sources.mkString(", ")}")
    val sourceTypes: Seq[Type] = sources.zip(headers).map {
      case (src: Source[_, _], name: String) => sourceToParquetType(src, name)
      case _ => throw new IllegalArgumentException("Bad input for parquet source types.")
    }
    new MessageType(s, sourceTypes: _*)
  }

  private[scalagen] def sourceToParquetType(s: Source[_, _], columnName: String): Type = {
    s match {
      case _: GaussianSource | _: RandomDouble => Types.required(DOUBLE).named(columnName)
      case _: IncrementingSource | _: DeincrementingSource | _: RandomInt => Types.required(INT32).named(columnName)
      case _: DateSource => Types.required(BINARY).as(UTF8).named(columnName)
      case _: BernoulliSource => Types.required(BOOLEAN).named(columnName)
      case _ => Types.required(BINARY).as(UTF8).named(columnName)
    }
  }

  def parquetRowCount(s: String): Long = {
    parquetRowCount(new Path(s))
  }

  def parquetRowCount(p: Path, conf: Configuration = new Configuration()): Long = {
    val fs: FileSystem = p.getFileSystem(conf)
    val status: FileStatus = fs.getFileStatus(p)
    ParquetFileReader.readFooters(conf, status, false).asScala.head.getParquetMetadata.getBlocks.asScala.map(_.getRowCount).sum
  }
}
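A hypothetical call; the path is a placeholder for any existing Parquet file:

val rows: Long = ParquetUtils.parquetRowCount("/tmp/people.parquet")
println(s"row count: $rows")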
Example 10: ShellNode
// Package declaration and imports for the dependent classes
package chen.guo.dagexe.config

import chen.guo.dagexe.util.MessageBuilder
import org.slf4j.{Logger, LoggerFactory}

case class ShellNode(commands: String*) extends DirectedExecutableItem {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  override def execute(): Int = {
    logger.info(s"Start executing: ${this}")
    val exitCode = commands.foldLeft(0)((lastExit, cmd) =>
      lastExit match {
        case 0 =>
          logger.info("Executing: " + cmd)
          val cmdExitCode = if (cmd.trim == "") 0
                            else Runtime.getRuntime.exec(Array("/bin/bash", "-c", cmd)).waitFor
          logger.info(s"${this} finishes cmd `$cmd` with code $cmdExitCode")
          cmdExitCode
        case x => x
      })
    if (exitCode == 0)
      logger.info(MessageBuilder.build(
        s"${getClass.getName} finishes with code 0", s"Current node is ${this}"))
    else
      logger.error(MessageBuilder.build(
        s"${getClass.getName} finishes with code $exitCode", s"Current node is ${this}"))
    exitCode
  }
}

case class SleepNode(id: String, sleepTimeMillis: String) extends DirectedExecutableItem {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  val sleepTime = sleepTimeMillis.toLong

  override def execute(): Int = {
    logger.info(MessageBuilder.build(
      s"Start executing '$id' at ${Thread.currentThread().getName}",
      s"Sleeping $sleepTime milli-seconds."))
    Thread.sleep(sleepTime)
    0
  }
}
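A hypothetical run. The fold executes commands sequentially through /bin/bash and, because of the case x => x branch, stops launching new commands after the first non-zero exit code:

val node = ShellNode("echo hello", "ls /tmp", "exit 3", "echo never reached")
val code = node.execute() // 3; the final echo is never launched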
Example 11: DAG
// Package declaration and imports for the dependent classes
package chen.guo.dagexe.config

import org.slf4j.{Logger, LoggerFactory}

import scala.collection.mutable

class DAG extends DirectedExecutableItem {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)
  private val allExecutables = mutable.HashSet[DirectedExecutableItem]()

  def addEdge(from: DirectedExecutableItem, to: DirectedExecutableItem): Unit = {
    allExecutables += from
    allExecutables += to
    from.addChild(to)
    to.addParent(from)
  }

  def addEdges(froms: List[DirectedExecutableItem], merge: DirectedExecutableItem) =
    froms.foreach(addEdge(_, merge))

  def addEdges(from: DirectedExecutableItem, forks: List[DirectedExecutableItem]) =
    forks.foreach(addEdge(from, _))

  override def execute(): Int = {
    checkForCycles()
    0
  }

  private def checkForCycles() = {
    val unprocessed = new mutable.HashSet[DirectedExecutableItem]()
    unprocessed ++= allExecutables
    // Kahn's algorithm: repeatedly dequeue nodes whose in-bound count has dropped to zero.
    val inBounds = mutable.Map(allExecutables.map(e => e -> e.getParents.size).toSeq: _*)
    val startables = new mutable.Queue[DirectedExecutableItem]()
    startables ++= inBounds.filter(_._2 == 0).keys
    while (startables.nonEmpty) {
      val job = startables.dequeue()
      unprocessed -= job
      for (child <- job.getChildren) {
        val decreased: Int = inBounds(child) - 1
        if (decreased == 0)
          startables.enqueue(child)
        inBounds(child) = decreased
      }
    }
    // Any node never dequeued has an ancestor stuck in a cycle.
    if (unprocessed.nonEmpty) {
      logger.error(s"Detected cycles in your graph definition around ${unprocessed.mkString("[", ",", "]")}")
      sys.exit(1)
    }
  }
}
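A hypothetical diamond-shaped graph, reusing SleepNode from example 10. execute() returns 0 for an acyclic graph and terminates the JVM if a cycle is detected:

val (a, b, c, d) = (SleepNode("a", "10"), SleepNode("b", "20"), SleepNode("c", "30"), SleepNode("d", "10"))
val dag = new DAG
dag.addEdges(a, List(b, c)) // a fans out to b and c
dag.addEdges(List(b, c), d) // b and c merge into d
val ok = dag.execute()      // 0: a -> {b, c} -> d has no cycle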
Example 12: SleepWriteNode
// Package declaration and imports for the dependent classes
package chen.guo.ittests

import java.io._
import java.util.concurrent._

import chen.guo.dagexe.config.SleepNode
import chen.guo.dagexe.util.MessageBuilder
import org.slf4j.{Logger, LoggerFactory}

import scala.util.Try

class SleepWriteNode(id: String, sleepTimeMillis: String, outputFilePath: String) extends SleepNode(id, sleepTimeMillis) {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  override def execute(): Int = {
    Try {
      logger.info(MessageBuilder.build(
        s"Start executing '$id' at ${Thread.currentThread().getName}",
        s"Sleeping $sleepTime milli-seconds."))
      Thread.sleep(sleepTime)
      val outputFile = new File(outputFilePath)
      if (!outputFile.exists()) {
        outputFile.createNewFile()
      }
      // Write to the queue immediately to keep the order as much as possible
      SleepWriteNode.buffer.put(id + System.lineSeparator)
      SleepWriteNode.lock.synchronized {
        val fw = new FileWriter(outputFile.getAbsoluteFile, true)
        val bw = new BufferedWriter(fw)
        bw.write(SleepWriteNode.buffer.take())
        bw.close()
      }
      0
    }.getOrElse(1)
  }
}

object SleepWriteNode {
  val buffer = new LinkedBlockingQueue[String]()
  val lock = this
}
Example 13: SparkCassOutputHandler
// Package declaration and imports for the dependent classes
package com.github.jparkie.spark.cassandra

import org.apache.cassandra.utils.OutputHandler
import org.slf4j.Logger

class SparkCassOutputHandler(log: Logger) extends OutputHandler {

  override def warn(msg: String): Unit = {
    log.warn(msg)
  }

  override def warn(msg: String, th: Throwable): Unit = {
    log.warn(msg, th)
  }

  override def debug(msg: String): Unit = {
    log.debug(msg)
  }

  override def output(msg: String): Unit = {
    log.info(msg)
  }
}
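A short usage sketch; the adapter simply routes Cassandra's OutputHandler callbacks to whatever SLF4J logger it is handed (the logger name is illustrative):

import org.slf4j.LoggerFactory

val handler = new SparkCassOutputHandler(LoggerFactory.getLogger("cassandra-bulk"))
handler.output("streaming session started")                    // forwarded at INFO
handler.warn("retrying host", new RuntimeException("timeout")) // forwarded at WARN with the stack trace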
Example 14: ItalianInvoiceIDFinderTest
// Package declaration and imports for the dependent classes
package org.pdfextractor.algorithm.finder.it

import org.pdfextractor.algorithm.finder.AbstractFinderTest
import org.slf4j.{Logger, LoggerFactory}
import org.springframework.beans.factory.annotation.Autowired
import org.pdfextractor.algorithm.phrase.PhraseTypesStore

class ItalianInvoiceIDFinderTest extends AbstractFinderTest {

  val log: Logger = LoggerFactory.getLogger(classOf[PhraseTypesStore])

  @Autowired var italianInvoiceIDFinder: ItalianInvoiceIDFinder = _

  "Italian invoice ID finder" should "parse" in {
    val idText = "Numero fattura: 3816442625428252-20"
    val parsed = italianInvoiceIDFinder.parseValue(idText).asInstanceOf[String]
    assert("3816442625428252-20" == parsed)
  }

  "Italian invoice ID finder" should "find from start" in {
    assert(italianInvoiceIDFinder.searchPattern.get.findFirstIn("Fattura n.6 del 23.02.2016").nonEmpty)
  }

  "Italian invoice ID finder" should "find from line" in {
    assert(italianInvoiceIDFinder.getValuePattern.findFirstIn("Fattura n.6 del 23.02.2016").nonEmpty)
    assert("Fattura n.6" == italianInvoiceIDFinder.getValuePattern.findFirstIn("Fattura n.6 del 23.02.2016").get)
    assert("Fattura n.654343-3s" == italianInvoiceIDFinder.getValuePattern.findFirstIn("Fattura n.654343-3s del 23.02.2016").get)
    assert("654343-3s" == italianInvoiceIDFinder.StartR.replaceFirstIn("Fattura n.654343-3s", ""))
  }
}
Example 15: EstonianInvoiceIDFinderTest
// Package declaration and imports for the dependent classes
package org.pdfextractor.algorithm.finder.et

import org.pdfextractor.algorithm.candidate.Candidate
import org.pdfextractor.algorithm.finder.{AbstractFinderTest, AbstractInvoiceFileReader}
import org.pdfextractor.algorithm.io._
import org.slf4j.{Logger, LoggerFactory}
import org.springframework.beans.factory.annotation.Autowired
import org.pdfextractor.algorithm.parser.{PDFFileParser, ParseResult, Phrase}
import org.pdfextractor.algorithm.phrase.PhraseTypesStore

import scala.collection.LinearSeq

class EstonianInvoiceIDFinderTest extends AbstractFinderTest {

  val log: Logger = LoggerFactory.getLogger(classOf[PhraseTypesStore])

  @Autowired var estonianInvoiceIDFinder: EstonianInvoiceIDFinder = _

  "Estonian invoice ID finder" should "find from phrase" in {
    val invoiceAsString = getStringFromFile("EestiEnergia.txt")
    val phrase: Phrase = new Phrase(1, 1, 1, 1, 1, invoiceAsString, false)
    val phrases: LinearSeq[Phrase] = LinearSeq(phrase)
    val parseResult: ParseResult = new ParseResult("", phrases)
    val candidates: Seq[Candidate] = estonianInvoiceIDFinder.findCandidates(parseResult)
    assert(candidates.nonEmpty)
    val foundValues: Seq[String] = candidates.map(_.getValue.asInstanceOf[String])
    assert(foundValues.head == "Arve nr 12345")
  }

  "Estonian invoice ID finder" should "find from real PDF" in {
    val inputStream = getInputStreamFromFile(AbstractInvoiceFileReader.Starman)
    val parseResult = PDFFileParser.parse(inputStream)
    val candidates = estonianInvoiceIDFinder.findCandidates(parseResult)
    assert(candidates.nonEmpty)
    assert(candidates.size == 1)
    val firstCandidate = candidates.head
    assert(Option(firstCandidate.value).isDefined)
    assert(Option(firstCandidate.x).isDefined)
    assert(Option(firstCandidate.y).isDefined)
    assert(firstCandidate.value == "Arve number A-123456")
    assert(330 == firstCandidate.x)
    assert(94 == firstCandidate.y)
  }
}
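Across all fifteen examples the same SLF4J idiom recurs, so it is worth stating once on its own: obtain one Logger per class from LoggerFactory, cache it in a val, and log through the level methods:

import org.slf4j.{Logger, LoggerFactory}

class MyService {
  // One logger per class, resolved once; getLogger is cached but not free.
  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def run(): Unit = logger.info("service started")
}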