This article collects typical usage examples of the org.slf4j.LoggerFactory class in Scala. If you have been wondering what the LoggerFactory class is good for, or how to use it, the curated class examples here should help.
Below are 15 code examples of the LoggerFactory class, sorted by popularity by default.
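Before diving in, here is a minimal, self-contained sketch of the pattern every example below shares: obtain a logger keyed by the enclosing class and log through it. The object and message names are illustrative, not taken from the examples.
import org.slf4j.{Logger, LoggerFactory}

object LoggerFactoryBasics {
  // One logger per class/object, resolved once and reused.
  private val log: Logger = LoggerFactory.getLogger(getClass)

  def main(args: Array[String]): Unit = {
    // {} placeholders defer message formatting until the level is enabled.
    log.info("Application started with {} argument(s)", args.length.toString)
    if (log.isDebugEnabled) log.debug(s"Arguments: ${args.mkString(", ")}")
  }
}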
Example 1: ApiSpec
// Package declaration and imported dependencies
package au.csiro.data61.magda.registry
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.TestProbe
import ch.qos.logback.classic.{Level, Logger}
import org.flywaydb.core.Flyway
import org.scalatest.Matchers
import org.scalatest.fixture.FunSpec
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import scalikejdbc._
abstract class ApiSpec extends FunSpec with ScalatestRouteTest with Matchers with Protocols with SprayJsonSupport {
case class FixtureParam(api: Api, webHookActorProbe: TestProbe)
val databaseUrl = Option(System.getenv("npm_package_config_databaseUrl")).getOrElse("jdbc:postgresql://localhost:5432/postgres")
// Stop Flyway from producing so much spam that Travis terminates the process.
LoggerFactory.getLogger("org.flywaydb").asInstanceOf[Logger].setLevel(Level.WARN)
val flyway = new Flyway()
flyway.setDataSource(databaseUrl, "postgres", "")
flyway.setSchemas("test")
flyway.setLocations("classpath:/sql")
override def testConfigSource =
s"""
|db.default.url = "${databaseUrl}?currentSchema=test"
|authorization.skip = true
|akka.loglevel = INFO
""".stripMargin
override def withFixture(test: OneArgTest) = {
val webHookActorProbe = TestProbe()
val api = new Api(webHookActorProbe.ref, testConfig, system, executor, materializer)
webHookActorProbe.expectMsg(1 millis, WebHookActor.Process)
DB localTx { implicit session =>
sql"DROP SCHEMA IF EXISTS test CASCADE".update.apply()
sql"CREATE SCHEMA test".update.apply()
}
flyway.migrate()
super.withFixture(test.toNoArgTest(FixtureParam(api, webHookActorProbe)))
}
}
Example 2: HomeController
// Package declaration and imported dependencies
package controllers
import javax.inject._
import org.slf4j.LoggerFactory
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.{JsObject, JsValue, Json}
import play.api.mvc._
import play.modules.reactivemongo._
import play.modules.reactivemongo.json._
import play.modules.reactivemongo.json.collection.{JSONCollection, _}
import reactivemongo.api.Cursor
import scala.concurrent.Future
@Singleton
class HomeController @Inject() (val reactiveMongoApi: ReactiveMongoApi)
extends Controller with MongoController with ReactiveMongoComponents {
val logger = LoggerFactory.getLogger(this.getClass)
def collection: JSONCollection = db.collection[JSONCollection]("scrawler1")
def index = Action {
Ok(views.html.index(""))
}
def query = Action.async { request =>
val body = request.body
val query = body.asFormUrlEncoded.get("query")
val querySet = query.toSet[String]
val keywords = querySet.flatMap({ string: String =>
string.split(" ")
})
val searchQuery = Json.obj("keywords" -> Json.obj("$in" -> Json.toJson(keywords)))
logger.info(s"Internal query from client: $searchQuery")
val cursor: Cursor[JsObject] = collection.find(searchQuery).cursor[JsObject]
val result: Future[List[JsObject]] = cursor.collect[List]()
val resultJson: Future[JsValue] =
result.map { persons => Json.toJson(persons) }
resultJson.map { results =>
val title = results \\ "title"
val url = results \\ "url"
val description = results \\ "body"
val queryData: Seq[((JsValue, JsValue), JsValue)] = title.zip(url).zip(description)
Ok(views.html.result(queryData))
}
}
}
Example 3: AutoFutureRunner
// Package declaration and imported dependencies
package com.oradian.autofuture
import org.slf4j.LoggerFactory
import scalax.file._
object AutoFutureRunner extends App {
if (args.isEmpty) {
System.err.println("Usage: java -jar autofuture.jar [file1] [directory1] ... [directoryN]")
sys.exit(1)
}
val logger = LoggerFactory.getLogger("auto-future")
args foreach { arg =>
val path = Path.apply(arg.replace('\\', '/'), '/').toAbsolute
logger.info("Gathering sources from path: {}", path.path)
for (source <- (path ** "*.scala").par if source.isFile) {
logger.trace("Examining Scala source: {}", source.path)
AutoFuture(source.string) match {
case AutoFuture.Result.Success(body) =>
logger.debug("Auto-Futuring Scala source: {}", source.path)
source.write(body)
case AutoFuture.Result.Error(error) =>
logger.warn("Error while parsing Scala source: {}\n{}", source.path: Any, error: Any)
case AutoFuture.Result.Noop =>
logger.trace("Noop on: {}", source.path)
}
}
}
sys.exit(0)
}
Example 4: WarmupZeppelin
// Package declaration and imported dependencies
package biz.meetmatch.modules.util
import biz.meetmatch.modules.Module
import biz.meetmatch.util.Utils
import com.google.gson.Gson
import org.apache.spark.sql.SparkSession
import org.rogach.scallop.Scallop
import org.slf4j.LoggerFactory
import scalaj.http.Http
object WarmupZeppelin extends Module {
override def execute(scallopts: Scallop)(implicit sparkSession: SparkSession): Unit = {
val interpreterId = Utils.getConfig("zeppelin.interpreter.id")
logger.info(s"Restarting the spark interpreter using url http://localhost:8080/api/interpreter/setting/restart/$interpreterId...")
val interpreterResponse = Http(s"http://localhost:8080/api/interpreter/setting/restart/$interpreterId")
.method("put")
.timeout(connTimeoutMs = 10000, readTimeoutMs = 60000)
.asParamMap
logger.info("Response code: " + interpreterResponse.code)
if (interpreterResponse.code == 200) {
val bootstrapNotebookId = Utils.getConfig("zeppelin.bootstrapNotebookId.id")
logger.info(s"Executing the Bootstrap notebook using url http://localhost:8080/api/notebook/job/$bootstrapNotebookId...")
val notebookResponse = Http(s"http://localhost:8080/api/notebook/job/$bootstrapNotebookId")
.postForm
.timeout(connTimeoutMs = 10000, readTimeoutMs = 60000)
.asParamMap
logger.info("Response code: " + notebookResponse.code)
if (notebookResponse.code == 200) {
def stillRunningParagraphs(): Boolean = {
logger.info("Checking if the Bootstrap notebook has finished...")
val checkNotebookResponse = Http(s"http://localhost:8080/api/notebook/job/$bootstrapNotebookId")
.timeout(connTimeoutMs = 10000, readTimeoutMs = 120000)
.asString
logger.info("Response code: " + checkNotebookResponse.code)
val notebookJobResponse = new Gson().fromJson(checkNotebookResponse.body, classOf[GetNotebookJobResponse])
notebookJobResponse.body.exists(_.status != "FINISHED")
}
while (stillRunningParagraphs()) {
logger.info("Keep on polling...")
Thread.sleep(5000)
}
logger.info("The Bootstrap notebook has finished.")
}
}
}
case class GetNotebookJobResponse(status: String, body: Array[ParagraphStatus])
case class ParagraphStatus(id: String, started: String, finished: String, status: String)
override protected val logger = LoggerFactory.getLogger(this.getClass)
}
Example 5: E2ESpec
// Package declaration and imported dependencies
package com.hivehome.kafka.connect.sqs
import java.time.Instant
import org.scalatest.{FunSuite, Matchers}
import org.slf4j.LoggerFactory
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class E2ESpec extends FunSuite with Matchers with SQSSupport {
val logger = LoggerFactory.getLogger(getClass.getName)
private val KafkaTopic: String = "connect-test"
override val queueName = "test-sqs" // Kafka Connect should be set up with this SQS queue
queueUrl = sqs.getQueueUrl(queueName).getQueueUrl
private val props = Map(
"bootstrap.servers" -> sys.env.getOrElse("KAFKA", "localhost:9092"),
"schema.registry.url" -> sys.env.getOrElse("SCHEMA_REGISTRY", "http://localhost:8081"))
val consumer = KafkaAvroConsumer[String, String](props, topicName = KafkaTopic)
// Test is ignored because it does not run without dependent services
ignore("should route message SQS -> Kafka") {
Future {
// the sleep ensures the message to SQS
// is sent only after the consumer is listening on the Kafka topic
Thread.sleep(500)
logger.debug("sending message..")
sendMessage(Instant.now().toString)
logger.debug("sent message..")
}
val msgs = consumer.poll(1, accept = _ => true)
msgs should have size 1
}
}
Example 6: ExecutionContextBackboneCoordinator
// Package declaration and imported dependencies
package ie.zalando.pipeline.backbone.concurrent
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
import scala.util.control.NonFatal
import org.slf4j.LoggerFactory
import cats.data.Xor
import ie.zalando.pipeline.backbone.Backbone
import ie.zalando.pipeline.backbone.Phases.{ LocalReleasePhase, TransformationPipelineFailure }
class ExecutionContextBackboneCoordinator[DA](backbone: Backbone[DA], executionContext: ExecutionContext) {
import ExecutionContextBackboneCoordinator._
val localInitPhases = backbone.initializeTopLevelContexts
def process(datum: DA): Future[Xor[TransformationPipelineFailure, DA]] = {
Future {
val (dataPhases, releasePhases) = backbone.initializeInLocalContext(-1, localInitPhases).unzip
try {
backbone.transformDatum(backbone.createStateMonad(dataPhases), datum)
} finally {
releasePhases.foreach((phase: LocalReleasePhase) => {
Try({ phase.releaseLocalResources() }).recover { case NonFatal(ex) => log.warn(s"Release phase $phase failed:", ex) }
})
}
}(executionContext)
}
}
object ExecutionContextBackboneCoordinator {
val log = LoggerFactory.getLogger(classOf[ExecutionContextBackboneCoordinator[_]])
}
Example 7: Word2VecRawTextExample
// Package declaration and imported dependencies
package org.dl4scala.examples.nlp.word2vec
import org.datavec.api.util.ClassPathResource
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.models.word2vec.Word2Vec
import org.deeplearning4j.text.sentenceiterator.BasicLineIterator
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory
import org.slf4j.LoggerFactory
object Word2VecRawTextExample {
private val log = LoggerFactory.getLogger(Word2VecRawTextExample.getClass)
def main(args: Array[String]): Unit = {
// Gets Path to Text file
val filePath: String = new ClassPathResource("raw_sentences.txt").getFile.getAbsolutePath
log.info("Load & Vectorize Sentences....")
// Strip white space before and after for each line
val iter = new BasicLineIterator(filePath)
// Split on white spaces in the line to get words
val t = new DefaultTokenizerFactory
t.setTokenPreProcessor(new CommonPreprocessor)
log.info("Building model....")
val vec: Word2Vec = new Word2Vec.Builder()
.minWordFrequency(5)
.iterations(1)
.layerSize(100)
.seed(42)
.windowSize(5)
.iterate(iter)
.tokenizerFactory(t)
.build
log.info("Fitting Word2Vec model....")
vec.fit()
log.info("Writing word vectors to text file....")
// Write word vectors to file
WordVectorSerializer.writeWordVectors(vec, "pathToWriteto.txt")
// Prints out the 10 closest words to "day". An example of what to do with these word vectors.
log.info("Closest Words:")
val lst = vec.wordsNearest("day", 10)
System.out.println("10 Words closest to 'day': " + lst)
}
}
Example 8: ParagraphVectorsInferenceExample
// Package declaration and imported dependencies
package org.dl4scala.examples.nlp.paragraphvectors
import org.datavec.api.util.ClassPathResource
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory
import org.nd4j.linalg.ops.transforms.Transforms
import org.slf4j.LoggerFactory
object ParagraphVectorsInferenceExample {
private val log = LoggerFactory.getLogger(ParagraphVectorsInferenceExample.getClass)
def main(args: Array[String]): Unit = {
val resource = new ClassPathResource("/paravec/simple.pv")
val t = new DefaultTokenizerFactory
t.setTokenPreProcessor(new CommonPreprocessor)
// load an externally created model
val vectors = WordVectorSerializer.readParagraphVectors(resource.getFile)
vectors.setTokenizerFactory(t)
vectors.getConfiguration.setIterations(1) // note: iterations is set to 1 here just to speed up inference
val inferredVectorA = vectors.inferVector("This is my world .")
val inferredVectorA2 = vectors.inferVector("This is my world .")
val inferredVectorB = vectors.inferVector("This is my way .")
// high similarity expected here, since in the underlying corpus the words WAY and WORLD appear in very similar contexts
log.info("Cosine similarity A/B: {}", Transforms.cosineSim(inferredVectorA, inferredVectorB))
// equality expected here, since inference is happening for the same sentences
log.info("Cosine similarity A/A2: {}", Transforms.cosineSim(inferredVectorA, inferredVectorA2))
}
}
Example 9: PersistentActorMessage
// Package declaration and imported dependencies
package info.unterstein.akka.persistence.api
import com.google.gson._
import java.lang.reflect.Type
import com.google.gson.reflect.TypeToken
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
case class PersistentActorMessage(messageType: String, scheduleDate: Long, originalMessage: Map[String, String])
object PersistentActorMessage {
private val mapToken = new TypeToken[java.util.Map[String, String]](){}.getType
private val log = LoggerFactory.getLogger(PersistentActorMessage.getClass)
private val gson = new GsonBuilder()
.registerTypeAdapter(classOf[Map[String, String]], new MapSerializer())
.create()
private class MapSerializer extends JsonSerializer[Map[String, String]] with JsonDeserializer[Map[String, String]] {
override def serialize(src: Map[String, String], typeOfSrc: Type, context: JsonSerializationContext): JsonElement = {
gson.toJsonTree(src.asJava)
}
override def deserialize(json: JsonElement, typeOfT: Type, context: JsonDeserializationContext): Map[String, String] = {
val result: java.util.Map[String, String] = gson.fromJson(json, mapToken)
result.asScala.toMap
}
}
def jsonToMap(json: String): Map[String, String] = {
try {
val result: java.util.Map[String, String] = gson.fromJson(json, mapToken)
result.asScala.toMap
} catch {
case o_O: Exception =>
log.error("Deserialization failed for " + json, o_O)
throw new RuntimeException("Deserialization failed!", o_O)
}
}
def mapToJson(map: Map[String, String]): String = gson.toJson(map.asJava)
}
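As a quick usage sketch (hypothetical values, assuming the object above is on the classpath), the two helpers round-trip a Scala Map through JSON:
// Serialize a Scala Map to JSON and back via the Gson-backed helpers.
val json = PersistentActorMessage.mapToJson(Map("id" -> "42", "kind" -> "reminder"))
val restored = PersistentActorMessage.jsonToMap(json) // Map(id -> 42, kind -> reminder)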
Example 10: MonitoringServer
// Package declaration and imported dependencies
package com.scalaio.http.monitoring
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.{HttpEntity, HttpResponse, StatusCodes, Uri}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.Materializer
import com.typesafe.config.Config
import com.yammer.metrics.HealthChecks
import com.yammer.metrics.core.HealthCheckRegistry
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsArray, Json}
import scala.util.{Failure, Success}
import scala.collection.convert.wrapAsScala._
object MonitoringServer {
lazy val logger = LoggerFactory.getLogger(getClass)
def handleHealthchecks(registry: HealthCheckRegistry): Route = {
path("health") {
get {
complete {
val checks = registry.runHealthChecks
val payload = JsArray(checks.map {
case (name, result) =>
Json.obj(
"name" -> name,
"healthy" -> result.isHealthy,
"message" -> result.getMessage
)
}.toSeq)
val status = if (checks.values().forall(_.isHealthy)) OK else InternalServerError
HttpResponse(entity = HttpEntity(`application/json`, Json.stringify(payload)), status = status)
}
}
}
}
def start(serverConfig: Config, registry: HealthCheckRegistry = HealthChecks.defaultRegistry())
(implicit system: ActorSystem, materializer: Materializer): Unit = {
val host = serverConfig.getString("host")
val port = serverConfig.getInt("port")
logger.info(s"Starting monitoring server at: $host:$port")
val routes = handleHealthchecks(registry) ~ redirect(Uri("/health"), StatusCodes.SeeOther)
import system.dispatcher
Http()
.bindAndHandle(routes, host, port).onComplete {
case Success(Http.ServerBinding(address)) =>
logger.info(s"Monitoring server started at :$address")
case Failure(t) =>
logger.error("Error while trying to start monitoring server", t)
}
}
}
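A minimal bootstrap sketch for the server above; the `monitoring` config block and the object name are illustrative assumptions, not part of the example:
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory

object MonitoringMain extends App {
  // Assumes application.conf contains: monitoring { host = "0.0.0.0", port = 9095 }
  implicit val system: ActorSystem = ActorSystem("monitoring")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  MonitoringServer.start(ConfigFactory.load().getConfig("monitoring"))
}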
Example 11: GeodeFiniteSourceSpec
// Package declaration and imported dependencies
package akka.stream.alpakka.geode.scaladsl
import akka.stream.scaladsl.Sink
import org.slf4j.LoggerFactory
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
class GeodeFiniteSourceSpec extends GeodeBaseSpec {
private val log = LoggerFactory.getLogger(classOf[GeodeFiniteSourceSpec])
"Geode finite source" should {
it { geodeSettings =>
"retrieves finite elements from geode" in {
val reactiveGeode = new ReactiveGeode(geodeSettings)
//#query
val source =
reactiveGeode
.query[Person](s"select * from /persons order by id")
.runWith(Sink.foreach(e => log.debug(s"$e")))
//#query
Await.ready(source, 10 seconds)
val animals =
reactiveGeode
.query[Animal](s"select * from /animals order by id")
.runWith(Sink.foreach(e => log.debug(s"$e")))
Await.ready(animals, 10 seconds)
val complexes =
reactiveGeode
.query[Complex](s"select * from /complexes order by id")
.runWith(Sink.foreach(e => log.debug(s"$e")))
Await.ready(complexes, 10 seconds)
reactiveGeode.close()
}
}
}
}
Example 12: ScrapingKitReactorTest
// Package declaration and imported dependencies
package ru.fediq.scrapingkit
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpMethods, Uri}
import com.codahale.metrics.Slf4jReporter
import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.typesafe.config.ConfigFactory
import org.scalatest.FlatSpec
import org.slf4j.LoggerFactory
import ru.fediq.scrapingkit.backend.{InMemoryFifoLinksQueue, InMemoryLinksHistory, NoOpFeedExporter, NoOpPageCache}
import ru.fediq.scrapingkit.model.PageRef
import ru.fediq.scrapingkit.scraper.HtmlCrawlingScraper
import ru.fediq.scrapingkit.util.Metrics
class ScrapingKitReactorTest extends FlatSpec {
"Reactor" should "crawl something" in {
val scraperName = "crawl"
val scrapers = Map(scraperName -> new HtmlCrawlingScraper(scraperName))
val config = ConfigFactory.load()
implicit val system = ActorSystem("reactor-test", config)
val linksQueue = new InMemoryFifoLinksQueue()
val linksHistory = new InMemoryLinksHistory()
val pageCache = new NoOpPageCache()
val exporter = new NoOpFeedExporter()
val reactor = new ScrapingKitReactor(linksQueue, linksHistory, pageCache, exporter, scrapers)
linksQueue.enqueue(PageRef(Uri("http://quotes.toscrape.com/"), HttpMethods.GET, scraperName))
Slf4jReporter
.forRegistry(Metrics.metricRegistry)
.withLoggingLevel(LoggingLevel.INFO)
.outputTo(LoggerFactory.getLogger("METRICS"))
.convertDurationsTo(TimeUnit.MILLISECONDS)
.convertRatesTo(TimeUnit.SECONDS)
.build()
.start(10, TimeUnit.SECONDS)
Thread.sleep(10000)
reactor.close()
}
}
Example 13: CollectTransformer
// Package declaration and imported dependencies
package transformation
import org.apache.kafka.streams.KeyValue
import org.apache.kafka.streams.kstream.Transformer
import org.apache.kafka.streams.processor.ProcessorContext
import org.apache.kafka.streams.state.KeyValueStore
import org.slf4j.{Logger, LoggerFactory}
abstract class CollectTransformer[INPUT_K, INPUT_V, STORE_V, OUTPUT_K, OUTPUT_V](storeName: String)
extends Transformer[INPUT_K, INPUT_V, KeyValue[OUTPUT_K, OUTPUT_V]] {
val log: Logger = LoggerFactory.getLogger(this.getClass)
var ctx: ProcessorContext = _
var store: KeyValueStore[INPUT_K, STORE_V] = _
override def init(context: ProcessorContext): Unit = {
log.debug(s"Init ...")
ctx = context
store = ctx.getStateStore(storeName).asInstanceOf[KeyValueStore[INPUT_K, STORE_V]]
ctx.schedule(100)
}
override def punctuate(timestamp: Long): KeyValue[OUTPUT_K, OUTPUT_V] = {
log.debug(s"Punctuating ...")
null
}
override def transform(key: INPUT_K, value: INPUT_V): KeyValue[OUTPUT_K, OUTPUT_V] = {
log.debug(s"Transforming event : $value")
val currentStoreValue = store.get(key)
if (currentStoreValue != null && collectComplete(currentStoreValue, value)) {
collectOutput(key, currentStoreValue, value)
} else {
store.put(key, appendToStore(currentStoreValue, value))
null
}
}
def appendToStore(storeValue: STORE_V, appendValue: INPUT_V): STORE_V
def collectComplete(storeValue: STORE_V, appendValue: INPUT_V): Boolean
def collectOutput(inputKey: INPUT_K, storeValue: STORE_V, mergeValue: INPUT_V): KeyValue[OUTPUT_K, OUTPUT_V]
override def close(): Unit = {
log.debug(s"Close ...")
}
}
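A minimal hypothetical subclass of the transformer above, batching three string values per key, shows how the three abstract hooks cooperate (it reuses the imports of the example):
class BatchOfThreeTransformer(storeName: String)
  extends CollectTransformer[String, String, List[String], String, List[String]](storeName) {
  // The first value for a key arrives with a null store entry.
  override def appendToStore(storeValue: List[String], appendValue: String): List[String] =
    if (storeValue == null) List(appendValue) else appendValue :: storeValue
  // Two buffered values plus the incoming one complete a batch of three.
  override def collectComplete(storeValue: List[String], appendValue: String): Boolean =
    storeValue.size >= 2
  // Emit the batch in arrival order (appendToStore prepends, so reverse).
  override def collectOutput(inputKey: String, storeValue: List[String], mergeValue: String): KeyValue[String, List[String]] =
    KeyValue.pair(inputKey, (mergeValue :: storeValue).reverse)
}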
Example 14: ConsulServiceLocator
// Package declaration and imported dependencies
package org.wex.cmsfs.lagom.service.discovery.consul
import java.net.URI
import com.lightbend.lagom.internal.client.CircuitBreakers
import com.lightbend.lagom.scaladsl.api.Descriptor.Call
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.client.{CircuitBreakerComponents, CircuitBreakingServiceLocator}
import org.slf4j.{Logger, LoggerFactory}
import play.api.Configuration
import scala.concurrent.{ExecutionContext, Future}
trait ConsulServiceLocatorComponents extends CircuitBreakerComponents {
lazy val serviceLocator: ServiceLocator = new ConsulServiceLocator(configuration, circuitBreakers)(executionContext)
}
class ConsulServiceLocator(configuration: Configuration, circuitBreakers: CircuitBreakers)(implicit ec: ExecutionContext)
extends CircuitBreakingServiceLocator(circuitBreakers) {
private val logger: Logger = LoggerFactory.getLogger(this.getClass)
private val consulServiceExtract = new ConsulServiceExtract(configuration)
override def locate(name: String, serviceCall: Call[_, _]): Future[Option[URI]] = Future {
logger.debug(s"request Service Name: ${name}.")
consulServiceExtract.getService(name)
}
}
Example 15: GitSourceDownloaderActor
// Package declaration and imported dependencies
package cloud.hw.actors
import akka.actor.{Actor, Props}
import akka.routing.BalancingPool
import cloud.hw.app.ErrorCase
import cloud.hw.util.GitSourceDownloader
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
class GitSourceDownloaderActor extends Actor {
val logger = Logger(LoggerFactory.getLogger(this.getClass))
def receive = {
case GitSourceReceiver(metadata) =>
logger.info("Download git project and send to ElasticsearchAdderActor")
val gitRepoZip = GitSourceDownloader.fromUrl(metadata("downloadUrl"))
var unzippedRepo = gitRepoZip.asInstanceOf[GitSourceDownloader].download
// Creates a balancing pool of actors to split up the work.
val router = context.actorOf(BalancingPool(4).props(Props(new ElasticsearchAdderActor())))
// Run through all the entries and give each actor 15 at a time to insert into
// Elasticsearch. We used 15 because anything more effectively DDoSed
// the Elasticsearch server.
while (unzippedRepo.nonEmpty) {
logger.info("Taking 15 entries")
router ! AddGitProjectToElasticsearch(metadata("projectName"), unzippedRepo.take(15))
unzippedRepo = unzippedRepo.drop(15)
}
// Not a Git repo? Then don't do anything.
case ErrorCase => logger.info("Error with Git repo download")
case _ => logger.info("Not a valid message to GitSourceDownloaderActor")
}
}