

Scala StdLexical Class Code Examples

This article collects typical usage examples of the scala.util.parsing.combinator.lexical.StdLexical class in Scala. If you are wondering what the StdLexical class is for, how to use it, or what real code that uses it looks like, the curated examples below should help.


Three code examples of the StdLexical class are shown below, sorted by popularity by default.

Example 1: arithmeticParser

// Set up the package name and import the required classes
package examples.parsing

import scala.util.parsing.combinator.lexical.StdLexical
import scala.util.parsing.combinator.syntactical.StdTokenParsers

 
object arithmeticParser extends StdTokenParsers {   
  type Tokens = StdLexical ; val lexical = new StdLexical
  lexical.delimiters ++= List("(", ")", "+", "-", "*", "/")

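  // `p * sep` is sugar for chainl1(p, sep): a left-associative chain of `p`s joined by the
  // binary functions produced by `sep` (compare arithmeticParserDesugared below)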
  lazy val expr =   term*("+" ^^^ {(x: Int, y: Int) => x + y} | "-" ^^^ {(x: Int, y: Int) => x - y})
  lazy val term = factor*("*" ^^^ {(x: Int, y: Int) => x * y} | "/" ^^^ {(x: Int, y: Int) => x / y})
  lazy val factor: Parser[Int] = "(" ~> expr <~ ")" | numericLit ^^ (_.toInt)
  
  def main(args: Array[String]) {
    println(
      if (args.length == 0) "usage: scala examples.parsing.arithmeticParser <expr-string>"
      else expr(new lexical.Scanner(args.mkString(" ")))
    )
  }
}


object arithmeticParserDesugared extends StdTokenParsers {   
  type Tokens = StdLexical ; val lexical = new StdLexical
  lexical.delimiters ++= List("(", ")", "+", "-", "*", "/")

  lazy val expr = chainl1(term, (keyword("+").^^^{(x: Int, y: Int) => x + y}).|(keyword("-").^^^{(x: Int, y: Int) => x - y}))
  lazy val term = chainl1(factor, (keyword("*").^^^{(x: Int, y: Int) => x * y}).|(keyword("/").^^^{(x: Int, y: Int) => x / y}))
  lazy val factor: Parser[Int] = keyword("(").~>(expr.<~(keyword(")"))).|(numericLit.^^(x => x.toInt))   
  
  def main(args: Array[String]) {
    println(
      if (args.length == 0) "usage: scala examples.parsing.arithmeticParserDesugared <expr-string>"
      else expr(new lexical.Scanner(args.mkString(" ")))
    )
  }
} 
Developer: chenc10, Project: spark-SSR-ICDCS17, Lines: 39, Source: ArithmeticParser.scala
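
Both objects only print the raw ParseResult from main. A minimal usage sketch that invokes the parser directly and inspects the result; the ArithmeticDemo wrapper below is an assumption, not part of the original project:

package examples.parsing

// Hypothetical helper: evaluate an arithmetic expression string with arithmeticParser.
object ArithmeticDemo {
  import arithmeticParser._

  def eval(input: String): Option[Int] =
    expr(new lexical.Scanner(input)) match {
      case Success(value, _) => Some(value)  // e.g. eval("2 * (3 + 4)") == Some(14)
      case _                 => None         // lexing or parsing failed
    }
}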

Example 2: LambdaParser

// Set up the package name and import the required classes
package lambda

import scala.util.parsing.combinator.PackratParsers
import scala.util.parsing.combinator.lexical.StdLexical
import scala.util.parsing.combinator.syntactical.StdTokenParsers



class LambdaParser extends StdTokenParsers with PackratParsers {
  type Tokens = StdLexical
  val lexical = new LambdaLexer

  // = Grammar for lambda calculus =
  type P[+A] = PackratParser[A]
  lazy val lam_expr : P[Term] = application | non_application
  lazy val non_application : P[Term] = lambda | let | variable | parens
  lazy val lambda: P[Term] = ("\\" | "?") ~> rep1(ident) ~ "." ~ lam_expr ^^ { case i ~ "." ~ e => (i :\ e)((x,y) => Fun(x, y)) }
  lazy val let : P[Let] = "let" ~ ident ~ "=" ~ lam_expr ~ "in" ~ lam_expr ^^ { case "let" ~ i ~ "=" ~ l_e ~ "in" ~ r_e => Let(i, l_e, r_e) }
  lazy val parens : P[Term] = "(" ~> lam_expr <~ ")"
  lazy val application : P[Term] = lam_expr ~ rep1(lam_expr) ^^ { case l_e ~ r_e => (l_e /: r_e)(FApp(_, _)) }
  lazy val variable : P[Var] = ident ^^ { Var(_) }

  def parse(t: String): ParseResult[Term] = {
    val tokens = new lexical.Scanner(t)
    phrase(lam_expr)(tokens)
  }
} 
Developer: alpclk, Project: lambda-calculus-interpretter, Lines: 28, Source: LambdaParser.scala
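
The grammar above relies on a LambdaLexer and a Term AST (Var, Fun, FApp, Let) that the project defines elsewhere and that are not shown on this page. A minimal sketch of what those pieces might look like; the constructor fields and the delimiter set are assumptions, not the original code:

import scala.util.parsing.combinator.lexical.StdLexical

// Hypothetical AST matching the constructors used in the grammar above.
sealed trait Term
case class Var(name: String) extends Term
case class Fun(param: String, body: Term) extends Term
case class FApp(fun: Term, arg: Term) extends Term
case class Let(name: String, value: Term, body: Term) extends Term

// Hypothetical lexer: "let" and "in" must be reserved so `ident` does not consume them,
// and the lambda markers and punctuation must be registered as delimiters.
class LambdaLexer extends StdLexical {
  reserved ++= Set("let", "in")
  delimiters ++= Set("\\", "?", ".", "(", ")", "=")
}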

Example 3: CharLit

// Set up the package name and import the required classes
import scala.util.parsing.combinator.syntactical.StdTokenParsers
import scala.util.parsing.combinator.token.StdTokens
import scala.util.parsing.combinator.lexical.StdLexical
import scala.util.matching.Regex



trait MyTokens extends StdTokens { 
  case class CharLit(chars: String) extends Token
}

class MyLexical extends StdLexical with MyTokens {
  def regex(r: Regex): Parser[String] = new Parser[String] {
    def apply(in: Input) = r.findPrefixMatchOf(
      in.source.subSequence(in.offset, in.source.length)) match {
        case Some(matched) =>
          Success(in.source.subSequence(in.offset,
            in.offset + matched.end).toString, in.drop(matched.end))
        case None => Failure("string matching regex `" + r +
          "' expected but " + in.first + " found", in)
      }
  }

  override def token: Parser[Token] = 
    regex("[a-zA-Z][a-zA-Z0-9]*".r) ^^ { processIdent(_) } |
    regex("0|[1-9][0-9]*".r) ^^ { NumericLit(_) } |
    regex("'([^']|'')*'".r) ^^ { StringLit(_) } |
    regex("#[0-9]+".r) ^^ { s: String => CharLit(s.drop(1).toInt.toChar.toString) } |
    delim                         
}

class MyParser extends StdTokenParsers {
  type Tokens = MyTokens
  val lexical = new MyLexical

  def parseAll[T](p: Parser[T], in: String): ParseResult[T] =
    phrase(p)(new lexical.Scanner(in))

  def charLit: Parser[String] = 
    accept("character literal", { case t: lexical.CharLit => t.chars })  

  lexical.reserved += ("boolean", "char", "int", "var", "float", "true", "false", "string")
  lexical.delimiters += (":", "=", ";")

  def vardecl: Parser[Any] = ("var" ~> repsep(ident, ",") <~ ":") ~ typ ~ opt(init) <~ ";"
  def typ = "int" | "float" | "char" | "string" | "boolean"
  def init: Parser[Any] = "=" ~ (numericLit | stringLit | charLit | "false" | "true")
}

object Main extends App {
  val parser = new MyParser
  println(parser.parseAll(parser.vardecl, "var c: int = 100;").get)
  println(parser.parseAll(parser.vardecl, "var str: string = 'Hello';").get)
  println(parser.parseAll(parser.vardecl, "var c: char = #100;").get)
} 
Developer: yeahnoob, Project: scala-impatient-2e-code, Lines: 56, Source: MyParser.scala
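
Main above calls .get on every ParseResult, which throws an exception when parsing fails. A safer invocation sketch that pattern-matches on the result instead; the SafeMain object is an assumption, not part of the original code:

object SafeMain extends App {
  val parser = new MyParser
  parser.parseAll(parser.vardecl, "var flag: boolean = true;") match {
    case parser.Success(result, _) => println(s"parsed: $result")
    case parser.Failure(msg, next) => println(s"parse failure at ${next.pos}: $msg")
    case parser.Error(msg, next)   => println(s"parse error at ${next.pos}: $msg")
  }
}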


Note: The scala.util.parsing.combinator.lexical.StdLexical class examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. When distributing or using the code, please follow the License of the corresponding project. Do not reproduce this article without permission.