本文整理汇总了Scala中org.apache.spark.sql.catalyst.expressions.Expression类的典型用法代码示例。如果您正苦于以下问题:Scala Expression类的具体用法?Scala Expression怎么用?Scala Expression使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Expression类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Scala代码示例。
示例1: TextualUtil
//Package declaration and imports of the classes this example depends on
package edu.utah.cs.simba.util
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, BindReferences, Expression}
import org.apache.spark.sql.catalyst.util.ArrayData
import scala.collection.mutable
object TextualUtil {

  /**
   * Length-filtered set-overlap similarity test between two token arrays.
   *
   * Returns true when the overlap ratio (shared tokens over the number of
   * distinct tokens seen across both sides) is at least `sim`. A cheap length
   * filter rejects pairs whose sizes alone make the threshold unreachable.
   *
   * NOTE(review): a right-side token that is absent from the left is added to
   * the seen-set, so a later duplicate of it on the right counts as a match —
   * presumably intentional, but worth confirming against callers.
   *
   * @param leftText  tokens of the left record
   * @param rightText tokens of the right record
   * @param sim       similarity threshold in [0, 1]
   */
  def simFilter(leftText: ArrayData, rightText: ArrayData, sim: Double): Boolean = {
    val leftLen = leftText.numElements()
    val rightLen = rightText.numElements()
    // Size filter: if one side is more than 1/sim times the other, the
    // threshold can never be met, so skip the token comparison entirely.
    if (sim * leftLen > rightLen || sim * rightLen > leftLen) {
      false
    } else {
      // Seed the seen-set with every (distinct) left token.
      val seen = mutable.Set.empty[String]
      (0 until leftLen).foreach { i =>
        seen += leftText.getUTF8String(i).toString
      }
      // Count right tokens already seen; record unseen ones for later duplicates.
      var overlap = 0
      (0 until rightLen).foreach { j =>
        val token = rightText.getUTF8String(j).toString
        if (seen(token)) overlap += 1 else seen += token
      }
      overlap.toDouble / seen.size >= sim
    }
  }

  /** Binds `expression` against `schema` and evaluates it on `input`, casting the result to ArrayData. */
  def getText(expression: Expression, schema: Seq[Attribute], input: InternalRow): ArrayData = {
    val bound = BindReferences.bindReference(expression, schema)
    bound.eval(input).asInstanceOf[ArrayData]
  }
}
示例2: Functions
//Package declaration and imports of the classes this example depends on
package org.apache.spark.sql.sparkline
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry
import org.apache.spark.sql.catalyst.expressions.Expression
import org.sparkline.spark.udfs.{ByteSize, Contains, Contains_Interpreted, Contains2}
import scala.reflect.ClassTag
object Functions {

  /**
   * Looks up the expression class `T` in Catalyst's FunctionRegistry helper
   * and registers it under `name` in the given SQLContext's function registry.
   */
  private def registerExpression[T <: Expression : ClassTag](sqlContext: SQLContext,
                                                            name: String): Unit = {
    val (fnName, (info, builder)) = FunctionRegistry.expression[T](name)
    sqlContext.functionRegistry.registerFunction(fnName, info, builder)
  }

  /** Registers all sparkline bitmap UDFs on the implicit SQLContext. */
  def registerFunctions(implicit sqlContext: SQLContext): Unit = {
    registerExpression[ByteSize](sqlContext, "bitmap_size")
    registerExpression[Contains](sqlContext, "bitmap_contains_basic")
    registerExpression[Contains2](sqlContext, "bitmap_contains")
    registerExpression[Contains_Interpreted](sqlContext, "bitmap_contains_i")
    //registerExpression[Bitmap](sqlContext, "bitmap")
  }
}
示例3: NaiveOrderResolution
//Package declaration and imports of the classes this example depends on
package by.skaryna.rules
import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, Expression, NamedExpression, SortOrder}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, Sort}
import org.apache.spark.sql.catalyst.rules.Rule
object NaiveOrderResolution extends Rule[LogicalPlan] {

  /**
   * Removes a redundant inner Sort in the pattern
   * `Sort(a) <- Project <- Sort(b)` when the outer sort column is just a
   * renamed (aliased) version of the inner sort column, both sorts share the
   * same direction, and both have the same global/local scope. The outer Sort
   * is kept; the inner one is dropped since it would be re-done anyway.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case Sort(Seq(order1), global1, Project(projectList, Sort(Seq(order2), global2, child2)))
      if global1 == global2 && order1.direction == order2.direction &&
        isOrderColumnRenamed(projectList, order1.child, order2.child) =>
      // Keep the projection but splice out the inner Sort beneath it.
      val prunedProject = Project(projectList, child2)
      Sort(Seq(order1), global1, prunedProject)
  }

  /**
   * True when some Alias in `projectList` renames `orderExpr2` (the inner sort
   * column) into the attribute `orderExpr1` (the outer sort column), i.e. the
   * alias wraps orderExpr2 and carries the same exprId as orderExpr1.
   */
  private[skaryna] def isOrderColumnRenamed(projectList: Seq[NamedExpression],
                                            orderExpr1: Expression, orderExpr2: Expression): Boolean =
    // Pattern match instead of isInstanceOf/asInstanceOf; `exists` instead of
    // collecting booleans and checking contains(true) — same result, no casts.
    orderExpr1 match {
      case attr: AttributeReference =>
        projectList.exists {
          case alias: Alias => alias.child == orderExpr2 && alias.exprId == attr.exprId
          case _ => false
        }
      case _ => false
    }
}
示例4: KarpsStubs
//Package declaration and imports of the classes this example depends on
package org.apache.spark.sql
import org.apache.spark.SparkContext
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
object KarpsStubs {

  /** Runs `body` under the existing SQL execution id `executionId`. */
  def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T =
    SQLExecution.withExecutionId(sc, executionId)(body)

  /** Runs `body` inside a freshly tracked execution for `queryExecution`. */
  def withNewExecutionId[T](
      sparkSession: SparkSession,
      queryExecution: QueryExecution)(body: => T): T =
    SQLExecution.withNewExecutionId(sparkSession, queryExecution)(body)

  /** Resolves and binds the DataFrame's row encoder against its plan output. */
  def getBoundEncoder(df: DataFrame): ExpressionEncoder[Row] =
    df.exprEnc.resolveAndBind(df.logicalPlan.output,
      df.sparkSession.sessionState.analyzer)

  /** Exposes the catalyst expression backing a Column. */
  def getExpression(c: Column): Expression = c.expr

  /** Wraps a catalyst expression as a user-facing Column. */
  def makeColumn(exp: Expression): Column = Column.apply(exp)
}