

Scala ScalaReflection Class Code Examples

This article collects typical usage examples of the org.apache.spark.sql.catalyst.ScalaReflection class in Scala. If you have been wondering what ScalaReflection is for, how to use it, or what its usage looks like in real code, the selected examples below may help.


Three code examples of the ScalaReflection class are shown below, ordered by popularity by default.
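All three examples share the same core technique: deriving a Spark SQL StructType from a Scala case class with ScalaReflection.schemaFor. As a minimal, self-contained sketch (the Person case class and SchemaForDemo object are invented for illustration and do not appear in the projects below):

import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.types.StructType

// Hypothetical case class used only to illustrate schema derivation.
case class Person(id: Long, name: String, age: Option[Int])

object SchemaForDemo {
  def main(args: Array[String]): Unit = {
    // schemaFor[T] returns a Schema(dataType, nullable); for a case class,
    // dataType is a StructType describing its fields.
    val schema = ScalaReflection.schemaFor[Person].dataType.asInstanceOf[StructType]
    schema.printTreeString()
    // root
    //  |-- id: long (nullable = false)
    //  |-- name: string (nullable = true)
    //  |-- age: integer (nullable = true)
  }
}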

Example 1: DatabaseBackup

// Package declaration and imported dependencies
package unus.stage

import unus.helpers.Conf
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.types.StructType
import scala.reflect.runtime.universe._
import org.apache.spark.sql.SaveMode

class DatabaseBackup[T: TypeTag](table: String) {
  private lazy val schema = ScalaReflection.schemaFor[T].dataType.asInstanceOf[StructType]

  def save(): Unit = {
    Conf.spark.read
      .format("jdbc")
      .option("url", Conf.dbUrl)
      .option("dbtable", s""""$table"""")
      .option("user", Conf.dbUsername)
      .option("password", Conf.dbPassword)
      .option("jdbcdriver","org.postgresql.Driver")
      .load()
      .write
      .format("csv")
      .option("header", "true")
      .save(Conf.dataDir + "/" + table + ".csv")
  }

  def load(): Unit = {
    Conf.spark.read
      .format("csv")
      .option("header", "true")
      .schema(schema)
      .load(Conf.dataDir + "/" + table + ".csv.gz")
      .write
      .format("jdbc")
      .option("url", Conf.dbUrl)
      .option("dbtable", s""""$table"""")
      .option("user", Conf.dbUsername)
      .option("password", Conf.dbPassword)
      .option("jdbcdriver","org.postgresql.Driver")
      .mode(SaveMode.Append)
      .save()
  }
} 
Developer: mindfulmachines, Project: unus, Lines of code: 44, Source: DatabaseBackup.scala
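Usage is a one-liner per direction. The sketch below is illustrative only: the "patients" table name is an assumption, and Patient is borrowed from Example 3 purely as a plausible type parameter.

// Hypothetical usage; the table name and type parameter are assumptions,
// not taken from the unus project configuration.
import unus.db.Patient
import unus.stage.DatabaseBackup

object BackupDemo {
  def main(args: Array[String]): Unit = {
    val backup = new DatabaseBackup[Patient]("patients")
    backup.save()  // dump the JDBC table to Conf.dataDir + "/patients.csv"
    backup.load()  // append rows from Conf.dataDir + "/patients.csv.gz" back into the table
  }
}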

Example 2: SchemaConverter

// Package declaration and imported dependencies
import org.apache.spark.sql.types._
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.catalyst.ScalaReflection.universe._
/**
 * Convert schema to case class and case class to schema
 *
 * Created by swadhin on 27/01/17.
 */
object SchemaConverter {
  type TypeConverter = (DataType) => String

  def schemaToCaseClass(schema: StructType, className: String)(implicit tc: TypeConverter): String = {
    def genField(s: StructField): String = {
      val f = tc(s.dataType)
      s match {
        case x if x.nullable => s"  ${s.name}:Option[$f] = None"
        case _               => s"  ${s.name}:$f"
      }
    }

    val fieldsStr = schema.map(genField).mkString(",\n  ")
    s"""
       |case class $className (
       |  $fieldsStr
       |)
  """.stripMargin
  }

  def caseClassToSchema[T: TypeTag] = {
    ScalaReflection.schemaFor[T].dataType.asInstanceOf[StructType]
  }

  implicit val defaultTypeConverter: TypeConverter = {
    case _: ByteType      => "Byte"
    case _: ShortType     => "Short"
    case _: IntegerType   => "Int"
    case _: LongType      => "Long"
    case _: FloatType     => "Float"
    case _: DoubleType    => "Double"
    case _: DecimalType   => "BigDecimal"
    case _: StringType    => "String"
    case _: BinaryType    => "Array[Byte]"
    case _: BooleanType   => "Boolean"
    case _: TimestampType => "java.sql.Timestamp"
    case _: DateType      => "java.sql.Date"
    case _: ArrayType     => "Seq[Int]"
    case _: MapType       => "scala.collection.Map"
    case _: StructType    => "org.apache.spark.sql.Row"
    case _                => "String"
  }
} 
Developer: swadhin-, Project: SchemaConverter, Lines of code: 52, Source: SchemaConverter.scala
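A round-trip sketch, assuming SchemaConverter and its implicit defaultTypeConverter are imported into scope; the Employee case class is invented for illustration:

import org.apache.spark.sql.types.StructType

// Hypothetical case class for the round trip.
case class Employee(id: Long, name: String, salary: Option[Double])

object SchemaConverterDemo {
  def main(args: Array[String]): Unit = {
    import SchemaConverter._

    // Case class -> StructType
    val schema: StructType = caseClassToSchema[Employee]
    println(schema.treeString)

    // StructType -> generated case class source
    println(schemaToCaseClass(schema, "Employee"))
    // prints roughly:
    // case class Employee (
    //   id:Long,
    //   name:Option[String] = None,
    //   salary:Option[Double] = None
    // )
  }
}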

Example 3: PatientCacher

// Package declaration and imported dependencies
package unus.stage

import unus.helpers.Conf
import unus.db.{Patient, Repository}
import unus.helpers.Cacher
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.types.StructType

class PatientCacher extends Cacher[Patient] {
  override protected val name: String = "patient"

  override protected def build(): RDD[Patient] = {
    val schema = ScalaReflection.schemaFor[Patient].dataType.asInstanceOf[StructType]

    import Conf.spark.implicits._

    Conf.spark.read.format("csv")
      .schema(schema)
      .option("header", "true")
      .load(Conf.dataDir + "/FInalDataset.csv")
      .as[Patient]
      .rdd
  }
} 
Developer: mindfulmachines, Project: unus, Lines of code: 26, Source: PatientCacher.scala
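The Cacher base class comes from the unus project and its public API is not shown here, but the pattern inside build() stands on its own: derive the schema from the case class, read the CSV with that schema, and convert to a typed RDD. A self-contained sketch with an invented Record case class and file path (only the technique is taken from the example above):

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.types.StructType

// Hypothetical record type; field names must match the CSV header columns.
case class Record(id: String, firstName: String, lastName: String)

object CsvToRddDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("csv-to-rdd").master("local[*]").getOrCreate()
    import spark.implicits._

    // Derive the StructType from the case class instead of writing it by hand.
    val schema = ScalaReflection.schemaFor[Record].dataType.asInstanceOf[StructType]

    // Read the CSV with the derived schema and map rows onto the case class.
    val rdd = spark.read.format("csv")
      .schema(schema)
      .option("header", "true")
      .load("/tmp/records.csv") // illustrative path
      .as[Record]
      .rdd

    println(rdd.count())
    spark.stop()
  }
}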


Note: The org.apache.spark.sql.catalyst.ScalaReflection class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.