This article collects typical usage examples of the Scala class org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions. If you have been wondering what the JDBCOptions class is for, how to use it, or where to find examples of it in action, the curated examples below should help.
Four code examples of the JDBCOptions class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
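As background for the examples: JDBCOptions lives in a Spark-internal package and exposes the canonical option keys of the jdbc data source, such as JDBCOptions.JDBC_TABLE_NAME ("dbtable") and JDBCOptions.JDBC_DRIVER_CLASS ("driver"), so callers can reference the constants instead of hard-coding the strings. The minimal sketch below shows that pattern using stock Spark APIs; the connection URL and table name are hypothetical placeholders.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions

object JdbcOptionsSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("jdbc-options-sketch")
      .master("local[*]")
      .getOrCreate()

    // Use the constants instead of the bare strings "dbtable" and "driver".
    val options = Map(
      "url" -> "jdbc:postgresql://localhost:5432/demo",        // hypothetical URL
      JDBCOptions.JDBC_TABLE_NAME -> "demo_table",             // resolves to "dbtable"
      JDBCOptions.JDBC_DRIVER_CLASS -> "org.postgresql.Driver" // resolves to "driver"
    )

    spark.read.format("jdbc").options(options).load().show()
  }
}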

Example 1: DatabaseWriterActivity

// Set the package name and import the dependent classes
package yumi.pipeline.activities
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import yumi.pipeline.{Activity, Parameters, SessionData, YumiContext}
class DatabaseWriterActivity(val parameters: Parameters) extends Activity {
val url = parameters.getString("url")
val table = parameters.getString("table")
val as = parameters.getString("as")
val mode = parameters.getString("mode")
val jdbcDriverClass = parameters.getString("driver")
val properties = parameters
.getAs[Map[String, Any]]("properties")
.map {
case (key, value) =>
(key -> value.toString)
}
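// Derive the data source format from the URL scheme: a URL such as
// "jdbc:postgresql://host/db" yields the format "jdbc:postgresql".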
val format = url.split(":").take(2).mkString(":")
override protected[this] def onInvoke(sessionData: SessionData)
(implicit yumiContext: YumiContext): SessionData = {
import yumiContext._
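// Merge the user-supplied properties with the canonical Spark JDBC option
// keys: JDBC_TABLE_NAME ("dbtable") and JDBC_DRIVER_CLASS ("driver").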
val completeOptions = properties +
(JDBCOptions.JDBC_TABLE_NAME -> as) +
(JDBCOptions.JDBC_DRIVER_CLASS -> jdbcDriverClass) +
("url" -> url)
dataFrameWriter.write(
dataFrame = sparkSession.table(table),
format = format,
mode = mode,
options = completeOptions,
path = url
)
sessionData
}
}

Example 2: DatabaseReaderActivity

// Set the package name and import the dependent classes
package yumi.pipeline.activities
import java.util.Properties
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import yumi.pipeline.{Activity, Parameters, SessionData, YumiContext}
class DatabaseReaderActivity(val parameters: Parameters) extends Activity {
val url = parameters.getString("url")
val table = parameters.getString("table")
val asView = parameters.getString("as")
val jdbcDriverClass = parameters.getString("driver")
val properties = parameters
.getAs[Map[String, Any]]("properties")
.map {
case (key, value) =>
(key -> value.toString)
}
override protected[this] def onInvoke(sessionData: SessionData)
(implicit yumiContext: YumiContext): SessionData = {
import yumiContext._
val completeOptions = properties +
(JDBCOptions.JDBC_DRIVER_CLASS -> jdbcDriverClass)
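// DataFrameReader.jdbc expects java.util.Properties, so fold the
// immutable option map into a mutable Properties instance.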
val completeProperties = completeOptions
.foldLeft(new Properties()) { (properties, keyValue) =>
val (key, value) = keyValue
properties.setProperty(key, value.toString)
properties
}
sparkSession
.read
.jdbc(url = url, table = table, properties = completeProperties)
.createTempView(asView)
sessionData
}
}
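
For orientation, the reader activity above reduces to the following plain Spark calls once the option map has been folded into java.util.Properties; the URL, table, and view names here are hypothetical placeholders.

import java.util.Properties
import org.apache.spark.sql.SparkSession

object JdbcReadSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("jdbc-read-sketch")
      .master("local[*]")
      .getOrCreate()

    // Equivalent of the activity's fold: copy string options into Properties.
    val props = new Properties()
    props.setProperty("driver", "org.postgresql.Driver") // hypothetical driver
    props.setProperty("fetchsize", "2000")               // rows per round trip

    spark.read
      .jdbc("jdbc:postgresql://localhost:5432/demo", "demo_table", props)
      .createTempView("demo_view") // later pipeline steps can query this view
  }
}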

Example 3: DatabaseReaderActivitySpec

// Set the package name and import the dependent classes
package test.yumi.pipeline.activities
import java.util.Properties
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.mockito.ArgumentMatchers.any // needed for the any[...] matchers below
import org.mockito.Mockito.{verify, when}
import test.yumi.pipeline.MockSessionSpec
import yumi.YumiMap
import yumi.pipeline.activities.DatabaseReaderActivity
class DatabaseReaderActivitySpec extends MockSessionSpec {
it should "load sql server table as spark view" in new MockSessionScope {
// Arrange
val url = "url"
val table = "table_name"
val as = "as"
val driver = "com.mysql.Driver"
val properties = Map("batchSize" -> "2000")
val expectedProperties = new Properties()
properties.foreach {
case (key, value) =>
expectedProperties.setProperty(key, value)
}
expectedProperties.put(JDBCOptions.JDBC_DRIVER_CLASS, driver)
val parameters = createParameters()
.add("url", url)
.add("table", table)
.add("driver", driver)
.add("as", as)
.add("properties", properties)
.build()
val sessionData = YumiMap()
val dataFrame = mock[DataFrame]
when(dataFrameReader.jdbc(any[String], any[String], any[Properties])).thenReturn(dataFrame)
// Act
val activity = new DatabaseReaderActivity(parameters)
val resultSessionData = activity.invoke(sessionData)
// Assert
verify(sparkSession).read
verify(dataFrameReader)
.jdbc(url, table, expectedProperties)
verify(dataFrame).createTempView(as)
assert(resultSessionData === sessionData) // a bare === would discard the result
}
}

Example 4: DatabaseWriterActivitySpec

// Set the package name and import the dependent classes
package test.yumi.pipeline.activities
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions
import org.mockito.Mockito.{verify, when}
import test.yumi.pipeline.MockSessionSpec
import yumi.YumiMap
import yumi.pipeline.activities.DatabaseWriterActivity
class DatabaseWriterActivitySpec extends MockSessionSpec {
it should "write to database table from spark view" in new MockSessionScope {
// Arrange
val url = "jdbc:postgresql://yumi.rds.amazonaws.com:5432/yumi_postgresql?user=yumi&password=somepassword"
val table = "table_name"
val as = "as"
val mode = "append"
val jdbcDriverClass = "org.postgresql.Driver"
val properties = Map("batchSize" -> "2000")
val completeOptions: Map[String, String] = properties +
(JDBCOptions.JDBC_TABLE_NAME -> as) +
(JDBCOptions.JDBC_DRIVER_CLASS -> jdbcDriverClass) +
("url" -> url)
val parameters = createParameters()
.add("url", url)
.add("table", table)
.add("as", as)
.add("properties", properties)
.add("mode", mode)
.add("driver", jdbcDriverClass)
.build()
val sessionData = YumiMap()
val dataFrame = mock[DataFrame]
when(sparkSession.table(table)).thenReturn(dataFrame)
// Act
val activity = new DatabaseWriterActivity(parameters)
val resultSessionData = activity.invoke(sessionData)
// Assert
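// Positional order assumed here: write(dataFrame, format, mode, path, options),
// matching the named arguments used in Example 1.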
verify(dataFrameWriter).write(dataFrame, "jdbc:postgresql", mode, url, completeOptions)
assert(resultSessionData === sessionData) // a bare === would discard the result
}
}