This article collects typical usage examples of the java.sql.PreparedStatement class in Scala. If you are wondering what the PreparedStatement class is for, how it is used, or what working code looks like, the selected class examples below should help.
The following shows 15 code examples of the PreparedStatement class, sorted by popularity by default.
Example 1: genericEncoder
// Package declaration and imported dependencies
package io.gustavoamigo.quill.pgsql.encoding.range.numeric
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource
import scala.collection.immutable.NumericRange
trait Encoders {
  this: JdbcSource[_, _] =>

  private def genericEncoder[T](valueToString: (T => String)): Encoder[T] = {
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        val sqlLiteral = valueToString(value)
        row.setObject(index + 1, sqlLiteral, Types.OTHER)
        row
      }
    }
  }

  private def tuple[T](t: (T, T)) = s"[${t._1}, ${t._2}]"
  private def range[T](r: NumericRange[T]) = s"[${r.head}, ${r.last}]"

  implicit val intTupleEncoder: Encoder[(Int, Int)] = genericEncoder(tuple)
  implicit val intRangeEncoder: Encoder[NumericRange[Int]] = genericEncoder(range)
  implicit val bigIntTupleEncoder: Encoder[(BigInt, BigInt)] = genericEncoder(tuple)
  implicit val bigIntRangeEncoder: Encoder[NumericRange[BigInt]] = genericEncoder(range)
  implicit val longTupleEncoder: Encoder[(Long, Long)] = genericEncoder(tuple)
  implicit val longRangeEncoder: Encoder[NumericRange[Long]] = genericEncoder(range)
  implicit val doubleTupleEncoder: Encoder[(Double, Double)] = genericEncoder(tuple)
  implicit val bigDecimalTupleEncoder: Encoder[(BigDecimal, BigDecimal)] = genericEncoder(tuple)
  implicit val bigDecimalRangeEncoder: Encoder[NumericRange[BigDecimal]] = genericEncoder(range)
}
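For reference, the effect of these encoders is equivalent to binding a PostgreSQL range literal as a string with Types.OTHER. The following is a minimal plain-JDBC sketch of that same call (the connection URL, table and column names are made up for illustration and are not part of the original source):

import java.sql.{DriverManager, Types}

object RangeLiteralDemo extends App {
  // Hypothetical connection and table; assumes a column of PostgreSQL type int4range.
  val conn = DriverManager.getConnection("jdbc:postgresql://localhost/demo", "user", "pass")
  val ps = conn.prepareStatement("INSERT INTO measurements (valid_range) VALUES (?)")
  // Same call the encoder above makes: a range literal passed as an untyped object,
  // letting the server cast the string to the column's range type.
  ps.setObject(1, "[1, 10]", Types.OTHER)
  ps.executeUpdate()
  ps.close()
  conn.close()
}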
Example 2: genericEncoder
// Package declaration and imported dependencies
package io.gustavoamigo.quill.pgsql.encoding.range.datetime
import java.sql.{PreparedStatement, Types}
import java.time.{LocalDate, ZonedDateTime, LocalDateTime}
import java.util.Date
import io.getquill.source.jdbc.JdbcSource
trait Encoders {
  this: JdbcSource[_, _] =>

  import Formatters._

  private def genericEncoder[T](valueToString: (T => String)): Encoder[T] = {
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        val sqlLiteral = valueToString(value)
        row.setObject(index + 1, sqlLiteral, Types.OTHER)
        row
      }
    }
  }

  private def tuple[T](t: (T, T))(valToStr: T => String) = s"[${valToStr(t._1)}, ${valToStr(t._2)}]"

  implicit val dateTupleEncoder: Encoder[(Date, Date)] = genericEncoder(tuple(_)(formatDate))
  implicit val localDateTimeTupleEncoder: Encoder[(LocalDateTime, LocalDateTime)] =
    genericEncoder(tuple(_)(formatLocalDateTime))
  implicit val zonedDateTimeTupleEncoder: Encoder[(ZonedDateTime, ZonedDateTime)] =
    genericEncoder(tuple(_)(formatZonedDateTime))
  implicit val dateTimeTupleEncoder: Encoder[(LocalDate, LocalDate)] =
    genericEncoder(t => s"[${formatLocalDate(t._1)}, ${formatLocalDate(t._2)})")
}
Example 3: genericEncoder
// Package declaration and imported dependencies
package io.gustavoamigo.quill.pgsql.encoding.jodatime
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource
import org.joda.time._
trait Encoders {
  this: JdbcSource[_, _] =>

  import Formatters._

  private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        val sqlLiteral = s"'${valueToString(value)}'"
        row.setObject(index + 1, sqlLiteral, Types.OTHER)
        row
      }
    }

  implicit val jodaDateTimeEncoder: Encoder[DateTime] = genericEncoder(_.toString(jodaTzDateTimeFormatter))
  implicit val jodaLocalDateEncoder: Encoder[LocalDate] = genericEncoder(_.toString(jodaDateFormatter))
  implicit val jodaLocalTimeEncoder: Encoder[LocalTime] = genericEncoder(_.toString(jodaTimeFormatter))
}
Example 4: genericEncoder
// Package declaration and imported dependencies
package io.gustavoamigo.quill.pgsql.encoding.json.play
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource
trait JsonEncoder {
  this: JdbcSource[_, _] =>

  import play.api.libs.json._

  private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        row.setObject(index + 1, valueToString(value), Types.OTHER)
        row
      }
    }

  implicit val jsonEncoder: Encoder[JsValue] = genericEncoder(Json.stringify)
}
Example 5: genericEncoder
// Package declaration and imported dependencies
package io.gustavoamigo.quill.pgsql.encoding.datetime
import java.sql.{PreparedStatement, Timestamp, Types}
import java.time._
import io.getquill.source.jdbc.JdbcSource
trait Encoders {
  this: JdbcSource[_, _] =>

  import Formatters._

  private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        val sqlLiteral = s"'${valueToString(value)}'"
        row.setObject(index + 1, sqlLiteral, Types.OTHER)
        row
      }
    }

  private def encoder[T](f: PreparedStatement => (Int, T) => Unit): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        f(row)(index + 1, value)
        row
      }
    }

  implicit val localDateTimeEncoder: Encoder[LocalDateTime] = encoder { (row: PreparedStatement) =>
    (index: Int, d: LocalDateTime) => row.setObject(index, Timestamp.valueOf(d), Types.TIMESTAMP)
  }
  implicit val zonedDateTimeEncoder: Encoder[ZonedDateTime] = genericEncoder(_.format(bpTzDateTimeFormatter))
  implicit val localDateEncoder: Encoder[LocalDate] = genericEncoder(_.format(bpDateFormatter))
  implicit val localTimeEncoder: Encoder[LocalTime] = genericEncoder(_.format(bpTimeFormatter))
  implicit val offsetTimeEncoder: Encoder[OffsetTime] = genericEncoder(_.format(bpTzTimeFormatter))
}
Example 6: SqlServerSink
// Package declaration and imported dependencies
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._
class SqlServerSink extends DatabaseSink
  with Serializable {

  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columns = rddSchema.fields.map(x => dialect.quoteIdentifier(x.name)).mkString(",")
    val parameters = rddSchema.fields.map(_ => "?").mkString("(", ",", ")")
    val sql = new StringBuilder(s"INSERT INTO $table($columns) VALUES ")
    // Append one "(?, ..., ?)" group per row, separated by commas; the foldLeft
    // accumulator tracks whether a comma is needed before the next group.
    (1 to rowCount).foldLeft(false) { (prependComma, index) =>
      if (prependComma) {
        sql.append(", ")
      }
      sql.append(parameters)
      true
    }
    conn.prepareStatement(sql.toString())
  }
}
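The statement returned above contains rowCount × columnCount placeholders, so the bind index for row r and column c is r * columnCount + c + 1. The helper below is a sketch of how such a statement could be filled; it is not part of the original repository:

// Hypothetical helper: bind a batch of rows into the statement returned by
// bulkInsertStatement. JDBC parameter indices are 1-based.
def bindRows(stmt: java.sql.PreparedStatement, rows: Seq[Seq[AnyRef]]): Unit = {
  val columnCount = if (rows.isEmpty) 0 else rows.head.size
  for {
    (row, r) <- rows.zipWithIndex
    (value, c) <- row.zipWithIndex
  } stmt.setObject(r * columnCount + c + 1, value)
}

Calling stmt.executeUpdate() afterwards would then insert all rows in a single round trip.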
Example 7: OracleDatabaseSink
// Package declaration and imported dependencies
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._
class OracleDatabaseSink extends DatabaseSink with Serializable {

  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columns = rddSchema.fields.map(x => dialect.quoteIdentifier(x.name)).mkString(",")
    val parameters = rddSchema.fields.map(_ => "?").mkString(s"INTO $table($columns) VALUES (", ",", ")")
    val sql = new StringBuilder("INSERT ALL ")
    (1 to rowCount).foldLeft(false) { (prependComma, index) =>
      if (prependComma) {
        sql.append(" ")
      }
      sql.append(parameters)
      true
    }
    sql.append(" SELECT 1 FROM DUAL")
    conn.prepareStatement(sql.toString())
  }
}
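To make the Oracle INSERT ALL shape concrete, for rowCount = 2 the builder above produces SQL along the following lines (the table and column names users, id and name are made up for this illustration):

// Illustration only: the text bulkInsertStatement would assemble for two rows.
val expectedSql =
  "INSERT ALL " +
    "INTO users(id,name) VALUES (?,?) " +
    "INTO users(id,name) VALUES (?,?)" +
    " SELECT 1 FROM DUAL"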
Example 8: MySqlSink
// Package declaration and imported dependencies
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._
class MySqlSink extends DatabaseSink with Serializable {

  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columns = rddSchema.fields.map(x => dialect.quoteIdentifier(x.name)).mkString(",")
    val parameters = rddSchema.fields.map(_ => "?").mkString("(", ",", ")")
    val sql = new StringBuilder(s"INSERT INTO $table($columns) VALUES ")
    (1 to rowCount).foldLeft(false) { (prependComma, index) =>
      if (prependComma) {
        sql.append(", ")
      }
      sql.append(parameters)
      true
    }
    conn.prepareStatement(sql.toString())
  }
}
Example 9: PostgreSqlSink
// Package declaration and imported dependencies
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._
class PostgreSqlSink extends DatabaseSink with Serializable {

  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columns = rddSchema.fields.map(x => dialect.quoteIdentifier(x.name)).mkString(",")
    val parameters = rddSchema.fields.map(_ => "?").mkString("(", ",", ")")
    val sql = new StringBuilder(s"INSERT INTO $table($columns) VALUES ")
    (1 to rowCount).foldLeft(false) { (prependComma, index) =>
      if (prependComma) {
        sql.append(", ")
      }
      sql.append(parameters)
      true
    }
    conn.prepareStatement(sql.toString())
  }
}
Example 10: UserQuery
// Package declaration and imported dependencies
package database
import models.{Types, User}
import java.sql.{Connection, PreparedStatement, ResultSet, SQLException, Statement}
object UserQuery {
  def create(
      connection: Connection,
      main_email: String,
      password: String,
      firstname: String,
      lastname: String,
      avatar: String
  ): Types.userid = {
    val statement: PreparedStatement =
      connection.prepareStatement(
        "INSERT INTO users.user (" +
          "main_email, " +
          "password, " +
          "firstname, " +
          "lastname, " +
          "avatar) " +
          "VALUES(?, ?, ?, ?, ?)",
        Statement.RETURN_GENERATED_KEYS
      )
    // JDBC parameter indices are 1-based.
    statement.setString(1, main_email)
    statement.setString(2, password)
    statement.setString(3, firstname)
    statement.setString(4, lastname)
    statement.setString(5, avatar)
    // The insert must be executed before generated keys become available.
    statement.executeUpdate()
    val generatedKeys: ResultSet = statement.getGeneratedKeys()
    if (generatedKeys.next()) {
      generatedKeys.getLong(1)
    }
    else {
      throw new SQLException("Adding user failed. No ID obtained.")
    }
  }
}
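A minimal sketch of calling this helper; the JDBC URL, credentials and field values are placeholders, not part of the original code:

import java.sql.DriverManager

// Hypothetical connection; any PostgreSQL database containing the users.user table would do.
val connection = DriverManager.getConnection("jdbc:postgresql://localhost/app", "app", "secret")
try {
  val userId = UserQuery.create(
    connection,
    main_email = "jane@example.com",
    password   = "hashed-password",
    firstname  = "Jane",
    lastname   = "Doe",
    avatar     = "avatars/jane.png"
  )
  println(s"created user $userId")
} finally {
  connection.close()
}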
Example 11: CatOutput
// Package declaration and imported dependencies
package formatter
import java.sql.{Connection, PreparedStatement}
import com.mysql.jdbc.exceptions.jdbc4.MySQLIntegrityConstraintViolationException
import renderer.CliCommunicator
case class CatOutput(dbCon: Connection, values: Iterable[Category]) extends Output {
  override def insert(): Unit = values.foreach { c =>
    val ps: PreparedStatement = dbCon.prepareStatement(
      "insert into category (pattern, category) values (?,?)")
    try {
      // set ps parameters
      ps.setString(1, c.pattern)
      ps.setString(2, c.category)
      // execute sql update statement
      ps.executeUpdate()
    } catch {
      case e: MySQLIntegrityConstraintViolationException =>
        CliCommunicator().talk(s"${e.getMessage}")
    } finally {
      ps.close()
    }
  }
}
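Since the SQL text never changes between iterations, a variant worth noting (a sketch, not the author's code) is to prepare the statement once and use JDBC batching instead of re-preparing per row. The trade-off is that the per-row duplicate-key handling above is replaced by a single java.sql.BatchUpdateException for the whole batch:

// Hypothetical rewrite of insert(): one PreparedStatement, one executeBatch at the end.
override def insert(): Unit = {
  val ps: PreparedStatement = dbCon.prepareStatement(
    "insert into category (pattern, category) values (?,?)")
  try {
    values.foreach { c =>
      ps.setString(1, c.pattern)
      ps.setString(2, c.category)
      ps.addBatch()
    }
    // One round trip for all rows.
    ps.executeBatch()
  } finally {
    ps.close()
  }
}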
Example 12: OracleJdbcPartition
// Package declaration and imported dependencies
package util
import org.apache.spark.Partition
import java.sql.ResultSet
import java.sql.Connection
import scala.reflect.ClassTag
import org.apache.spark.SparkContext
import java.sql.PreparedStatement
import org.apache.spark.rdd.RDD
import org.apache.spark.Logging
import org.apache.spark.TaskContext
import org.apache.spark.util
class OracleJdbcPartition(idx: Int, parameters: Map[String, Object]) extends Partition {
  override def index = idx
  val partitionParameters = parameters
}

abstract class OracleJdbcRdd[T: ClassTag](
    sc: SparkContext,
    getConnection: () => Connection,
    sql: String,
    getOracleJdbcPatition: () => Array[Partition],
    preparedStatement: (PreparedStatement, OracleJdbcPartition) => PreparedStatement,
    mapRow: (ResultSet) => T = OracleJdbcRdd.resultSetToObjectArray _)
  extends RDD[T](sc, Nil) with Logging {

  def getPartitions: Array[Partition] = {
    getOracleJdbcPatition()
  }
}

object OracleJdbcRdd {
  def resultSetToObjectArray(rs: ResultSet): Array[Object] = {
    Array.tabulate[Object](rs.getMetaData.getColumnCount)(i => rs.getObject(i + 1))
  }

  trait ConnectionFactory extends Serializable {
    @throws[Exception]
    def getConnection: Connection
  }
}
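The preparedStatement argument is where per-partition values get bound into the query. A hypothetical binder is sketched below; the parameter keys lowerBound and upperBound, and the placeholder layout, are assumptions for illustration only:

// Binds partition-specific bounds into two "?" placeholders of the query,
// e.g. a SQL string ending in "WHERE id >= ? AND id < ?".
val bindBounds: (PreparedStatement, OracleJdbcPartition) => PreparedStatement =
  (stmt, partition) => {
    stmt.setObject(1, partition.partitionParameters("lowerBound"))
    stmt.setObject(2, partition.partitionParameters("upperBound"))
    stmt
  }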
Example 13: TaskMarshaller
// Package declaration and imported dependencies
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTable
import model.{Task, TaskStatus}
import util.JdbcUtil._
object TaskMarshaller {
  def unmarshalTask(rs: ResultSet): Task = {
    import TaskTable._
    Task(
      id = rs.getObject(COL_ID).asInstanceOf[UUID],
      processId = rs.getObject(COL_PROCESS_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
      taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
      executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      previousAttempts = rs.getInt(COL_ATTEMPTS),
      startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
      status = rs.getString(COL_STATUS) match {
        case STATUS_SUCCEEDED => TaskStatus.Success(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_FAILED => TaskStatus.Failure(javaDate(rs.getTimestamp(COL_ENDED_AT)), Option(rs.getString(COL_REASON)))
        case STATUS_RUNNING => TaskStatus.Running()
      }
    )
  }

  def marshalTask(task: Task, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
                 (implicit conn: Connection) = {
    import TaskTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_ID => stmt.setObject(index, task.id)
        case COL_PROCESS_ID => stmt.setObject(index, task.processId)
        case COL_PROC_DEF_NAME => stmt.setString(index, task.processDefinitionName)
        case COL_TASK_DEF_NAME => stmt.setString(index, task.taskDefinitionName)
        case COL_EXECUTABLE => stmt.setString(index, PostgresJsonMarshaller.toJson(task.executable))
        case COL_ATTEMPTS => stmt.setInt(index, task.previousAttempts)
        case COL_STARTED => stmt.setTimestamp(index, task.startedAt)
        case COL_STATUS => stmt.setString(index, task.status match {
          case TaskStatus.Success(_) => STATUS_SUCCEEDED
          case TaskStatus.Failure(_, _) => STATUS_FAILED
          case TaskStatus.Running() => STATUS_RUNNING
        })
        case COL_REASON => stmt.setString(index, task.status match {
          case TaskStatus.Failure(_, reasons) => reasons.mkString(",")
          case _ => null
        })
        case COL_ENDED_AT => stmt.setTimestamp(index, task.endedAt.getOrElse(null))
      }
      index += 1
    }
  }
}
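A sketch of how marshalTask could drive an INSERT: the column constants come from the TaskTable import used above, but the table name "task", the helper name insertTask, and the assumption that a Task instance and an open Connection are in scope are all illustrative, not from the original source:

import dao.postgres.common.TaskTable._

// Hypothetical: build an INSERT over an explicit column list and let the
// marshaller bind the parameters in the same order.
def insertTask(task: Task)(implicit conn: Connection): Int = {
  val columns = Seq(COL_ID, COL_PROCESS_ID, COL_PROC_DEF_NAME, COL_TASK_DEF_NAME,
    COL_EXECUTABLE, COL_ATTEMPTS, COL_STARTED, COL_STATUS, COL_REASON, COL_ENDED_AT)
  val placeholders = columns.map(_ => "?").mkString(", ")
  val stmt = conn.prepareStatement(
    s"INSERT INTO task (${columns.mkString(", ")}) VALUES ($placeholders)")
  TaskMarshaller.marshalTask(task, stmt, columns)
  stmt.executeUpdate()
}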
Example 14: ProcessTriggerRequestMarshaller
// Package declaration and imported dependencies
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.{ProcessTriggerRequestTable, TaskTriggerRequestTable}
import model.ProcessTriggerRequest
import util.JdbcUtil._
object ProcessTriggerRequestMarshaller {
  def marshal(request: ProcessTriggerRequest, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
             (implicit conn: Connection) = {
    import ProcessTriggerRequestTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME => stmt.setString(index, request.processDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID => stmt.setObject(index, request.startedProcessId.orNull)
        case COL_TASK_FILTER => stmt.setArray(index, request.taskFilter.map(makeStringArray).orNull)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): ProcessTriggerRequest = {
    import ProcessTriggerRequestTable._
    ProcessTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId = Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID]),
      taskFilter = getStringArray(rs, COL_TASK_FILTER)
    )
  }
}
Example 15: TaskDefinitionMarshaller
// Package declaration and imported dependencies
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskDefinitionTable
import model.{TaskBackoff, TaskDefinition, TaskDependencies, TaskLimits}
import util.JdbcUtil._
object TaskDefinitionMarshaller {
  def marshal(definition: TaskDefinition, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
             (implicit conn: Connection) = {
    import TaskDefinitionTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME => stmt.setString(index, definition.name)
        case COL_PROC_ID => stmt.setObject(index, definition.processId)
        case COL_EXECUTABLE => stmt.setString(index, PostgresJsonMarshaller.toJson(definition.executable))
        case COL_MAX_ATTEMPTS => stmt.setInt(index, definition.limits.maxAttempts)
        case COL_MAX_EXECUTION_TIME => stmt.setObject(index, definition.limits.maxExecutionTimeSeconds.orNull)
        case COL_BACKOFF_SECONDS => stmt.setInt(index, definition.backoff.seconds)
        case COL_BACKOFF_EXPONENT => stmt.setDouble(index, definition.backoff.exponent)
        case COL_REQUIRED_DEPS => stmt.setArray(index, makeStringArray(definition.dependencies.required))
        case COL_OPTIONAL_DEPS => stmt.setArray(index, makeStringArray(definition.dependencies.optional))
        case COL_REQUIRE_EXPLICIT_SUCCESS => stmt.setBoolean(index, definition.requireExplicitSuccess)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskDefinition = {
    import TaskDefinitionTable._
    TaskDefinition(
      name = rs.getString(COL_NAME),
      processId = rs.getObject(COL_PROC_ID).asInstanceOf[UUID],
      executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      limits = TaskLimits(
        maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
        maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
      ),
      backoff = TaskBackoff(
        seconds = rs.getInt(COL_BACKOFF_SECONDS),
        exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
      ),
      dependencies = TaskDependencies(
        required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
        optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
      ),
      requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
    )
  }
}