本文整理汇总了Scala中org.apache.hadoop.hbase.client.ConnectionFactory类的典型用法代码示例。如果您正苦于以下问题:Scala ConnectionFactory类的具体用法?Scala ConnectionFactory怎么用?Scala ConnectionFactory使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ConnectionFactory类的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Scala代码示例。
示例1: CreateSaltedTable
//设置package包名称以及导入依赖的类
package com.cloudera.sa.apptrans.setup.hbase
import java.io.File
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.compress.Compression
import org.apache.hadoop.hbase.regionserver.{BloomType, ConstantSizeRegionSplitPolicy}
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.mutable
object CreateSaltedTable {

  /**
   * Creates a salted HBase table pre-split into `regionCount` regions.
   *
   * Usage: <tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>
   *
   * Split keys are zero-padded salt prefixes so that row keys salted with
   * values in [0, numOfSalts) distribute evenly across the regions.
   */
  def main(args: Array[String]): Unit = {
    // The original only printed usage and then crashed with
    // ArrayIndexOutOfBoundsException when fewer than 5 args were supplied.
    if (args.length < 5) {
      println("<tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>")
      return
    }

    val tableName = args(0)
    val columnFamilyName = args(1)
    val regionCount = args(2).toInt
    val numOfSalts = args(3).toInt
    val hbaseConfigFolder = args(4)

    val conf = HBaseConfiguration.create()
    conf.addResource(new File(hbaseConfigFolder + "hbase-site.xml").toURI.toURL)

    // Close connection and admin even if table creation fails
    // (both were leaked in the original).
    val connection = ConnectionFactory.createConnection(conf)
    try {
      val admin = connection.getAdmin
      try {
        val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))

        val columnDescriptor = new HColumnDescriptor(columnFamilyName)
        columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY)
        columnDescriptor.setBlocksize(64 * 1024)
        columnDescriptor.setBloomFilterType(BloomType.ROW)

        tableDescriptor.addFamily(columnDescriptor)
        // Effectively disable size-based splits: region layout is fixed by the salts.
        tableDescriptor.setMaxFileSize(Long.MaxValue)
        tableDescriptor.setRegionSplitPolicyClassName(classOf[ConstantSizeRegionSplitPolicy].getName)

        // regionCount regions require exactly regionCount - 1 split keys.
        // The original loop (0 to regionCount) produced regionCount + 1 keys,
        // i.e. two extra regions, including a spurious leading "0000" split.
        val splitKeys = new mutable.MutableList[Array[Byte]]
        for (i <- 1 until regionCount) {
          val regionSplitStr = StringUtils.leftPad((i * (numOfSalts / regionCount)).toString, 4, "0")
          splitKeys += Bytes.toBytes(regionSplitStr)
        }

        admin.createTable(tableDescriptor, splitKeys.toArray)
      } finally {
        admin.close()
      }
    } finally {
      connection.close()
    }
  }
}
示例2: CreateSaltedTable
//设置package包名称以及导入依赖的类
package com.hadooparchitecturebook.taxi360.setup.hbase
import java.io.File
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.compress.Compression
import org.apache.hadoop.hbase.regionserver.{BloomType, ConstantSizeRegionSplitPolicy}
import org.apache.hadoop.hbase.util.Bytes
import scala.collection.mutable
object CreateSaltedTable {

  /**
   * Creates a salted HBase table pre-split into `regionCount` regions.
   *
   * Usage: <tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>
   *
   * Split keys are zero-padded salt prefixes so that row keys salted with
   * values in [0, numOfSalts) distribute evenly across the regions.
   */
  def main(args: Array[String]): Unit = {
    // The original only printed usage and then crashed with
    // ArrayIndexOutOfBoundsException when fewer than 5 args were supplied.
    if (args.length < 5) {
      println("<tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>")
      return
    }

    val tableName = args(0)
    val columnFamilyName = args(1)
    val regionCount = args(2).toInt
    val numOfSalts = args(3).toInt
    val hbaseConfigFolder = args(4)

    val conf = HBaseConfiguration.create()
    conf.addResource(new File(hbaseConfigFolder + "hbase-site.xml").toURI.toURL)

    // Close connection and admin even if table creation fails
    // (both were leaked in the original).
    val connection = ConnectionFactory.createConnection(conf)
    try {
      val admin = connection.getAdmin
      try {
        val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))

        val columnDescriptor = new HColumnDescriptor(columnFamilyName)
        columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY)
        columnDescriptor.setBlocksize(64 * 1024)
        columnDescriptor.setBloomFilterType(BloomType.ROW)

        tableDescriptor.addFamily(columnDescriptor)
        // Effectively disable size-based splits: region layout is fixed by the salts.
        tableDescriptor.setMaxFileSize(Long.MaxValue)
        tableDescriptor.setRegionSplitPolicyClassName(classOf[ConstantSizeRegionSplitPolicy].getName)

        // regionCount regions require exactly regionCount - 1 split keys.
        // The original loop (0 to regionCount) produced regionCount + 1 keys,
        // i.e. two extra regions, including a spurious leading "0000" split.
        val splitKeys = new mutable.MutableList[Array[Byte]]
        for (i <- 1 until regionCount) {
          val regionSplitStr = StringUtils.leftPad((i * (numOfSalts / regionCount)).toString, 4, "0")
          splitKeys += Bytes.toBytes(regionSplitStr)
        }

        admin.createTable(tableDescriptor, splitKeys.toArray)
      } finally {
        admin.close()
      }
    } finally {
      connection.close()
    }
  }
}
示例3: HBaseGlobalValues
//设置package包名称以及导入依赖的类
package com.hadooparchitecturebook.taxi360.server.hbase
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}
/**
 * Process-wide holder for the shared HBase connection and table settings.
 *
 * `init` must be invoked once at application startup before any of the
 * fields are read; until then `connection` is null.
 */
object HBaseGlobalValues {
  var appEventTableName = "app-event"
  var numberOfSalts = 10000
  var connection: Connection = null

  /** Opens the shared connection and records the runtime settings. */
  def init(conf: Configuration, numberOfSalts: Int,
           appEventTableName: String): Unit = {
    // Open the connection first so a failure leaves the settings untouched.
    connection = ConnectionFactory.createConnection(conf)
    this.appEventTableName = appEventTableName
    this.numberOfSalts = numberOfSalts
  }
}
示例4: HBaseGlobalValues
//设置package包名称以及导入依赖的类
package com.cloudera.sa.apptrans.server.hbase
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}
/**
 * Process-wide holder for the shared HBase connection and table settings.
 *
 * `init` must be invoked once at application startup before any of the
 * fields are read; until then `connection` is null.
 */
object HBaseGlobalValues {
  var appEventTableName = "app-event"
  var accountMartTableName = "account-mart"
  var numberOfSalts = 10000
  var connection: Connection = null

  /** Opens the shared connection and records the runtime settings. */
  def init(conf: Configuration, numberOfSalts: Int,
           appEventTableName: String,
           accountMartTableName: String): Unit = {
    // Open the connection first so a failure leaves the settings untouched.
    connection = ConnectionFactory.createConnection(conf)
    this.appEventTableName = appEventTableName
    this.accountMartTableName = accountMartTableName
    this.numberOfSalts = numberOfSalts
  }
}
示例5: ConnectionProvider
//设置package包名称以及导入依赖的类
package com.flipkart.connekt.commons.connections
import java.util.Properties
import javax.sql.DataSource
import com.aerospike.client.Host
import com.aerospike.client.async.{AsyncClient, AsyncClientPolicy}
import com.couchbase.client.java.{Cluster, CouchbaseCluster}
import org.apache.commons.dbcp2.BasicDataSourceFactory
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory}
import scala.collection.JavaConverters._
/**
 * Concrete [[TConnectionProvider]] that builds client connections for the
 * backing stores: HBase, a DBCP2-pooled SQL DataSource, Couchbase, and
 * Aerospike (async client, port 3000 on every node).
 */
class ConnectionProvider extends TConnectionProvider {

  override def createHbaseConnection(hConnConfig: Configuration): Connection =
    ConnectionFactory.createConnection(hConnConfig)

  override def createDatasourceConnection(mySQLProperties: Properties): DataSource =
    BasicDataSourceFactory.createDataSource(mySQLProperties)

  override def createCouchBaseConnection(nodes: List[String]): Cluster =
    CouchbaseCluster.create(nodes.asJava)

  override def createAeroSpikeConnection(nodes: List[String]): AsyncClient = {
    val policy = new AsyncClientPolicy()
    policy.asyncMaxCommands = 500
    policy.asyncSelectorThreads = 4
    val hosts = nodes.map(node => new Host(node, 3000))
    new AsyncClient(policy, hosts: _*)
  }
}
示例6: insertInto
//设置package包名称以及导入依赖的类
package swiss.army.knife.io
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.client.{Put, Table, ConnectionFactory, Connection}
import org.apache.spark.sql.DataFrame
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._
/**
 * Writes each row of `dataFrame` into an HBase table: column 0 becomes the
 * row key, column 1 the value of `family:qualifier`. Puts are flushed in
 * batches of `partLen`. Executed per partition on the Spark executors, each
 * of which opens and closes its own HBase connection.
 *
 * Failures while writing a partition are printed and swallowed (best-effort
 * semantics, preserved from the original implementation).
 */
def insertInto(dataFrame: DataFrame, tableName: String, family: String, qualifier: String, partLen: Int) : Unit = {
  dataFrame.foreachPartition(partitionOfRecords => {
    // Runs on the executor: build the HBase connection from the local config.
    val hConfig: Configuration = HBaseConfiguration.create()
    val connection: Connection = ConnectionFactory.createConnection(hConfig)
    // The original created connection and table before its try block, so the
    // connection leaked whenever getTable threw; nest the try blocks instead.
    try {
      val table: Table = connection.getTable(TableName.valueOf(tableName))
      try {
        var puts: ListBuffer[Put] = ListBuffer[Put]()
        while (partitionOfRecords.hasNext) {
          val row = partitionOfRecords.next
          val put: Put = new Put(Bytes.toBytes(row(0).toString))
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(row(1).toString))
          puts += put
          // puts.size replaces the separate cnt counter the original kept in sync.
          if (puts.size == partLen) {
            table.put(puts.toList.asJava)
            puts = ListBuffer[Put]()
          }
        }
        // Flush the final partial batch only when there is something to write.
        if (puts.nonEmpty) {
          table.put(puts.toList.asJava)
        }
      } catch {
        // Best-effort: log and continue, as the original did.
        case e: Exception => e.printStackTrace
      } finally {
        table.close()
      }
    } finally {
      connection.close()
    }
  })
}
}