

Scala TableName Class Code Examples

This article compiles typical usage examples of the Scala class org.apache.hadoop.hbase.TableName, collected from open-source projects. If you are unsure what the TableName class does, or how to use it from Scala, the curated class examples below should help.


Seven code examples of the TableName class are presented below, ordered by popularity.
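Before the project examples, here is a minimal sketch of the TableName API itself. TableName is an immutable, validated table identifier; valueOf also parses the "namespace:qualifier" form. The table names below are placeholders:

import org.apache.hadoop.hbase.TableName

object TableNameBasics {
  def main(args: Array[String]): Unit = {
    val plain = TableName.valueOf("accounts")            // table in the default namespace
    val namespaced = TableName.valueOf("prod:accounts")  // "namespace:qualifier" form

    println(plain.getNameAsString)            // accounts
    println(namespaced.getNamespaceAsString)  // prod
    println(namespaced.getQualifierAsString)  // accounts
  }
}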

Example 1: CreateSaltedTable

// Package declaration and imported dependencies
package com.cloudera.sa.apptrans.setup.hbase

import java.io.File

import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.compress.Compression
import org.apache.hadoop.hbase.regionserver.{BloomType, ConstantSizeRegionSplitPolicy}
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.mutable


object CreateSaltedTable {
  def main(args: Array[String]): Unit = {

    if (args.length < 5) {
      println("Usage: <tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>")
      return
    }
    val tableName = args(0)
    val columnFamilyName = args(1)
    val regionCount = args(2).toInt
    val numOfSalts = args(3).toInt
    val hbaseConfigFolder = args(4)

    val conf = HBaseConfiguration.create()

    conf.addResource(new File(hbaseConfigFolder, "hbase-site.xml").toURI.toURL)

    val connection = ConnectionFactory.createConnection(conf)

    val admin = connection.getAdmin

    val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))

    val columnDescriptor = new HColumnDescriptor(columnFamilyName)

    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY)
    columnDescriptor.setBlocksize(64 * 1024)
    columnDescriptor.setBloomFilterType(BloomType.ROW)

    tableDescriptor.addFamily(columnDescriptor)

    tableDescriptor.setMaxFileSize(Long.MaxValue)
    tableDescriptor.setRegionSplitPolicyClassName(classOf[ConstantSizeRegionSplitPolicy].getName)

    // regionCount regions need regionCount - 1 split keys; each key is a
    // zero-padded salt-bucket boundary matching the salted row-key prefixes.
    val splitKeys = new mutable.MutableList[Array[Byte]]
    for (i <- 1 until regionCount) {
      val regionSplitStr = StringUtils.leftPad((i * (numOfSalts / regionCount)).toString, 4, "0")
      splitKeys += Bytes.toBytes(regionSplitStr)
    }
    admin.createTable(tableDescriptor, splitKeys.toArray)

    admin.close()
    connection.close()
  }
} 
Author: tmalaska | Project: AppTrans | Source: CreateSaltedTable.scala
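Example 1 pre-splits the table on 4-character, zero-padded salt prefixes, but the matching write path is not part of the snippet. The sketch below shows one plausible way to build row keys that land in those salt buckets; the hash-based salt and the "_" separator are assumptions, not the project's confirmed key scheme:

import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.util.Bytes

object SaltedRowKey {
  // Derive a stable salt in [0, numOfSalts) and left-pad it to 4 characters,
  // matching the split keys created by CreateSaltedTable above.
  def apply(key: String, numOfSalts: Int): Array[Byte] = {
    val salt = math.abs(key.hashCode) % numOfSalts
    Bytes.toBytes(StringUtils.leftPad(salt.toString, 4, "0") + "_" + key)
  }
}

Salting like this spreads monotonically increasing keys (timestamps, sequence numbers) across all pre-split regions instead of hot-spotting the last one.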

Example 2: HTableStage

// Package declaration and imported dependencies
package akka.stream.alpakka.hbase.javadsl

import akka.stream.alpakka.hbase.HTableSettings
import akka.stream.alpakka.hbase.internal.HBaseFlowStage
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.{Done, NotUsed}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put

import scala.collection.immutable
import scala.concurrent.Future

object HTableStage {

  def table[T](conf: Configuration,
               tableName: TableName,
               columnFamilies: java.util.List[String],
               converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
    import scala.compat.java8.FunctionConverters._
    import scala.collection.JavaConverters._
    HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
  }

  def sink[A](config: HTableSettings[A]): akka.stream.javadsl.Sink[A, Future[Done]] =
    Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).asJava

  def flow[A](settings: HTableSettings[A]): akka.stream.javadsl.Flow[A, A, NotUsed] =
    Flow.fromGraph(new HBaseFlowStage[A](settings)).asJava

} 
Author: akka | Project: alpakka | Source: HTableStage.scala
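A hedged usage sketch for the stage above: it builds HTableSettings (the case class from Example 4) with a converter from a domain type to Put, then runs a stream through the sink. It assumes the scaladsl counterpart of HTableStage that alpakka ships alongside this javadsl; the table and column-family names are placeholders:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.alpakka.hbase.HTableSettings
import akka.stream.alpakka.hbase.scaladsl.HTableStage
import akka.stream.scaladsl.Source
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.immutable

object HTableStageUsage {
  final case class Account(id: String, balance: Long)

  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem = ActorSystem("hbase-sink")
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    // Converter turning each stream element into an HBase Put.
    val toPut: Account => Put = { account =>
      val put = new Put(Bytes.toBytes(account.id))
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("balance"), Bytes.toBytes(account.balance))
      put
    }

    val settings = HTableSettings(
      HBaseConfiguration.create(), TableName.valueOf("accounts"), immutable.Seq("cf"), toPut)

    Source(List(Account("a-1", 10L), Account("a-2", 20L)))
      .runWith(HTableStage.sink(settings))
  }
}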

Example 3: DataLoadSuite

// Package declaration and imported dependencies
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.{TableName, HBaseTestingUtility}
import org.apache.hadoop.hbase.util.Bytes
import org.scalatest.{FunSuite, BeforeAndAfterEach, BeforeAndAfterAll}


class DataLoadSuite extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {

  var htu: HBaseTestingUtility = null

  override def beforeAll() {

    htu = HBaseTestingUtility.createLocalHTU()

    htu.cleanupTestDir()
    println("starting minicluster")
    htu.startMiniZKCluster()
    htu.startMiniHBaseCluster(1, 1)
    println(" - minicluster started")
    try {
      htu.deleteTable(Bytes.toBytes(HBaseContants.tableName))
    } catch {
      case e: Exception => {
        println(" - no table " + HBaseContants.tableName + " found")
      }
    }
    println(" - creating table " + HBaseContants.tableName)
    htu.createTable(Bytes.toBytes(HBaseContants.tableName), HBaseContants.columnFamily)
    println(" - created table")

  }

  override def afterAll() {
    htu.deleteTable(Bytes.toBytes(HBaseContants.tableName))
    println("shuting down minicluster")
    htu.shutdownMiniHBaseCluster()
    htu.shutdownMiniZKCluster()
    println(" - minicluster shut down")
    htu.cleanupTestDir()
  }

  test("test the load") {
    HBasePopulator.populate(100, 5000, 1, htu.getConnection, HBaseContants.tableName)
    HBasePopulator.megaScan(htu.getConnection, HBaseContants.tableName)

    val table = htu.getConnection.getTable(TableName.valueOf(HBaseContants.tableName))

    println("Single Record Test")

    val scan = new Scan()
    scan.setStartRow(Bytes.toBytes("10_"))
    scan.setStopRow(Bytes.toBytes("10__"))
    scan.setCaching(1)
    val scanner = table.getScanner(scan)
    val it = scanner.iterator()
    val result = it.next()
    println(" - " + Bytes.toString(result.getRow) + ":" +
      Bytes.toString(result.getValue(HBaseContants.columnFamily,
        HBaseContants.column)))
  }
} 
Author: khajaasmath786 | Project: HBASEScalaSBT-Spark | Source: DataLoadSuite.scala
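The single-record check above bounds the scan with explicit start and stop rows. The hedged helper below expresses the same salt-prefix pattern with Scan.setRowPrefixFilter, which derives both bounds from one prefix (the helper itself is ours, not the project's):

import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Connection, Scan}
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.JavaConverters._

object PrefixScan {
  // Print every row whose key starts with the given prefix, e.g. "10_".
  def apply(connection: Connection, tableName: String, prefix: String): Unit = {
    val table = connection.getTable(TableName.valueOf(tableName))
    val scan = new Scan()
    scan.setRowPrefixFilter(Bytes.toBytes(prefix)) // sets start and stop rows from the prefix
    val scanner = table.getScanner(scan)
    try {
      scanner.asScala.foreach(result => println(Bytes.toString(result.getRow)))
    } finally {
      scanner.close()
      table.close()
    }
  }
}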

Example 4: HTableSettings

// Package declaration and imported dependencies
package akka.stream.alpakka.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put

import scala.collection.immutable

final case class HTableSettings[T](conf: Configuration,
                                   tableName: TableName,
                                   columnFamilies: immutable.Seq[String],
                                   converter: T => Put)

object HTableSettings {
  def create[T](conf: Configuration,
                tableName: TableName,
                columnFamilies: java.util.List[String],
                converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
    import scala.compat.java8.FunctionConverters._
    import scala.collection.JavaConverters._
    HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
  }
} 
Author: akka | Project: alpakka | Source: HBaseSettings.scala

Example 5: CreateSaltedTable

// Package declaration and imported dependencies
package com.hadooparchitecturebook.taxi360.setup.hbase

import java.io.File

import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.compress.Compression
import org.apache.hadoop.hbase.regionserver.{BloomType, ConstantSizeRegionSplitPolicy}
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.mutable


object CreateSaltedTable {
  def main(args: Array[String]): Unit = {

    if (args.length < 5) {
      println("Usage: <tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>")
      return
    }
    val tableName = args(0)
    val columnFamilyName = args(1)
    val regionCount = args(2).toInt
    val numOfSalts = args(3).toInt
    val hbaseConfigFolder = args(4)

    val conf = HBaseConfiguration.create()

    conf.addResource(new File(hbaseConfigFolder, "hbase-site.xml").toURI.toURL)

    val connection = ConnectionFactory.createConnection(conf)

    val admin = connection.getAdmin

    val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))

    val columnDescriptor = new HColumnDescriptor(columnFamilyName)

    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY)
    columnDescriptor.setBlocksize(64 * 1024)
    columnDescriptor.setBloomFilterType(BloomType.ROW)

    tableDescriptor.addFamily(columnDescriptor)

    tableDescriptor.setMaxFileSize(Long.MaxValue)
    tableDescriptor.setRegionSplitPolicyClassName(classOf[ConstantSizeRegionSplitPolicy].getName)

    // regionCount regions need regionCount - 1 split keys; each key is a
    // zero-padded salt-bucket boundary matching the salted row-key prefixes.
    val splitKeys = new mutable.MutableList[Array[Byte]]
    for (i <- 1 until regionCount) {
      val regionSplitStr = StringUtils.leftPad((i * (numOfSalts / regionCount)).toString, 4, "0")
      splitKeys += Bytes.toBytes(regionSplitStr)
    }
    admin.createTable(tableDescriptor, splitKeys.toArray)

    admin.close()
    connection.close()
  }
} 
Author: hadooparchitecturebook | Project: Taxi360 | Source: CreateSaltedTable.scala

Example 6: HBaseSetup

// Package declaration and imported dependencies
package uk.co.odinconsultants.bitcoin.hbase

import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.HBaseAdmin

object HBaseSetup {

  val metaTable: String   = "Addresses"
  val tableName: TableName = TableName.valueOf(metaTable)
  val familyName: String  = "familyName"

  def createAddressesTable(admin: HBaseAdmin): Unit = {
    createTable(admin, metaTable, familyName)
  }

  private def createTable(admin: HBaseAdmin, tableName: String, familyName: String): Unit = {
    val tableDescriptor = new HTableDescriptor(tableName)
    val colDescriptor   = new HColumnDescriptor(familyName)
    tableDescriptor.addFamily(colDescriptor)
    if (!admin.tableExists(tableName)) {
      admin.createTable(tableDescriptor)
    }
  }

} 
Author: PhillHenry | Project: Cryptorigin | Source: HBaseSetup.scala
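HBaseAdmin and the String-based HTableDescriptor constructor used in this example were later deprecated and removed. A sketch of the same table setup against the HBase 2.x builder API, assuming an hbase-client 2.x dependency:

import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.{Admin, ColumnFamilyDescriptorBuilder, TableDescriptorBuilder}

object HBaseSetup2x {
  def createTable(admin: Admin, tableName: String, familyName: String): Unit = {
    val name = TableName.valueOf(tableName)
    if (!admin.tableExists(name)) {
      val descriptor = TableDescriptorBuilder
        .newBuilder(name)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(familyName))
        .build()
      admin.createTable(descriptor)
    }
  }
}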

Example 7: insertInto

// Package declaration and imported dependencies
package swiss.army.knife.io

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.client.{Put, Table, ConnectionFactory, Connection}
import org.apache.spark.sql.DataFrame
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._



object DataFrameToHbase {

  def insertInto(dataFrame: DataFrame, tableName: String, family: String, qualifier: String, partLen: Int): Unit = {
    dataFrame.foreachPartition(partitionOfRecords => {
      // Create the HBase configuration and connection inside each partition
      val hConfig: Configuration = HBaseConfiguration.create()
      val connection: Connection = ConnectionFactory.createConnection(hConfig)
      val table: Table = connection.getTable(TableName.valueOf(tableName))
      var puts: ListBuffer[Put] = ListBuffer[Put]()
      var cnt: Int = 0

      try {
        while(partitionOfRecords.hasNext) {
          val row = partitionOfRecords.next
          val put : Put = new Put(Bytes.toBytes(row(0).toString))
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(row(1).toString))
          puts += put
          cnt = cnt + 1
          if(cnt == partLen) {
            table.put(puts.toList.asJava)
            puts = ListBuffer[Put]()
            cnt = 0
          }
        }
        table.put(puts.toList.asJava)
      } catch {
        case e: Exception => e.printStackTrace
      }
      finally {
        if(table != null){
          table.close()}
        if(connection != null){
          connection.close()}
      }
    })
  }

} 
Author: Justontheway | Project: SwissArmyKnife | Source: DataFrameToHbase.scala
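A hedged usage sketch for insertInto. The enclosing DataFrameToHbase object name is taken from the source file name; the table name "events", family "d", qualifier "value", and batch size are placeholders, and the target table must already exist in HBase:

import org.apache.spark.sql.SparkSession
import swiss.army.knife.io.DataFrameToHbase

object DataFrameToHbaseUsage {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("DataFrameToHbaseUsage").getOrCreate()
    import spark.implicits._

    // Column 0 becomes the row key, column 1 the cell value.
    val df = Seq(("row-1", "v1"), ("row-2", "v2")).toDF("key", "value")

    DataFrameToHbase.insertInto(df, "events", "d", "value", partLen = 500)

    spark.stop()
  }
}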


Note: The org.apache.hadoop.hbase.TableName class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects, and copyright remains with the original authors; consult the corresponding project's License before using or redistributing the code, and do not republish without permission.