

Scala Bytes Class Code Examples

This article collects typical usage examples of the Scala class org.apache.hadoop.hbase.util.Bytes. If you are wondering what the Bytes class does, how to use it, and what working code looks like, the curated examples below should help.


Twelve code examples of the Bytes class are shown below, sorted by popularity.

Example 1: CreateSaltedTable

// Package declaration and imported dependencies
package com.cloudera.sa.apptrans.setup.hbase

import java.io.File

import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.compress.Compression
import org.apache.hadoop.hbase.regionserver.{BloomType, ConstantSizeRegionSplitPolicy}
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.mutable


object CreateSaltedTable {
  def main(args:Array[String]): Unit = {

    if (args.length < 5) {
      println("Usage: <tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>")
      return
    }
    val tableName = args(0)
    val columnFamilyName = args(1)
    val regionCount = args(2).toInt
    val numOfSalts = args(3).toInt
    val hbaseConfigFolder = args(4)

    val conf = HBaseConfiguration.create()

    conf.addResource(new File(hbaseConfigFolder, "hbase-site.xml").toURI.toURL)

    val connection = ConnectionFactory.createConnection(conf)

    val admin = connection.getAdmin

    val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))

    val columnDescriptor = new HColumnDescriptor(columnFamilyName)

    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY)
    columnDescriptor.setBlocksize(64 * 1024)
    columnDescriptor.setBloomFilterType(BloomType.ROW)

    tableDescriptor.addFamily(columnDescriptor)

    tableDescriptor.setMaxFileSize(Long.MaxValue)
    tableDescriptor.setRegionSplitPolicyClassName(classOf[ConstantSizeRegionSplitPolicy].getName)

    // n split keys produce n + 1 regions, so generate regionCount - 1 keys,
    // skipping i = 0 (a "0000" split would only create an empty first region).
    val splitKeys = new mutable.MutableList[Array[Byte]]
    for (i <- 1 until regionCount) {
      val regionSplitStr = StringUtils.leftPad((i * (numOfSalts / regionCount)).toString, 4, "0")
      splitKeys += Bytes.toBytes(regionSplitStr)
    }
    admin.createTable(tableDescriptor, splitKeys.toArray)

    admin.close()
    connection.close()
  }
} 
Developer: tmalaska · Project: AppTrans · Source: CreateSaltedTable.scala
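
For context, clients writing to a salted table like this one usually derive the salt from the natural row key so that readers can recompute it. A minimal sketch under that assumption (the helper name and hash-modulo scheme are illustrative, not taken from the original project):

import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.util.Bytes

object SaltedKeys {
  // Hypothetical helper: prefix the natural key with a zero-padded salt in
  // [0, numOfSalts), matching the zero-padded split points created above.
  def saltedRowKey(naturalKey: String, numOfSalts: Int): Array[Byte] = {
    val salt = math.abs(naturalKey.hashCode) % numOfSalts
    Bytes.toBytes(StringUtils.leftPad(salt.toString, 4, "0") + "_" + naturalKey)
  }
}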

Example 2: HBaseHelloWorld

// Package declaration and imported dependencies
import org.apache.spark._
import org.apache.hadoop._

import org.apache.hadoop.hbase.client.{HBaseAdmin, Result}
import org.apache.hadoop.hbase.{ HBaseConfiguration, HColumnDescriptor, HTableDescriptor }
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable

import org.apache.hadoop.hbase.client.{HBaseAdmin,HTable,Put,Get}
import org.apache.hadoop.hbase.util.Bytes

object HBaseHelloWorld {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("HBaseRead")
    val sc = new SparkContext(sparkConf)
    val conf = HBaseConfiguration.create()
    val tableName = "enterprises"

    conf.set("hbase.master", "localhost:60000")
    conf.setInt("timeout", 120000)
    conf.set(TableInputFormat.INPUT_TABLE, tableName)

    val admin = new HBaseAdmin(conf)
    if (!admin.isTableAvailable(tableName)) {
      // createTable fails on a descriptor with no column family;
      // "cf" here is a placeholder family name.
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor("cf"))
      admin.createTable(tableDesc)
    }
    }

    val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
    println("Number of Records found : " + hBaseRDD.count())

    // Note: Spark transformations are lazy; without an action these two lines
    // never execute. They are kept only to illustrate chaining on the RDD.
    val pairs = hBaseRDD.map(s => (s, 1))
    val counts = pairs.reduceByKey((a, b) => a + b)

    sc.stop()
  }
} 
Developer: octoai · Project: octo-spark · Source: HBaseHelloWorld.scala

Example 3: Employee

// Package declaration and imported dependencies
package com.zaloni.mgohain.sparkHbaseIntegration.services

import org.apache.hadoop.hbase.client.{HBaseAdmin, HTable, Put}
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor}

object Employee {
  def main(args: Array[String]) {
    if (args.length != 1) {
      System.err.println("Incorrect number of arguments: " + args.length)
      System.out.println("Please provide the correct arguments.")
      System.exit(1)
    }
    val hbaseConf = HBaseConfiguration.create()
    val tableName = "employee"
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    hbaseConf.set("hbase.zookeeper.quorum","quickstart.cloudera")
    hbaseConf.set("hbase.zookeeper.property.client.port","2181")
    val admin = new HBaseAdmin(hbaseConf)
    val cfProfessionalData = Bytes.toBytes("professional_data")
    val cfPersonalData = Bytes.toBytes("personal_data")
    if (!admin.isTableAvailable(tableName)) {
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor(cfProfessionalData))
      tableDesc.addFamily(new HColumnDescriptor(cfPersonalData))
      // The original built the descriptor but never created the table;
      // without this call the HTable below fails when the table is missing.
      admin.createTable(tableDesc)
    }
    val hTable = new HTable(hbaseConf,tableName)
    //val records = sc.textFile(args(0))
    val put = new Put(Bytes.toBytes("e_1"))
    val eId = Bytes.toBytes("Emp_id")
    val name = Bytes.toBytes("Name")
    val dsgtn = Bytes.toBytes("Designation")
    val doj = Bytes.toBytes("DOJ")
    val addr = Bytes.toBytes("Address")
    val phn = Bytes.toBytes("Phone")
    val dob = Bytes.toBytes("DOB")
    put.add(cfProfessionalData, eId, Bytes.toBytes(1))
    put.add(cfProfessionalData, name, Bytes.toBytes("Mridul Gohain"))
    put.add(cfProfessionalData, dsgtn, Bytes.toBytes("SE"))
    put.add(cfProfessionalData, doj, Bytes.toBytes("15-07-2015"))
    put.add(cfPersonalData, addr, Bytes.toBytes("Chabua"))
    put.add(cfPersonalData, phn, Bytes.toBytes("9859559606"))
    put.add(cfPersonalData, dob, Bytes.toBytes("04-10-1991"))
    hTable.put(put)
    hTable.close()
  }
} 
Developer: mridulZaloni · Project: sparkStreaming · Source: Employee.scala
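
One modernization note: in the HBase 1.x client, Put#add(family, qualifier, value) was deprecated in favor of Put#addColumn, so on a newer client the writes above would look like this (same cells, renamed method; shown for two of the columns):

import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes

object EmployeePutSketch extends App {
  val put = new Put(Bytes.toBytes("e_1"))
  put.addColumn(Bytes.toBytes("professional_data"), Bytes.toBytes("Emp_id"), Bytes.toBytes(1))
  put.addColumn(Bytes.toBytes("personal_data"), Bytes.toBytes("Address"), Bytes.toBytes("Chabua"))
  println(put) // prints row key and cell summary; no cluster needed
}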

Example 4: DataLoadSuite

// Package declaration and imported dependencies
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.{TableName, HBaseTestingUtility}
import org.apache.hadoop.hbase.util.Bytes
import org.scalatest.{FunSuite, BeforeAndAfterEach, BeforeAndAfterAll}


class DataLoadSuite extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {

  var htu: HBaseTestingUtility = null

  override def beforeAll() {

    htu = HBaseTestingUtility.createLocalHTU()

    htu.cleanupTestDir()
    println("starting minicluster")
    htu.startMiniZKCluster()
    htu.startMiniHBaseCluster(1, 1)
    println(" - minicluster started")
    try {
      htu.deleteTable(Bytes.toBytes(HBaseContants.tableName))
    } catch {
      case e: Exception => {
        println(" - no table " + HBaseContants.tableName + " found")
      }
    }
    println(" - creating table " + HBaseContants.tableName)
    htu.createTable(Bytes.toBytes(HBaseContants.tableName), HBaseContants.columnFamily)
    println(" - created table")

  }

  override def afterAll() {
    htu.deleteTable(Bytes.toBytes(HBaseContants.tableName))
    println("shuting down minicluster")
    htu.shutdownMiniHBaseCluster()
    htu.shutdownMiniZKCluster()
    println(" - minicluster shut down")
    htu.cleanupTestDir()
  }

  test("test the load") {
    HBasePopulator.populate(100, 5000, 1, htu.getConnection, HBaseContants.tableName)
    HBasePopulator.megaScan(htu.getConnection, HBaseContants.tableName)

    val table = htu.getConnection.getTable(TableName.valueOf(HBaseContants.tableName))

    println("Single Record Test")

    val scan = new Scan()
    scan.setStartRow(Bytes.toBytes("10_"))
    scan.setStopRow(Bytes.toBytes("10__"))
    scan.setCaching(1)
    val scanner = table.getScanner(scan)
    val it = scanner.iterator()
    val result = it.next()
    println(" - " + Bytes.toString(result.getRow) + ":" +
      Bytes.toString(result.getValue(HBaseContants.columnFamily,
        HBaseContants.column)))
  }
} 
Developer: khajaasmath786 · Project: HBASEScalaSBT-Spark · Source: DataLoadSuite.scala
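
The suite depends on an HBaseContants object and an HBasePopulator that the listing omits. A plausible shape for the constants, purely illustrative (every value here is an assumption; the original spelling "HBaseContants" is kept so the test compiles against it):

import org.apache.hadoop.hbase.util.Bytes

object HBaseContants {
  val tableName: String = "load_test"                 // hypothetical value
  val columnFamily: Array[Byte] = Bytes.toBytes("cf") // hypothetical value
  val column: Array[Byte] = Bytes.toBytes("c")        // hypothetical value
}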

Example 5: HBaseWriter

// Package declaration and imported dependencies
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat



object HBaseWriter {

  def toStock(rec: String): Stock = {
    val splits = rec.split(",")
    Stock(splits(0), splits(1), splits(2), splits(3), splits(4), splits(5), splits(6))
  }

  def toPut(stock: Stock): (ImmutableBytesWritable, Put) = {
    val rowKey = stock.pdate
    val put = new Put(Bytes.toBytes(rowKey))

    put.add(StockHBaseType.cfPrices, StockHBaseType.colOpen, Bytes.toBytes(stock.open))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colHigh, Bytes.toBytes(stock.high))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colLow, Bytes.toBytes(stock.low))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colClose, Bytes.toBytes(stock.close))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colAdjClose, Bytes.toBytes(stock.adjClose))
    put.add(StockHBaseType.cfVolume, StockHBaseType.colVol, Bytes.toBytes(stock.volume))

    (new ImmutableBytesWritable(Bytes.toBytes(rowKey)), put)
  }
  
  
  
  def main(args: Array[String]): Unit = {
    if (args.size != 3) {
      println("Usage: HBaseWriter <data source> <table name> <number of threads>")
      System.exit(-1)
    }
    
    val sourceFile = args(0)
    val tableName = args(1)
    val numThread = args(2).toInt
    // This parameter controls how many parallel loads occur.
    // More is faster, but more threads put more pressure on the HBase heap.
    
    val sc = Connection.createSparkConf()
    val hbaseConf = Connection.createHBaseConf()

    
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    
    sc.textFile(sourceFile).
        filter(line => line.split(",").size == 7 && !line.startsWith("Date")).
        repartition(numThread).
        map(toStock).
        map(toPut).
        saveAsNewAPIHadoopFile("/user/user01/out",
            classOf[ImmutableBytesWritable], 
            classOf[Put], 
            classOf[TableOutputFormat[Put]], 
            hbaseConf)
  }

} 
Developer: Einext · Project: spark-projects · Source: HBaseWriter.scala
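
Examples 5 and 9 both lean on a Stock case class, a StockHBaseType object, and a Connection helper that the listings do not show. A plausible reconstruction of the first two, inferred from how their fields are used (the family and column names come from Example 9; everything else is an assumption):

import org.apache.hadoop.hbase.util.Bytes

case class Stock(pdate: String, open: String, high: String, low: String,
                 close: String, adjClose: String, volume: String)

object StockHBaseType {
  val cfPrices: Array[Byte]    = Bytes.toBytes("prices")
  val cfVolume: Array[Byte]    = Bytes.toBytes("volume")
  val colOpen: Array[Byte]     = Bytes.toBytes("Open")
  val colHigh: Array[Byte]     = Bytes.toBytes("High")
  val colLow: Array[Byte]      = Bytes.toBytes("Low")
  val colClose: Array[Byte]    = Bytes.toBytes("Close")
  val colAdjClose: Array[Byte] = Bytes.toBytes("AdjClose")
  val colVol: Array[Byte]      = Bytes.toBytes("vol")
}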

Example 6: CreateSaltedTable

// Package declaration and imported dependencies
package com.hadooparchitecturebook.taxi360.setup.hbase

import java.io.File

import org.apache.commons.lang.StringUtils
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.ConnectionFactory
import org.apache.hadoop.hbase.io.compress.Compression
import org.apache.hadoop.hbase.regionserver.{BloomType, ConstantSizeRegionSplitPolicy}
import org.apache.hadoop.hbase.util.Bytes

import scala.collection.mutable


object CreateSaltedTable {
  def main(args:Array[String]): Unit = {

    if (args.length < 5) {
      println("Usage: <tableName> <columnFamily> <regionCount> <numOfSalts> <hbaseConfigFolder>")
      return
    }
    val tableName = args(0)
    val columnFamilyName = args(1)
    val regionCount = args(2).toInt
    val numOfSalts = args(3).toInt
    val hbaseConfigFolder = args(4)

    val conf = HBaseConfiguration.create()

    conf.addResource(new File(hbaseConfigFolder, "hbase-site.xml").toURI.toURL)

    val connection = ConnectionFactory.createConnection(conf)

    val admin = connection.getAdmin

    val tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName))

    val columnDescriptor = new HColumnDescriptor(columnFamilyName)

    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY)
    columnDescriptor.setBlocksize(64 * 1024)
    columnDescriptor.setBloomFilterType(BloomType.ROW)

    tableDescriptor.addFamily(columnDescriptor)

    tableDescriptor.setMaxFileSize(Long.MaxValue)
    tableDescriptor.setRegionSplitPolicyClassName(classOf[ConstantSizeRegionSplitPolicy].getName)

    // n split keys produce n + 1 regions, so generate regionCount - 1 keys,
    // skipping i = 0 (a "0000" split would only create an empty first region).
    val splitKeys = new mutable.MutableList[Array[Byte]]
    for (i <- 1 until regionCount) {
      val regionSplitStr = StringUtils.leftPad((i * (numOfSalts / regionCount)).toString, 4, "0")
      splitKeys += Bytes.toBytes(regionSplitStr)
    }
    admin.createTable(tableDescriptor, splitKeys.toArray)

    admin.close()
    connection.close()
  }
} 
Developer: hadooparchitecturebook · Project: Taxi360 · Source: CreateSaltedTable.scala

Example 7: HadoopUtils

// Package declaration and imported dependencies
package com.flipkart.connekt.commons.utils

import org.apache.hadoop.hbase.util.Bytes

object HadoopUtils {
  implicit class stringHandyFunctions(val s: String) {
    def getUtf8Bytes = Bytes.toBytes(s)
  }

  implicit class longHandyFunctions(val l: Long) {
    def getBytes = Bytes.toBytes(l)
  }

  implicit class booleanHandyFunctions(val b: Boolean) {
    def getBytes = Bytes.toBytes(b)
  }


  implicit class byteArrayHandyFunctions(val b: Array[Byte]) {
    def getString = Bytes.toString(b)
    def getLong = Bytes.toLong(b)
    def getBoolean = Bytes.toBoolean(b)
    def getInt = Bytes.toInt(b)
  }
} 
Developer: ayush03agarwal · Project: connekt · Source: HadoopUtils.scala
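
A quick usage sketch of these implicit conversions (a hypothetical demo object; it only assumes HadoopUtils is on the classpath):

import com.flipkart.connekt.commons.utils.HadoopUtils._

object HadoopUtilsDemo extends App {
  val raw: Array[Byte] = "hello".getUtf8Bytes // String -> UTF-8 bytes
  println(raw.getString)                      // bytes -> "hello"
  println(42L.getBytes.getLong)               // Long round-trip: 42
  println(true.getBytes.getBoolean)           // Boolean round-trip: true
}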

Example 8: insertInto

// Package declaration and imported dependencies
package swiss.army.knife.io

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.client.{Put, Table, ConnectionFactory, Connection}
import org.apache.spark.sql.DataFrame
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._



// Note: the enclosing object declaration was truncated in the original
// listing; the source file name suggests it is DataFrameToHbase.
object DataFrameToHbase {

  def insertInto(dataFrame: DataFrame, tableName: String, family: String, qualifier: String, partLen: Int): Unit = {
    dataFrame.foreachPartition(partitionOfRecords => {
      // Create a fresh HBase connection inside each partition (connections
      // are not serializable, so they cannot be created on the driver).
      val hConfig: Configuration = HBaseConfiguration.create()
      val connection: Connection = ConnectionFactory.createConnection(hConfig)
      val table: Table = connection.getTable(TableName.valueOf(tableName))
      var puts: ListBuffer[Put] = ListBuffer[Put]()
      var cnt: Int = 0

      try {
        while(partitionOfRecords.hasNext) {
          val row = partitionOfRecords.next
          val put : Put = new Put(Bytes.toBytes(row(0).toString))
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(row(1).toString))
          puts += put
          cnt = cnt + 1
          if(cnt == partLen) {
            table.put(puts.toList.asJava)
            puts = ListBuffer[Put]()
            cnt = 0
          }
        }
        table.put(puts.toList.asJava)
      } catch {
        case e: Exception => e.printStackTrace
      }
      finally {
        if(table != null){
          table.close()}
        if(connection != null){
          connection.close()}
      }
    })
  }

} 
Developer: Justontheway · Project: SwissArmyKnife · Source: DataFrameToHbase.scala
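
A hedged usage sketch for the helper above (table, family, and qualifier names are placeholders, and a SparkSession entry point is assumed; insertInto reads row(0) as the row key and row(1) as the value, so a two-column DataFrame is required):

import org.apache.spark.sql.SparkSession
import swiss.army.knife.io.DataFrameToHbase

object InsertIntoDemo extends App {
  val spark = SparkSession.builder().appName("hbase-insert").getOrCreate()
  import spark.implicits._

  // Two columns: (row key, value), matching how insertInto indexes each row.
  val df = Seq(("rk1", "v1"), ("rk2", "v2")).toDF("key", "value")
  DataFrameToHbase.insertInto(df, "demo_table", "cf", "q", partLen = 500)

  spark.stop()
}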

Example 9: HBaseReader

// Package declaration and imported dependencies
import org.apache.hadoop.hbase.client.{ HBaseAdmin, Result }
import org.apache.hadoop.hbase.{ HColumnDescriptor, HTableDescriptor }
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes



object HBaseReader {

  def main(args: Array[String]) {

    if(args.size != 1){
      println("Usage: HBaseReader <table name>")
      System.exit(-1)
    }
    
    val tableName = args(0)

    val sc = Connection.createSparkConf()
    val conf = Connection.createHBaseConf()

    val admin = new HBaseAdmin(conf)
    if (!admin.isTableAvailable(tableName)) {
      // createTable fails with no column family; use the families read below.
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor("prices"))
      tableDesc.addFamily(new HColumnDescriptor("volume"))
      admin.createTable(tableDesc)
    }
    
    conf.set(TableInputFormat.INPUT_TABLE, tableName)
    val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result])

    println("Number of Records found : " + hBaseRDD.count())

    val stocksRdd = hBaseRDD.map {
      case (key: ImmutableBytesWritable, value: Result) =>
        Stock(
          new String(key.get),
          new String(value.getValue(Bytes.toBytes("prices"), Bytes.toBytes("Open"))),
          new String(value.getValue(Bytes.toBytes("prices"), Bytes.toBytes("Close"))),
          new String(value.getValue(Bytes.toBytes("prices"), Bytes.toBytes("High"))),
          new String(value.getValue(Bytes.toBytes("prices"), Bytes.toBytes("Low"))),
          new String(value.getValue(Bytes.toBytes("prices"), Bytes.toBytes("AdjClose"))),
          new String(value.getValue(Bytes.toBytes("volume"), Bytes.toBytes("vol"))))
    }
    stocksRdd.take(10).foreach(println)

  }
} 
Developer: Einext · Project: spark-projects · Source: HBaseReader.scala

Example 10: formatsRead

// Package declaration and imported dependencies
package top.spoofer.hbrdd.unit

import org.apache.hadoop.hbase.util.Bytes
import org.json4s._

trait HbRddReaders {
  implicit val hbBooleanReader = new HbRddFormatsReader[Boolean] {
    def formatsRead(readData: Array[Byte]): Boolean = Bytes.toBoolean(readData)
  }

  implicit val hbByteArrayReader = new HbRddFormatsReader[Array[Byte]] {
    def formatsRead(readData: Array[Byte]): Array[Byte] = readData
  }

  implicit val hbShortReader = new HbRddFormatsReader[Short] {
    def formatsRead(readData: Array[Byte]): Short = Bytes.toShort(readData)
  }

  implicit val hbIntReader = new HbRddFormatsReader[Int] {
    def formatsRead(readData: Array[Byte]): Int = Bytes.toInt(readData)
  }

  implicit val hbFloatReader = new HbRddFormatsReader[Float] {
    def formatsRead(readData: Array[Byte]): Float = Bytes.toFloat(readData)
  }

  implicit val hbDoubleReader = new HbRddFormatsReader[Double] {
    def formatsRead(readData: Array[Byte]): Double = Bytes.toDouble(readData)
  }

  implicit val hbLongReader = new HbRddFormatsReader[Long] {
    def formatsRead(readData: Array[Byte]): Long = Bytes.toLong(readData)
  }

  implicit val hbStringReader = new HbRddFormatsReader[String] {
    def formatsRead(readData: Array[Byte]): String = Bytes.toString(readData)
  }

  
  implicit val hbJsonReader = new HbRddFormatsReader[JValue] {
    import org.json4s.jackson.JsonMethods._
    def formatsRead(readData: Array[Byte]): JValue = parse(Bytes.toString(readData))
  }
} 
Developer: TopSpoofer · Project: hbrdd · Source: HbRddReaders.scala

Example 11: formatsWrite

// Package declaration and imported dependencies
package top.spoofer.hbrdd.unit

import org.apache.hadoop.hbase.util.Bytes
import org.json4s._

trait HbRddWriters {
  implicit val hbBooleanWriter = new HbRddFormatsWriter[Boolean] {
    def formatsWrite(writeData: Boolean): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbArrayWriter = new HbRddFormatsWriter[Array[Byte]] {
    def formatsWrite(writeData: Array[Byte]): Array[Byte] = writeData
  }

  implicit val hbShortWriter = new HbRddFormatsWriter[Short] {
    def formatsWrite(writeData: Short): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbIntWriter = new HbRddFormatsWriter[Int] {
    def formatsWrite(writeData: Int): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbFloatWriter = new HbRddFormatsWriter[Float] {
    def formatsWrite(writeData: Float): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbDoubleWrite = new HbRddFormatsWriter[Double] {
    def formatsWrite(writeData: Double): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbLongWrite = new HbRddFormatsWriter[Long] {
    def formatsWrite(writeData: Long): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbStringWrite = new HbRddFormatsWriter[String] {
    def formatsWrite(writeData: String): Array[Byte] = Bytes.toBytes(writeData)
  }

  implicit val hbJsonWrite = new HbRddFormatsWriter[JValue] {
    import org.json4s.jackson.JsonMethods._
    def formatsWrite(writeData: JValue): Array[Byte] = Bytes.toBytes(compact(writeData))
  }
} 
Developer: TopSpoofer · Project: hbrdd · Source: HbRddWriters.scala
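
Taken together, the readers and writers form a small typeclass pair. A sketch of how they resolve at call sites (the two trait definitions are inferred from the implicits above; the generic helpers are illustrative):

import org.apache.hadoop.hbase.util.Bytes

trait HbRddFormatsReader[T] { def formatsRead(readData: Array[Byte]): T }
trait HbRddFormatsWriter[T] { def formatsWrite(writeData: T): Array[Byte] }

object RoundTripDemo extends App {
  implicit val longReader: HbRddFormatsReader[Long] = new HbRddFormatsReader[Long] {
    def formatsRead(readData: Array[Byte]): Long = Bytes.toLong(readData)
  }
  implicit val longWriter: HbRddFormatsWriter[Long] = new HbRddFormatsWriter[Long] {
    def formatsWrite(writeData: Long): Array[Byte] = Bytes.toBytes(writeData)
  }

  // Generic helpers that pick up whichever instance is in implicit scope.
  def toBytes[T](v: T)(implicit w: HbRddFormatsWriter[T]): Array[Byte] = w.formatsWrite(v)
  def fromBytes[T](b: Array[Byte])(implicit r: HbRddFormatsReader[T]): T = r.formatsRead(b)

  println(fromBytes[Long](toBytes(42L))) // 42
}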

Example 12: HbaseExample

// Package declaration and imported dependencies
package com.hortonworks.examples

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Get, HBaseAdmin, HTable}
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}

object HbaseExample {

  def main(arg: Array[String]) {

    if (arg.length < 2) {
      System.err.println("Usage: HbaseExample Hbase-Table RowKey")
      System.exit(1)
    }

    val jobName = "SparkHBaseTest"
    val conf = new SparkConf().setAppName(jobName)
    val sc = new SparkContext(conf)

    val hbaseTable = arg(0)
    val rowKey = arg(1)

    println("hbase table: " + hbaseTable)
    println("row key to get: " + rowKey)

    runTest(hbaseTable, rowKey, sc)
  }

  def runTest(hbaseTable: String, rowKey: String, sc: SparkContext): Unit = {
    val hconf = HBaseConfiguration.create()

    hconf.set(TableOutputFormat.OUTPUT_TABLE, hbaseTable)
    hconf.set(TableInputFormat.INPUT_TABLE, hbaseTable)
    HBaseAdmin.checkHBaseAvailable(hconf)
    val htable = new HTable(hconf, hbaseTable)
    println("Hbase table connection established")
    println(htable.getTableDescriptor)
    println(htable.get(new Get(Bytes.toBytes(rowKey))).getMap)
    println("Getting count of records in table now...")
    val hBaseRDD = sc.newAPIHadoopRDD(hconf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    println(hBaseRDD.count())
    println("Done!")
    null
  }
} 
Developer: clukasikhw · Project: kerberized-spark-hbase-hdp2.4-example · Source: HbaseExample.scala


Note: the org.apache.hadoop.hbase.util.Bytes class examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not republish without permission.