

Scala Put Class Code Examples

This article collects typical usage examples of the org.apache.hadoop.hbase.client.Put class in Scala. If you have been wondering what the Put class does, how to use it, or what real-world code with it looks like, the curated class examples below should help.


Six code examples of the Put class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Scala code examples.
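Before diving into the examples, here is a minimal, self-contained sketch of the basic Put workflow (row key, column family, qualifier, value), assuming the connection-based HBase 1.x+ client API and an already-existing table named "demo" (the table and cell names are illustrative, not taken from the examples below):

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.util.Bytes

object PutQuickstart {
  def main(args: Array[String]): Unit = {
    val connection = ConnectionFactory.createConnection(HBaseConfiguration.create())
    try {
      // A Put is keyed by its row key; each addColumn call stages one cell.
      val put = new Put(Bytes.toBytes("row-1"))
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
      val table = connection.getTable(TableName.valueOf("demo"))
      try table.put(put) finally table.close()
    } finally connection.close()
  }
}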

Example 1: Employee

// Set the package name and import the required classes
package com.zaloni.mgohain.sparkHbaseIntegration.services

import org.apache.hadoop.hbase.client.{HBaseAdmin, HTable, Put}
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor}

object Employee {
  def main(args: Array[String]) {
    if (args.length != 1) {
      System.err.println("In correct number of arguments " + args.length)
      System.out.println("Please provide correct arguments.")
      System.exit(1)
    }
    val hbaseConf = HBaseConfiguration.create()
    val tableName = "employee"
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    hbaseConf.set("hbase.zookeeper.quorum","quickstart.cloudera")
    hbaseConf.set("hbase.zookeeper.property.client.port","2181")
    val admin = new HBaseAdmin(hbaseConf)
    val cfProfessionalData = Bytes.toBytes("professional_data")
    val cfPersonalData = Bytes.toBytes("personal_data")
    if (!admin.isTableAvailable(tableName)) {
      val tableDesc = new HTableDescriptor(tableName)
      tableDesc.addFamily(new HColumnDescriptor(cfProfessionalData))
      tableDesc.addFamily(new HColumnDescriptor(cfPersonalData))
      // The original snippet built the descriptor but never created the table.
      admin.createTable(tableDesc)
    }
    val hTable = new HTable(hbaseConf,tableName)
    //val records = sc.textFile(args(0))
    val put = new Put(Bytes.toBytes("e_1"))
    val eId = Bytes.toBytes("Emp_id")
    val name = Bytes.toBytes("Name")
    val dsgtn = Bytes.toBytes("Designation")
    val doj = Bytes.toBytes("DOJ")
    val addr = Bytes.toBytes("Address")
    val phn = Bytes.toBytes("Phone")
    val dob = Bytes.toBytes("DOB")
    put.add(cfProfessionalData, eId, Bytes.toBytes(1))
    put.add(cfProfessionalData, name, Bytes.toBytes("Mridul Gohain"))
    put.add(cfProfessionalData, dsgtn, Bytes.toBytes("SE"))
    put.add(cfProfessionalData, doj, Bytes.toBytes("15-07-2015"))
    put.add(cfPersonalData, addr, Bytes.toBytes("Chabua"))
    put.add(cfPersonalData, phn, Bytes.toBytes("9859559606"))
    put.add(cfPersonalData, dob, Bytes.toBytes("04-10-1991"))
    hTable.put(put)
    hTable.close()
  }
} 
Developer: mridulZaloni, Project: sparkStreaming, Lines: 49, Source: Employee.scala
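Example 1 uses the pre-1.0 client API (HBaseAdmin, HTable, Put.add), which was deprecated in HBase 1.x and removed in 2.x. As a hedged sketch, the same write against the modern connection-based API might look like this (only the Name cell is shown; the HBase 2.x descriptor builders are assumed):

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{ColumnFamilyDescriptorBuilder, ConnectionFactory, Put, TableDescriptorBuilder}
import org.apache.hadoop.hbase.util.Bytes

object EmployeeModern {
  def main(args: Array[String]): Unit = {
    val connection = ConnectionFactory.createConnection(HBaseConfiguration.create())
    val admin = connection.getAdmin
    val tableName = TableName.valueOf("employee")
    if (!admin.tableExists(tableName)) {
      // Builders replace HTableDescriptor/HColumnDescriptor in HBase 2.x.
      admin.createTable(
        TableDescriptorBuilder.newBuilder(tableName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("professional_data"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("personal_data"))
          .build())
    }
    val table = connection.getTable(tableName)
    val put = new Put(Bytes.toBytes("e_1"))
    put.addColumn(Bytes.toBytes("professional_data"), Bytes.toBytes("Name"), Bytes.toBytes("Mridul Gohain"))
    table.put(put)
    table.close()
    connection.close()
  }
}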

Example 2: HTableStage

// Set the package name and import the required classes
package akka.stream.alpakka.hbase.javadsl

import akka.stream.alpakka.hbase.HTableSettings
import akka.stream.alpakka.hbase.internal.HBaseFlowStage
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.{Done, NotUsed}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put

import scala.collection.immutable
import scala.concurrent.Future

object HTableStage {

  def table[T](conf: Configuration,
               tableName: TableName,
               columnFamilies: java.util.List[String],
               converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
    import scala.compat.java8.FunctionConverters._
    import scala.collection.JavaConverters._
    HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
  }

  def sink[A](config: HTableSettings[A]): akka.stream.javadsl.Sink[A, Future[Done]] =
    Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).asJava

  def flow[A](settings: HTableSettings[A]): akka.stream.javadsl.Flow[A, A, NotUsed] =
    Flow.fromGraph(new HBaseFlowStage[A](settings)).asJava

} 
Developer: akka, Project: alpakka, Lines: 32, Source: HTableStage.scala
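A hedged usage sketch for the stage above from Scala: build an HTableSettings with a converter that maps each element to a Put, then materialize the sink. The Person type, table name, and column family here are illustrative assumptions, not part of the Alpakka API:

import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
import akka.stream.alpakka.hbase.HTableSettings
import scala.collection.immutable

case class Person(id: Int, name: String)

object HTableStageUsage {
  // One Put per element, keyed by the person's id.
  val converter: Person => Put = { person =>
    val put = new Put(Bytes.toBytes(s"id_${person.id}"))
    put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(person.name))
    put
  }

  val settings: HTableSettings[Person] =
    HTableSettings(HBaseConfiguration.create(), TableName.valueOf("person"), immutable.Seq("info"), converter)

  // A Source[Person, _] can then be run into HTableStage.sink(settings)
  // to write each element to HBase.
}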

Example 3: HBaseWriter

// Set the package name and import the required classes
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat



object HBaseWriter {

  def toStock(rec: String): Stock = {
    val splits = rec.split(",")
    Stock(splits(0), splits(1), splits(2), splits(3), splits(4), splits(5), splits(6))
  }

  def toPut(stock: Stock): (ImmutableBytesWritable, Put) = {
    val rowKey = stock.pdate
    val put = new Put(Bytes.toBytes(rowKey))

    put.add(StockHBaseType.cfPrices, StockHBaseType.colOpen, Bytes.toBytes(stock.open))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colHigh, Bytes.toBytes(stock.high))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colLow, Bytes.toBytes(stock.low))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colClose, Bytes.toBytes(stock.close))
    put.add(StockHBaseType.cfPrices, StockHBaseType.colAdjClose, Bytes.toBytes(stock.adjClose))
    put.add(StockHBaseType.cfVolume, StockHBaseType.colVol, Bytes.toBytes(stock.volume))

    (new ImmutableBytesWritable(Bytes.toBytes(rowKey)), put)
  }
  
  
  
  def main(args: Array[String]): Unit = {
    if(args.size != 3){
      println("Usage: HBaseConnector <data source> <table name> <number of thread>")
      System.exit(-1)
    }
    
    val sourceFile = args(0)
    val tableName = args(1)
    val numThread = args(2).toInt
    // This parameter controls how many parallel loads occur.
    // More is faster, but more threads put more pressure on the HBase heap.
    
    val sc = Connection.createSparkConf()
    val hbaseConf = Connection.createHBaseConf()

    
    hbaseConf.set(TableOutputFormat.OUTPUT_TABLE, tableName)
    
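    // Note: with TableOutputFormat the output path below is a formality required
    // by saveAsNewAPIHadoopFile; rows actually go to the table set in hbaseConf.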
    sc.textFile(sourceFile).
        filter(line => line.split(",").size == 7 && !line.startsWith("Date")).
        repartition(numThread).
        map(toStock).
        map(toPut).
        saveAsNewAPIHadoopFile("/user/user01/out",
            classOf[ImmutableBytesWritable], 
            classOf[Put], 
            classOf[TableOutputFormat[Put]], 
            hbaseConf)
  }

} 
Developer: Einext, Project: spark-projects, Lines: 62, Source: HBaseWriter.scala
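This example depends on three project-local definitions that are not shown (Stock, StockHBaseType, Connection). A hypothetical sketch of what they could look like, purely to make the example self-contained; all names and fields below are assumptions inferred from how they are used:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}

// One CSV record: Date,Open,High,Low,Close,Adj Close,Volume
case class Stock(pdate: String, open: String, high: String, low: String,
                 close: String, adjClose: String, volume: String)

// Pre-encoded column family and qualifier names used by toPut.
object StockHBaseType {
  val cfPrices: Array[Byte] = Bytes.toBytes("prices")
  val cfVolume: Array[Byte] = Bytes.toBytes("volume")
  val colOpen: Array[Byte] = Bytes.toBytes("open")
  val colHigh: Array[Byte] = Bytes.toBytes("high")
  val colLow: Array[Byte] = Bytes.toBytes("low")
  val colClose: Array[Byte] = Bytes.toBytes("close")
  val colAdjClose: Array[Byte] = Bytes.toBytes("adjClose")
  val colVol: Array[Byte] = Bytes.toBytes("vol")
}

// Despite its name, createSparkConf must return a SparkContext,
// since the caller invokes textFile on the result.
object Connection {
  def createSparkConf(): SparkContext =
    new SparkContext(new SparkConf().setAppName("HBaseWriter"))
  def createHBaseConf(): Configuration = HBaseConfiguration.create()
}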

Example 4: HTableSettings

// Set the package name and import the required classes
package akka.stream.alpakka.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.TableName
import org.apache.hadoop.hbase.client.Put

import scala.collection.immutable

final case class HTableSettings[T](conf: Configuration,
                                   tableName: TableName,
                                   columnFamilies: immutable.Seq[String],
                                   converter: T => Put)

object HTableSettings {
  def create[T](conf: Configuration,
                tableName: TableName,
                columnFamilies: java.util.List[String],
                converter: java.util.function.Function[T, Put]): HTableSettings[T] = {
    import scala.compat.java8.FunctionConverters._
    import scala.collection.JavaConverters._
    HTableSettings(conf, tableName, immutable.Seq(columnFamilies.asScala: _*), asScalaFromFunction(converter))
  }
} 
Developer: akka, Project: alpakka, Lines: 24, Source: HBaseSettings.scala

Example 5: insertInto

// Set the package name and import the required classes
package swiss.army.knife.io

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
import org.apache.hadoop.hbase.client.{Put, Table, ConnectionFactory, Connection}
import org.apache.spark.sql.DataFrame
import scala.collection.mutable.ListBuffer
import scala.collection.JavaConverters._



// The enclosing object is not shown in the original snippet; it is reconstructed
// here from the source file name (DataFrameToHbase.scala) so the code compiles.
object DataFrameToHbase {

  def insertInto(dataFrame: DataFrame, tableName: String, family: String, qualifier: String, partLen: Int): Unit = {
    dataFrame.foreachPartition(partitionOfRecords => {
      // Open an HBase connection and table handle inside each partition
      val hConfig: Configuration = HBaseConfiguration.create()
      val connection: Connection = ConnectionFactory.createConnection(hConfig)
      val table: Table = connection.getTable(TableName.valueOf(tableName))
      var puts: ListBuffer[Put] = ListBuffer[Put]()
      var cnt: Int = 0

      try {
        while(partitionOfRecords.hasNext) {
          val row = partitionOfRecords.next
          val put : Put = new Put(Bytes.toBytes(row(0).toString))
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(row(1).toString))
          puts += put
          cnt = cnt + 1
          if(cnt == partLen) {
            table.put(puts.toList.asJava)
            puts = ListBuffer[Put]()
            cnt = 0
          }
        }
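        // Flush the puts left over from the final partial batch.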
        table.put(puts.toList.asJava)
      } catch {
        case e: Exception => e.printStackTrace
      }
      finally {
        if(table != null){
          table.close()}
        if(connection != null){
          connection.close()}
      }
    })
  }

} 
Developer: Justontheway, Project: SwissArmyKnife, Lines: 49, Source: DataFrameToHbase.scala
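A hedged usage sketch: batching a two-column DataFrame into HBase in groups of 500 puts, assuming the enclosing object is DataFrameToHbase as reconstructed above, and that a table "demo_table" with column family "cf" already exists (both names are illustrative):

import org.apache.spark.sql.SparkSession
import swiss.army.knife.io.DataFrameToHbase

object InsertIntoUsage {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("insert-into-usage").getOrCreate()
    import spark.implicits._
    // Column 0 becomes the row key, column 1 the cell value.
    val df = Seq(("row1", "v1"), ("row2", "v2")).toDF("key", "value")
    DataFrameToHbase.insertInto(df, "demo_table", "cf", "q", partLen = 500)
    spark.stop()
  }
}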

Example 6: HbaseTest

// Set the package name and import the required classes
package teleporter.stream.integration.template

import akka.Done
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitch, KillSwitches}
import org.apache.hadoop.hbase.client.Put
import teleporter.integration.component.hbase.{Hbase, HbaseAction}
import teleporter.integration.core.Streams._
import teleporter.integration.core.{Message, TeleporterCenter}
import teleporter.integration.utils.Bytes._

import scala.concurrent.Future


object HbaseTest extends StreamLogic {
  override def apply(key: String, center: TeleporterCenter): (KillSwitch, Future[Done]) = {
    import center.{materializer, self}
    Source.single(1)
      .map { o =>
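        // The String-to-Array[Byte] conversions below are presumably supplied
        // by the implicits imported from teleporter.integration.utils.Bytes._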
        val put = new Put("1")
        put.addColumn("f", "q", "v")
        Message.apply(HbaseAction("teleporter", put))
      }
      .via(Hbase.flow("/sink/test/hbase_test/hbase_test"))
      .viaMat(KillSwitches.single)(Keep.right).watchTermination()(Keep.both)
      .to(Sink.ignore).run()
  }
} 
Developer: huanwuji, Project: teleporter, Lines: 29, Source: HbaseTest.scala


Note: The org.apache.hadoop.hbase.client.Put class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not repost without permission.