当前位置: 首页>>代码示例>>Java>>正文


Java HTable.put方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.client.HTable.put方法的典型用法代码示例。如果您正苦于以下问题:Java HTable.put方法的具体用法?Java HTable.put怎么用?Java HTable.put使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.client.HTable的用法示例。


在下文中一共展示了HTable.put方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: doPuts

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Issues sequential puts across all given tables until roughly {@code maxOps}
 * operations have been performed, tolerating quota-throttling rejections.
 *
 * @param maxOps upper bound on the number of put operations to attempt
 * @param tables tables that each receive every generated row
 * @return number of put operations issued before completion or throttling
 */
private int doPuts(int maxOps, final HTable... tables) throws Exception {
  int opCount = 0;
  try {
    while (opCount < maxOps) {
      Put p = new Put(Bytes.toBytes("row-" + opCount));
      p.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("data-" + opCount));
      for (final HTable t : tables) {
        t.put(p);
      }
      opCount += tables.length;
    }
  } catch (RetriesExhaustedWithDetailsException e) {
    // A throttling rejection is the expected stop condition here;
    // any other cause is a genuine failure and must propagate.
    for (Throwable cause : e.getCauses()) {
      if (!(cause instanceof ThrottlingException)) {
        throw e;
      }
    }
    LOG.error("put failed after nRetries=" + opCount, e);
  }
  return opCount;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:22,代码来源:TestQuotaThrottle.java

示例2: putLob

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Uploads an object into a LOB table: the file name is written to
 * family1/f1_q1 and the raw file bytes to family2/f2_q1.
 *
 * @param tableName Hyperbase table name
 * @param row       row key as a String (converted to bytes)
 * @param filename  name of the file being stored
 * @param fileData  raw file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    HTable htable = null;
    try {
        htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
    } catch (IOException e1) {
        // Preserve original best-effort behavior: log and continue.
        e1.printStackTrace();
    } finally {
        // BUGFIX: close the table even when put/flush throws, so the
        // underlying connection resources are not leaked on failure.
        if (htable != null) {
            try {
                htable.close();
            } catch (IOException ignored) {
                // closing is best-effort
            }
        }
    }
}
 
开发者ID:Transwarp-DE,项目名称:Transwarp-Sample-Code,代码行数:23,代码来源:LobUtil.java

示例3: testHBASE14489

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Regression test for HBASE-14489: a filter that rejects every row must
 * still trigger the RegionObserver's filterRow hook during a scan.
 */
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan scan = new Scan();
  scan.setFilter(new FilterAllFilter());
  ResultScanner scanner = table.getScanner(scan);
  try {
    // Drain the scanner; every row is filtered so no Result is expected.
    while (scanner.next() != null) {
    }
  } finally {
    scanner.close();
  }
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:23,代码来源:TestRegionObserverInterface.java

示例4: prepareData

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Drops any pre-existing table, recreates it, and loads ten flushed HFiles
 * of ten rows each (128KB random values), then returns the resulting store.
 */
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTable table = TEST_UTIL.createTable(tableName, family);
  Random rand = new Random();
  for (int fileIdx = 0; fileIdx < 10; fileIdx++) {
    // Each outer iteration writes ten rows, then flushes them into one HFile.
    for (int rowIdx = 0; rowIdx < 10; rowIdx++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      Put put = new Put(Bytes.toBytes(fileIdx * 10 + rowIdx));
      table.put(put.add(family, qualifier, value));
    }
    admin.flush(tableName);
  }
  return getStoreWithName(tableName);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:TestCompactionWithThroughputController.java

示例5: before

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Creates the test table split at ROW_B and ROW_C and seeds one cell
 * (col1 = 1) in each of the three resulting regions.
 */
@Before
public void before()  throws Exception {
  final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);

  // One identical cell per region row; the loop replaces three
  // copy-pasted put blocks that differed only in the row key.
  for (byte[] row : new byte[][] { ROW_A, ROW_B, ROW_C }) {
    Put put = new Put(row);
    put.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
    table.put(put);
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:18,代码来源:TestServerCustomProtocol.java

示例6: setupTableWithRegionReplica

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Setup a clean table with a certain region_replica count.
 *
 * It will set the {@code tbl} field, which needs to be closed after the test.
 *
 * @param tablename name of the table to create (javadoc previously said
 *                  {@code tableName}, which did not match the parameter)
 * @param replicaCount desired region_replica count for the table
 * @throws Exception if table creation or the initial load fails
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  // Field tbl is intentionally left open; the test teardown closes it.
  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  // Batch all rows in one call, then force the buffered writes out.
  tbl.put(puts);
  tbl.flushCommits();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:27,代码来源:TestHBaseFsck.java

示例7: insertData

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Inserts a single demo row ("112233bbbcccc") with three columns into the
 * given table, obtaining the table handle from a new HTablePool.
 *
 * NOTE(review): neither the pool nor the table is closed/returned, so each
 * call leaks pooled resources — confirm whether callers manage the pool.
 */
public static void insertData(String tableName) {
    System.out.println("start insert data ......");
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    Put put = new Put("112233bbbcccc".getBytes());// one Put is one row; each row needs a unique rowkey, passed to the Put constructor
    put.add("column1".getBytes(), null, "aaa".getBytes());// first column of this row
    put.add("column2".getBytes(), null, "bbb".getBytes());// second column of this row (original comment wrongly said "third")
    put.add("column3".getBytes(), null, "ccc".getBytes());// third column of this row
    try {
        table.put(put);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("end insert data ......");
}
 
开发者ID:yjp123456,项目名称:SparkDemo,代码行数:16,代码来源:MyClass.java

示例8: setupBeforeClass

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * A set up method to start the test cluster. AggregateProtocolImpl is registered and will be
 * loaded during region startup.
 * @throws Exception if the mini cluster or table creation fails
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {

  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

  util.startMiniCluster(2);
  final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
  /**
   * The testtable has one CQ which is always populated and one variable CQ for each row rowkey1:
   * CF:CQ CF:CQ1 rowKey2: CF:CQ CF:CQ2
   */
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    Double d = new Double(i);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(d));
    table.put(put);
    Put p2 = new Put(ROWS[i]);
    // BUGFIX: was put.setDurability(...) — durability must be set on p2,
    // otherwise the second put is written through the WAL unintentionally.
    p2.setDurability(Durability.SKIP_WAL);
    p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(d)), Bytes.toBytes(d * 0.10));
    table.put(p2);
  }
  table.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:32,代码来源:TestDoubleColumnInterpreter.java

示例9: generateHBaseDatasetCompositeKeyInt

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * (Re)creates a table whose row keys are a composite of a 4-byte big-endian
 * int followed by an 8-byte big-endian counter, and fills it with dummy rows.
 *
 * @param admin         admin used to (re)create the table
 * @param tableName     name of the table to generate
 * @param numberRegions number of regions to pre-split the table into
 */
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter++) {
    // ByteBuffer writes both key components big-endian, replacing the
    // original manual shift-and-mask loop that assembled the trailing
    // 8 counter bytes — the produced byte layout is identical.
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).putLong(counter).array();

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:37,代码来源:TestTableGenerator.java

示例10: generateHBaseDatasetDoubleOB

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * (Re)creates a table keyed by OrderedBytes-encoded doubles (ASCENDING) and
 * populates it with one formatted value per key from 0.5 to 100.0, step 0.75.
 *
 * @param admin         admin used to (re)create the table
 * @param tableName     name of the table to generate
 * @param numberRegions number of regions to pre-split the table into
 */
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));

  // Pre-split only when more than one region is requested.
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double key = 0.5; key <= 100.00; key += 0.75) {
    // The OrderedBytes float64 encoding occupies 9 bytes.
    byte[] encoded = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(range, key,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put rowPut = new Put(encoded);
    rowPut.add(FAMILY_F, COLUMN_C, String.format("value %03f", key).getBytes());
    table.put(rowPut);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例11: generateHBaseDatasetBigIntOBDesc

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * (Re)creates a table keyed by OrderedBytes-encoded 64-bit integers in
 * DESCENDING order and fills it with 101 consecutive millisecond-timestamp
 * keys starting at a fixed epoch value.
 *
 * @param admin         admin used to (re)create the table
 * @param tableName     name of the table to generate
 * @param numberRegions number of regions to pre-split the table into
 */
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  // Fixed base timestamp; the cast keeps the multiplication in long space.
  long startTime = (long) 1438034423 * 1000;
  for (long ts = startTime; ts <= startTime + 100; ts++) {
    // The OrderedBytes int64 encoding occupies 9 bytes.
    byte[] encoded = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(range, ts,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put rowPut = new Put(encoded);
    rowPut.add(FAMILY_F, COLUMN_C, String.format("value %d", ts).getBytes());
    table.put(rowPut);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:34,代码来源:TestTableGenerator.java

示例12: testPreWALRestoreSkip

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
// Verifies that a table flagged as TABLE_SKIPPED by SimpleRegionObserver
// does not invoke any pre/post WAL-restore coprocessor hooks after its
// region server is killed and its regions are reassigned and replayed.
@Test (timeout=300000)
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  HTable table = util.createTable(tableName, new byte[][] { A, B, C });

  // Start a second region server and move the first region onto it.
  JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
  ServerName sn2 = rs1.getRegionServer().getServerName();
  String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

  util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
  // Busy-wait until the region actually lands on the new server.
  while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
    Thread.sleep(100);
  }

  // Write one row so the WAL has an entry to (potentially) replay.
  Put put = new Put(ROW);
  put.add(A, A, A);
  put.add(B, B, B);
  put.add(C, C, C);
  table.put(put);
  table.flushCommits();

  cluster.killRegionServer(rs1.getRegionServer().getServerName());
  Threads.sleep(20000); // just to be sure that the kill has fully started.
  util.waitUntilAllRegionsAssigned(tableName);

  // All four WAL-restore counters must stay at zero for a skipped table.
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "getCtPreWALRestore",
      "getCtPostWALRestore", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
      tableName,
      new Integer[] {0, 0, 0, 0});

  util.deleteTable(tableName);
  table.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:35,代码来源:TestRegionObserverInterface.java

示例13: testOveralyOnOtherCluster

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
// Verifies that an HBase mini-cluster can be overlaid on an externally
// managed MiniDFSCluster: both testing utilities must resolve to the same
// filesystem, and basic table writes must succeed on the overlaid cluster.
// (Method name "Overaly" is a typo for "Overlay", kept for compatibility.)
@Test
public void testOveralyOnOtherCluster() throws Exception {
  // just run HDFS
  HBaseTestingUtility util1 = new HBaseTestingUtility();
  MiniDFSCluster dfs = util1.startMiniDFSCluster(1);

  // run HBase on that HDFS
  HBaseTestingUtility util2 = new HBaseTestingUtility();
  // set the dfs
  util2.setDFSCluster(dfs, false);
  util2.startMiniCluster();

  //ensure that they are pointed at the same place
  FileSystem fs = dfs.getFileSystem();
  FileSystem targetFs = util2.getDFSCluster().getFileSystem();
  assertFsSameUri(fs, targetFs);

  fs = FileSystem.get(util1.getConfiguration());
  targetFs = FileSystem.get(util2.getConfiguration());
  assertFsSameUri(fs, targetFs);

  // A file created through one handle must be visible through the other.
  Path randomFile = new Path("/"+UUID.randomUUID());
  assertTrue(targetFs.createNewFile(randomFile));
  assertTrue(fs.exists(randomFile));

  // do a simple create/write to ensure the cluster works as expected
  byte[] family = Bytes.toBytes("testfamily");
  byte[] tablename = Bytes.toBytes("testtable");
  HTable table = util2.createTable(tablename, family);
  Put p = new Put(new byte[] { 1, 2, 3 });
  p.add(family, null, new byte[] { 1 });
  table.put(p);
  table.flushCommits();

  // shutdown and make sure cleanly shutting down
  util2.shutdownMiniCluster();
  util1.shutdownMiniDFSCluster();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:39,代码来源:TestHBaseOnOtherDfsCluster.java

示例14: createMultiRegionsWithWritableSerialization

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * Inserts multiple regions into hbase:meta using Writable serialization instead of PB.
 *
 * @param c         configuration used to connect to the meta table
 * @param tableName table the fabricated regions belong to
 * @param startKeys sorted in place; adjacent pairs become region boundaries
 * @return the number of regions inserted
 * @throws IOException if writing to hbase:meta fails
 */
public int createMultiRegionsWithWritableSerialization(final Configuration c,
    final TableName tableName, byte [][] startKeys)
throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);

  // CLEANUP: removed the unused newRegions list the original built but never read.
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    // Wrap around so the last region's end key is the first start key.
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
    Put put = new Put(hri.getRegionName());
    put.setDurability(Durability.SKIP_WAL);
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      getBytes(hri)); //this is the old Writable serialization

    //also add the region as it's daughters
    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
        getBytes(hri)); //this is the old Writable serialization

    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
        getBytes(hri)); //this is the old Writable serialization

    meta.put(put);
    LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());

    count++;
  }
  meta.close();
  return count;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:37,代码来源:TestMetaMigrationConvertingToPB.java

示例15: generateHBaseDataset2

import org.apache.hadoop.hbase.client.HTable; //导入方法依赖的package包/类
/**
 * (Re)creates a table with family "f" and loads at least 1000 rows, each
 * carrying five 5000-byte random-valued columns c1..c5. Row keys combine a
 * per-region letter ('a', 'b', ...) with an outer iteration counter.
 *
 * @param admin         admin used to (re)create the table
 * @param tableName     name of the table to generate
 * @param numberRegions number of regions to pre-split the table into
 */
public static void generateHBaseDataset2(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor("f"));

  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  final int numColumns = 5;
  Random random = new Random();
  int rowCount = 0;
  int iteration = 0;
  while (rowCount < 1000) {
    // The region letter restarts at 'a' every iteration, so each pass
    // writes one row per region before the iteration counter advances.
    char regionChar = 'a';
    for (int region = 0; region < numberRegions; region++, regionChar++, rowCount++) {
      Put rowPut = new Put(("" + regionChar + iteration).getBytes());
      for (int col = 1; col <= numColumns; col++) {
        byte[] payload = new byte[5000];
        random.nextBytes(payload);
        rowPut.add("f".getBytes(), ("c" + col).getBytes(), payload);
      }
      table.put(rowPut);
    }
    iteration++;
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
开发者ID:skhalifa,项目名称:QDrill,代码行数:44,代码来源:TestTableGenerator.java


注:本文中的org.apache.hadoop.hbase.client.HTable.put方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。