Java HTable.put Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HTable.put. If you are wondering what HTable.put does and how to use it, the curated code samples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.client.HTable, the class this method belongs to.


The following 15 code examples of HTable.put are shown, drawn from open-source projects and ordered by popularity.
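
Before the examples, here is a minimal, self-contained sketch of a single put against the legacy HTable client used throughout these examples. The table, family, and qualifier names are placeholders, not names from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class HTablePutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    HTable table = new HTable(conf, "demo_table");    // placeholder table name
    try {
      Put put = new Put(Bytes.toBytes("row-1"));      // one Put object per row key
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-1"));
      table.put(put); // sent immediately when auto-flush is on (the default)
    } finally {
      table.close();  // also flushes any client-side buffered mutations
    }
  }
}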

Example 1: doPuts

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
private int doPuts(int maxOps, final HTable... tables) throws Exception {
  int count = 0;
  try {
    while (count < maxOps) {
      Put put = new Put(Bytes.toBytes("row-" + count));
      put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("data-" + count));
      for (final HTable table : tables) {
        table.put(put);
      }
      count += tables.length;
    }
  } catch (RetriesExhaustedWithDetailsException e) {
    for (Throwable t : e.getCauses()) {
      if (!(t instanceof ThrottlingException)) {
        throw e;
      }
    }
    LOG.error("put failed after nRetries=" + count, e);
  }
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestQuotaThrottle.java
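
Design note: the helper swallows only ThrottlingException causes inside the RetriesExhaustedWithDetailsException; any other failure is rethrown, so the return value reliably counts how many puts succeeded before the quota throttle kicked in.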

Example 2: putLob

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
/**
 * Uploads an object to a LOB (large object) table.
 * @param tableName the Hyperbase table name
 * @param row the row key (converted to bytes internally)
 * @param filename the file name
 * @param fileData the file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    try {
        HTable htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
        htable.close();
    } catch (IOException e1) {
        // log and swallow; production code should propagate or handle the failure
        e1.printStackTrace();
    }
}
 
Developer: Transwarp-DE, Project: Transwarp-Sample-Code, Lines: 23, Source: LobUtil.java
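
A note on the explicit flushCommits(): the legacy HTable client buffers puts on the client side when auto-flush is disabled, and flushCommits() pushes the buffered mutations to the server. With the default auto-flush enabled it is effectively a no-op, and HTable.close() flushes the buffer anyway, so the call here is defensive rather than required.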

Example 3: testHBASE14489

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan s = new Scan();
  s.setFilter(new FilterAllFilter());
  ResultScanner scanner = table.getScanner(s);
  try {
    for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
      // drain the scanner; FilterAllFilter means no rows come back
    }
  } finally {
    scanner.close();
  }
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestRegionObserverInterface.java
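
The scan here deliberately returns nothing, since FilterAllFilter filters out every row. The test's real assertion happens in verifyMethodResult: it checks that the region observer's filter-row hook was invoked during the scan.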

Example 4: prepareData

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
private Store prepareData() throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTable table = TEST_UTIL.createTable(tableName, family);
  Random rand = new Random();
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      rand.nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).add(family, qualifier, value));
    }
    admin.flush(tableName);
  }
  return getStoreWithName(tableName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestCompactionWithThroughputController.java
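
Each outer iteration writes ten rows of 128 KB random values and then flushes, so the method leaves behind ten store files of roughly 1.28 MB each, which is exactly the shape the compaction-throughput test needs.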

Example 5: before

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
@Before
public void before()  throws Exception {
  final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);

  Put puta = new Put( ROW_A );
  puta.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(puta);

  Put putb = new Put( ROW_B );
  putb.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putb);

  Put putc = new Put( ROW_C );
  putc.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putc);
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestServerCustomProtocol.java
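
Note that Put.add(family, qualifier, value), used in this and several later examples, was deprecated in HBase 1.0 in favor of Put.addColumn, which Examples 1 and 3 already use; both write a single cell.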

Example 6: setupTableWithRegionReplica

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
/**
 * Sets up a clean table with the given region replica count.
 *
 * Assigns the field tbl, which must be closed after the test.
 *
 * @param tablename the table name
 * @param replicaCount the region replica count
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  tbl.flushCommits();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestHBaseFsck.java

Example 7: insertData

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
public static void insertData(String tableName) {
    System.out.println("start insert data ......");
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    Put put = new Put("112233bbbcccc".getBytes());// one Put represents one row; create another Put for a second row. Each row has a unique row key, here the value passed to the Put constructor
    put.add("column1".getBytes(), null, "aaa".getBytes());// first column of this row
    put.add("column2".getBytes(), null, "bbb".getBytes());// second column of this row
    put.add("column3".getBytes(), null, "ccc".getBytes());// third column of this row
    try {
        table.put(put);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("end insert data ......");
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 16, Source: MyClass.java
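
HTablePool, used above, was deprecated in later 0.9x releases and removed in HBase 1.0. Below is a hedged sketch of the same insert against the Connection/Table API that replaced it; the configuration parameter and the table/column names are carried over from the example, and the class name is illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ModernInsertExample {
    public static void insertData(Configuration configuration, String tableName) throws IOException {
        // A Connection is heavyweight and thread-safe; real applications create one
        // and share it rather than opening it per call.
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf(tableName))) {
            Put put = new Put(Bytes.toBytes("112233bbbcccc"));
            put.addColumn(Bytes.toBytes("column1"), null, Bytes.toBytes("aaa"));
            put.addColumn(Bytes.toBytes("column2"), null, Bytes.toBytes("bbb"));
            put.addColumn(Bytes.toBytes("column3"), null, Bytes.toBytes("ccc"));
            table.put(put);
        }
    }
}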

Example 8: setupBeforeClass

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
/**
 * A setup method that starts the test cluster. AggregateImplementation is registered and will
 * be loaded during region startup.
 * @throws Exception
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {

  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

  util.startMiniCluster(2);
  final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
  /**
   * The test table has one CQ which is always populated and one variable CQ per row:
   * rowKey1: CF:CQ, CF:CQ1; rowKey2: CF:CQ, CF:CQ2
   */
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    Double d = new Double(i);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(d));
    table.put(put);
    Put p2 = new Put(ROWS[i]);
    p2.setDurability(Durability.SKIP_WAL);
    p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(d)), Bytes.toBytes(d * 0.10));
    table.put(p2);
  }
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: TestDoubleColumnInterpreter.java

Example 9: generateHBaseDatasetCompositeKeyInt

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter ++) {
    // composite key: 4-byte big-endian int prefix, followed by an 8-byte big-endian counter
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for(int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte)(counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
Developer: skhalifa, Project: QDrill, Lines: 37, Source: TestTableGenerator.java

Example 10: generateHBaseDatasetDoubleOB

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
      byte[] bytes = new byte[9];
      org.apache.hadoop.hbase.util.PositionedByteRange br =
              new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
      org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
              org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java

Example 11: generateHBaseDatasetBigIntOBDesc

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  long startTime = (long)1438034423 * 1000;
  for (long i = startTime; i <= startTime + 100; i++) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(br, i,
            org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 34, Source: TestTableGenerator.java
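
The OrderedBytes row keys produced in Examples 10 and 11 can be decoded by rewinding the same PositionedByteRange. A minimal round-trip sketch (the class name is illustrative; the utility types are the ones used above):

import org.apache.hadoop.hbase.util.Order;
import org.apache.hadoop.hbase.util.OrderedBytes;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

public class OrderedBytesRoundTrip {
  public static void main(String[] args) {
    byte[] bytes = new byte[9]; // encodeInt64 always writes 9 bytes
    PositionedByteRange br = new SimplePositionedByteRange(bytes, 0, 9);
    OrderedBytes.encodeInt64(br, 1438034423000L, Order.DESCENDING);

    br.setPosition(0); // rewind before decoding
    long decoded = OrderedBytes.decodeInt64(br);
    System.out.println(decoded); // prints 1438034423000
  }
}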

Example 12: testPreWALRestoreSkip

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
@Test (timeout=300000)
public void testPreWALRestoreSkip() throws Exception {
  LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
  TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
  HTable table = util.createTable(tableName, new byte[][] { A, B, C });

  JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
  ServerName sn2 = rs1.getRegionServer().getServerName();
  String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

  util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
  while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
    Thread.sleep(100);
  }

  Put put = new Put(ROW);
  put.add(A, A, A);
  put.add(B, B, B);
  put.add(C, C, C);
  table.put(put);
  table.flushCommits();

  cluster.killRegionServer(rs1.getRegionServer().getServerName());
  Threads.sleep(20000); // just to be sure that the kill has fully started.
  util.waitUntilAllRegionsAssigned(tableName);

  verifyMethodResult(SimpleRegionObserver.class, new String[] { "getCtPreWALRestore",
      "getCtPostWALRestore", "getCtPreWALRestoreDeprecated", "getCtPostWALRestoreDeprecated"},
      tableName,
      new Integer[] {0, 0, 0, 0});

  util.deleteTable(tableName);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 35, Source: TestRegionObserverInterface.java

Example 13: testOveralyOnOtherCluster

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
@Test
public void testOveralyOnOtherCluster() throws Exception {
  // just run HDFS
  HBaseTestingUtility util1 = new HBaseTestingUtility();
  MiniDFSCluster dfs = util1.startMiniDFSCluster(1);

  // run HBase on that HDFS
  HBaseTestingUtility util2 = new HBaseTestingUtility();
  // set the dfs
  util2.setDFSCluster(dfs, false);
  util2.startMiniCluster();

  //ensure that they are pointed at the same place
  FileSystem fs = dfs.getFileSystem();
  FileSystem targetFs = util2.getDFSCluster().getFileSystem();
  assertFsSameUri(fs, targetFs);

  fs = FileSystem.get(util1.getConfiguration());
  targetFs = FileSystem.get(util2.getConfiguration());
  assertFsSameUri(fs, targetFs);

  Path randomFile = new Path("/"+UUID.randomUUID());
  assertTrue(targetFs.createNewFile(randomFile));
  assertTrue(fs.exists(randomFile));

  // do a simple create/write to ensure the cluster works as expected
  byte[] family = Bytes.toBytes("testfamily");
  byte[] tablename = Bytes.toBytes("testtable");
  HTable table = util2.createTable(tablename, family);
  Put p = new Put(new byte[] { 1, 2, 3 });
  p.add(family, null, new byte[] { 1 });
  table.put(p);
  table.flushCommits();

  // shutdown and make sure cleanly shutting down
  util2.shutdownMiniCluster();
  util1.shutdownMiniDFSCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestHBaseOnOtherDfsCluster.java
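
The layering is the point here: util1 owns only a MiniDFSCluster, util2 starts HBase on top of that same DFS via setDFSCluster, and the random-file check proves both FileSystem handles see one namespace before the put/flushCommits exercises the stack end to end.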

Example 14: createMultiRegionsWithWritableSerialization

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
/**
 * Inserts multiple regions into hbase:meta using Writable serialization instead of PB
 */
public int createMultiRegionsWithWritableSerialization(final Configuration c,
    final TableName tableName, byte [][] startKeys)
throws IOException {
  Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
  HTable meta = new HTable(c, TableName.META_TABLE_NAME);

  List<HRegionInfo> newRegions
      = new ArrayList<HRegionInfo>(startKeys.length);
  int count = 0;
  for (int i = 0; i < startKeys.length; i++) {
    int j = (i + 1) % startKeys.length;
    HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
    Put put = new Put(hri.getRegionName());
    put.setDurability(Durability.SKIP_WAL);
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
      getBytes(hri)); //this is the old Writable serialization

    //also add the region as its own daughters
    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
        getBytes(hri)); //this is the old Writable serialization

    put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
        getBytes(hri)); //this is the old Writable serialization

    meta.put(put);
    LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());

    newRegions.add(hri);
    count++;
  }
  meta.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 37, Source: TestMetaMigrationConvertingToPB.java

Example 15: generateHBaseDataset2

import org.apache.hadoop.hbase.client.HTable; // import the package/class the method depends on
public static void generateHBaseDataset2(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("f"));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int rowCount = 0;
  byte[] bytes = null;
  final int numColumns = 5;
  Random random = new Random();
  int iteration = 0;
  while (rowCount < 1000) {
    char rowKeyChar = 'a';
    for (int i = 0; i < numberRegions; i++) {
      Put p = new Put((""+rowKeyChar+iteration).getBytes());
      for (int j = 1; j <= numColumns; j++) {
        bytes = new byte[5000];
        random.nextBytes(bytes);
        p.add("f".getBytes(), ("c"+j).getBytes(), bytes);
      }
      table.put(p);

      ++rowKeyChar;
      ++rowCount;
    }
    ++iteration;
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Developer: skhalifa, Project: QDrill, Lines: 44, Source: TestTableGenerator.java
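
Design note: each pass of the outer loop writes one row per presplit region, using a letter prefix ('a', 'b', ...) plus the iteration number as the row key, so the 1000 rows of five 5000-byte random columns are spread across all regions instead of landing in one.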


Note: The org.apache.hadoop.hbase.client.HTable.put method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community; copyright of the source code remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.