

Java Table.put Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.client.Table.put, collected from open-source code. If you are wondering what Table.put does, how to call it, or how it is used in practice, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Table.


The 15 Table.put code examples below are presented in order of popularity.
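Before turning to the examples, here is a minimal, self-contained sketch of the two Table.put overloads: put(Put) writes a single row, and put(List<Put>) sends a whole batch in one client call. It assumes an HBase 1.x-style client API and a reachable cluster with an existing table "demo" containing a column family "cf" (both names are hypothetical, chosen purely for illustration):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TablePutExample {
  public static void main(String[] args) throws IOException {
    // Assumes hbase-site.xml on the classpath points at a running cluster,
    // and that the (hypothetical) table "demo" with family "cf" already exists.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo"))) {

      // Overload 1: put(Put) -- write a single row.
      Put single = new Put(Bytes.toBytes("row-1"));
      single.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-1"));
      table.put(single);

      // Overload 2: put(List<Put>) -- write a batch of rows in one call.
      List<Put> batch = new ArrayList<>();
      for (int i = 2; i <= 5; i++) {
        Put p = new Put(Bytes.toBytes("row-" + i));
        p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        batch.add(p);
      }
      table.put(batch);
    }
  }
}

Because the batch overload groups mutations by region server before dispatching them, building up a List<Put> (as most of the test examples below do) is generally much cheaper than calling put once per row.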

Example 1: loadData

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private static void loadData(final Table ht, final byte[][] families,
    final int rows, final int flushes) throws IOException {
  List<Put> puts = new ArrayList<Put>(rows);
  byte[] qualifier = Bytes.toBytes("val");
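  // Each pass writes one batch of rows and flushes, so the table ends up with
  // "flushes" store files per column family for the compaction tests to work on.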
  for (int i = 0; i < flushes; i++) {
    for (int k = 0; k < rows; k++) {
      byte[] row = Bytes.toBytes(random.nextLong());
      Put p = new Put(row);
      for (int j = 0; j < families.length; ++j) {
        p.add(families[j], qualifier, row);
      }
      puts.add(p);
    }
    ht.put(puts);
    TEST_UTIL.flush();
    puts.clear();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: TestCompactionState.java

Example 2: runCoprocessorConnectionToRemoteTable

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
    boolean[] completeCheck) throws Throwable {
  HTableDescriptor primary = new HTableDescriptor(primaryTable);
  primary.addFamily(new HColumnDescriptor(family));
  // add our coprocessor
  primary.addCoprocessor(clazz.getName());

  HTableDescriptor other = new HTableDescriptor(otherTable);
  other.addFamily(new HColumnDescriptor(family));

  Admin admin = UTIL.getHBaseAdmin();
  admin.createTable(primary);
  admin.createTable(other);

  Table table = new HTable(UTIL.getConfiguration(), primaryTable);
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  table.put(p);
  table.close();

  Table target = new HTable(UTIL.getConfiguration(), otherTable);
  assertTrue("Didn't complete update to target table!", completeCheck[0]);
  assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
  target.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: TestOpenTableInCoprocessor.java

Example 3: setupBeforeClass

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configure to indicate which cp should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);

  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: TestCoprocessorEndpoint.java

Example 4: testBulkDeleteEndpoint

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
public void testBulkDeleteEndpoint() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteEndpoint");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);

  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: TestBulkDeleteProtocol.java

Example 5: testTimeRangeMapRed

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
@Test
public void testTimeRangeMapRed()
throws IOException, InterruptedException, ClassNotFoundException {
  final HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
  final HColumnDescriptor col = new HColumnDescriptor(FAMILY_NAME);
  col.setMaxVersions(Integer.MAX_VALUE);
  desc.addFamily(col);
  admin.createTable(desc);
  List<Put> puts = new ArrayList<Put>();
  for (Map.Entry<Long, Boolean> entry : TIMESTAMP.entrySet()) {
    Put put = new Put(KEY);
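    // Skip the WAL; durability is unnecessary for this test data and it speeds up the load.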
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
    puts.add(put);
  }
  Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
  table.put(puts);
  runTestOnTable();
  verify(table);
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: TestTimeRangeMapRed.java

Example 6: createTableAndWriteDataWithLabels

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  Table table = null;
  try {
    table = TEST_UTIL.createTable(tableName, fam);
    int i = 1;
    List<Put> puts = new ArrayList<Put>();
    for (String labelExp : labelExps) {
      Put put = new Put(Bytes.toBytes("row" + i));
      put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      put.setCellVisibility(new CellVisibility(labelExp));
      puts.add(put);
      i++;
    }
    table.put(puts);
  } finally {
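    // Note: close() runs here before the table is returned, so callers receive
    // an already-closed handle and should not issue further I/O on it.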
    if (table != null) {
      table.close();
    }
  }
  return table;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: TestVisibilityLabelsWithACL.java

Example 7: doIndexPut

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private void doIndexPut(final List<IndexPut> puts) throws IOException {
  Table temptable = null;
  Table tempCCT = null;
  for (IndexPut put : puts) {
    for (Map.Entry<byte[], Put> entry : put.getPuts().entrySet()) {
      temptable = indexTableMaps.get(entry.getKey());
      temptable.put(entry.getValue());
      Put cctPut = IndexUtils.parseCCTPut(indexDesc, entry.getValue());
      if (cctPut != null) {
        System.out.println("winter index table name: " + Bytes.toString(entry.getKey())
            + ", values: " + entry.getValue() + ", cct value: " + cctPut);
        tempCCT = cctTableMaps.get(Bytes.add(entry.getKey(), IndexConstants.CCT_FIX));
        tempCCT.put(cctPut);
      }
      // something to do here
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: IndexTable.java

Example 8: fillTable

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private void fillTable() throws IOException, InterruptedException {
  Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILIES, 3,
      Bytes.toBytes("row0"), Bytes.toBytes("row99"), NUM_RS);
  Random rand = new Random(19387129L);
  for (int iStoreFile = 0; iStoreFile < 4; ++iStoreFile) {
    for (int iRow = 0; iRow < 100; ++iRow) {
      final byte[] row = Bytes.toBytes("row" + iRow);
      Put put = new Put(row);
      Delete del = new Delete(row);
      for (int iCol = 0; iCol < 10; ++iCol) {
        final byte[] cf = rand.nextBoolean() ? CF1 : CF2;
        final long ts = Math.abs(rand.nextInt());
        final byte[] qual = Bytes.toBytes("col" + iCol);
        if (rand.nextBoolean()) {
          final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
              "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
              ts + "_random_" + rand.nextLong());
          put.add(cf, qual, ts, value);
        } else if (rand.nextDouble() < 0.8) {
          // delete just this version of the column
          del.addColumn(cf, qual, ts);
        } else {
          // delete all versions of the column up to this timestamp
          del.addColumns(cf, qual, ts);
        }
      }
      table.put(put);
      table.delete(del);
    }
  }
  TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestWALFiltering.java

Example 9: testConstraintFails

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
/**
 * Test that constraints will fail properly
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test(timeout = 60000)
public void testConstraintFails() throws Exception {

  // create the table
  // it would be nice if this was also a method on the util
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }

  // add a constraint that is sure to fail
  Constraints.add(desc, AllFailConstraint.class);

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);

  // test that we do fail on violation
  Put put = new Put(row1);
  put.add(dummy, new byte[0], "fail".getBytes());
  LOG.warn("Doing put in table");
  try {
    table.put(put);
    fail("This put should not have suceeded - AllFailConstraint was not run!");
  } catch (RetriesExhaustedWithDetailsException e) {
    List<Throwable> causes = e.getCauses();
    assertEquals(
        "More than one failure cause - should only be the failure constraint exception",
        1, causes.size());
    Throwable t = causes.get(0);
    assertEquals(ConstraintException.class, t.getClass());
  }
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: TestConstraint.java

Example 10: putAndWaitWithFamily

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private void putAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
  throws Exception {
  Put put = new Put(row);
  put.add(fam, row, val);
  source.put(put);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    boolean replicatedToAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        replicatedToAll = false;
        break;
      } else {
        assertEquals(1, res.size());
        assertArrayEquals(val, res.value());
      }
    }
    if (replicatedToAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: TestPerTableCFReplication.java

Example 11: putAndWait

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
private void putAndWait(byte[] row, byte[] fam, Table source, Table target)
    throws Exception {
  Put put = new Put(row);
  put.add(fam, row, row);
  source.put(put);
  wait(row, target, false);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 8, Source: TestMasterReplication.java

Example 12: createTableAndWriteDataWithLabels

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  List<Put> puts = new ArrayList<Put>();
  for (int i = 0; i < labelExps.length; i++) {
    Put put = new Put(Bytes.toBytes("row" + (i+1)));
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO);
    put.setCellVisibility(new CellVisibility(labelExps[i]));
    puts.add(put);
  }
  Table table = TEST_UTIL.createTable(tableName, TEST_FAMILY);
  table.put(puts);
  return table;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: TestWithDisabledAuthorization.java

Example 13: testMoveRegion

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
@Test
public void testMoveRegion() throws IOException, InterruptedException {
  String tableNameString = "testMoveRegion";
  TableName tableName = TableName.valueOf(tableNameString);
  Table t = TEST_UTIL.createTable(tableName, Bytes.toBytes("D"));
  TEST_UTIL.waitUntilAllRegionsAssigned(t.getName());
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  HRegionInfo regionInfo;
  byte[] row = Bytes.toBytes("r1");

  for (int i = 0; i < 30; i++) {
    boolean moved = false;
    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
      regionInfo = locator.getRegionLocation(row, true).getRegionInfo();
    }

    int currentServerIdx = cluster.getServerWith(regionInfo.getRegionName());
    int destServerIdx = (currentServerIdx + 1) % cluster.getLiveRegionServerThreads().size();
    HRegionServer currentServer = cluster.getRegionServer(currentServerIdx);
    HRegionServer destServer = cluster.getRegionServer(destServerIdx);
    byte[] destServerName = Bytes.toBytes(destServer.getServerName().getServerName());

    // Do a put. The counters should be non-zero now
    Put p = new Put(row);
    p.addColumn(Bytes.toBytes("D"), Bytes.toBytes("Zero"), Bytes.toBytes("VALUE"));
    t.put(p);

    MetricsRegionAggregateSource currentAgg = currentServer.getRegion(regionInfo.getRegionName())
        .getMetrics()
        .getSource()
        .getAggregateSource();

    String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR +
        "_table_" + tableNameString +
        "_region_" + regionInfo.getEncodedName() +
        "_metric";

    metricsHelper.assertCounter(prefix + "_mutateCount", 1, currentAgg);

    try {
      admin.move(regionInfo.getEncodedNameAsBytes(), destServerName);
      moved = true;
      Thread.sleep(5000);
    } catch (IOException ioe) {
      moved = false;
    }
    TEST_UTIL.waitUntilAllRegionsAssigned(t.getName());

    if (moved) {
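      // The destination server creates fresh metrics for the region, so its
      // mutate counter starts at zero after the move.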
      MetricsRegionAggregateSource destAgg = destServer.getRegion(regionInfo.getRegionName())
          .getMetrics()
          .getSource()
          .getAggregateSource();
      metricsHelper.assertCounter(prefix + "_mutateCount", 0, destAgg);
    }
  }

  TEST_UTIL.deleteTable(tableName);

}
 
Developer ID: fengchen8086, Project: ditb, Lines: 65, Source: TestRemoveRegionMetrics.java

Example 14: testRegionObserver

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
@Test(timeout = 300000)
public void testRegionObserver() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver");
  // recreate table every time in order to reset the status of the
  // coprocessor.
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
        "hadPrePut", "hadPostPut", "hadDelete", "hadPostStartRegionOperation",
        "hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, tableName,
      new Boolean[] { false, false, false, false, false, false, false, false });

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);

    verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
        "hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete",
        "hadPostStartRegionOperation", "hadPostCloseRegionOperation",
        "hadPostBatchMutateIndispensably" }, TEST_TABLE, new Boolean[] { false, false, true,
        true, true, true, false, true, true, true });

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
        tableName,
        new Integer[] {1, 1, 0, 0});

    Get get = new Get(ROW);
    get.addColumn(A, A);
    get.addColumn(B, B);
    get.addColumn(C, C);
    table.get(get);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDelete", "hadPrePreparedDeleteTS" },
        tableName,
        new Boolean[] { true, true, true, true, false, false });

    Delete delete = new Delete(ROW);
    delete.deleteColumn(A, A);
    delete.deleteColumn(B, B);
    delete.deleteColumn(C, C);
    table.delete(delete);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] { "hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS" },
        tableName,
        new Boolean[] { true, true, true, true, true, true, true, true });
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
  verifyMethodResult(SimpleRegionObserver.class,
      new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
      tableName,
      new Integer[] {1, 1, 1, 1});
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 64, Source: TestRegionObserverInterface.java

Example 15: testReplayEditsAfterRegionMovedWithMultiCF

import org.apache.hadoop.hbase.client.Table; // import the class/package the method depends on
/**
 * Replays WAL edits for a multi-column-family table after its region has been
 * moved between region servers and the original server has aborted.
 * @throws Exception
 */
@Test
public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
  final TableName tableName =
      TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
  byte[] family1 = Bytes.toBytes("cf1");
  byte[] family2 = Bytes.toBytes("cf2");
  byte[] qualifier = Bytes.toBytes("q");
  byte[] value = Bytes.toBytes("testV");
  byte[][] familys = { family1, family2 };
  TEST_UTIL.createTable(tableName, familys);
  Table htable = new HTable(TEST_UTIL.getConfiguration(), tableName);
  Put put = new Put(Bytes.toBytes("r1"));
  put.add(family1, qualifier, value);
  htable.put(put);
  ResultScanner resultScanner = htable.getScanner(new Scan());
  int count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(1, count);

  MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
  List<HRegion> regions = hbaseCluster.getRegions(tableName);
  assertEquals(1, regions.size());

  // move region to another regionserver
  Region destRegion = regions.get(0);
  int originServerNum = hbaseCluster
      .getServerWith(destRegion.getRegionInfo().getRegionName());
  assertTrue("Please start more than 1 regionserver", hbaseCluster
      .getRegionServerThreads().size() > 1);
  int destServerNum = 0;
  while (destServerNum == originServerNum) {
    destServerNum++;
  }
  HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
  HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
  // move region to destination regionserver
  moveRegionAndWait(destRegion, destServer);

  // delete the row
  Delete del = new Delete(Bytes.toBytes("r1"));
  htable.delete(del);
  resultScanner = htable.getScanner(new Scan());
  count = 0;
  while (resultScanner.next() != null) {
    count++;
  }
  resultScanner.close();
  assertEquals(0, count);

  // flush region and make major compaction
  Region region =  destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
  region.flush(true);
  // wait to complete major compaction
  for (Store store : region.getStores()) {
    store.triggerMajorCompaction();
  }
  region.compact(true);

  // move region to origin regionserver
  moveRegionAndWait(destRegion, originServer);
  // abort the origin regionserver
  originServer.abort("testing");

  // The row was deleted and the WAL replayed, so the get should come back empty.
  // (Table.get never returns null, so no null check is needed.)
  Result result = htable.get(new Get(Bytes.toBytes("r1")));
  assertTrue("Row is deleted, but we got " + result, result.isEmpty());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 79, Source: TestWALReplay.java


Note: The org.apache.hadoop.hbase.client.Table.put examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or reuse should follow the license of the corresponding project. Please do not repost without permission.