

Java Table.close Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.client.Table.close, drawn from open-source projects. If you are wondering what exactly Table.close does, how to call it, or where to find it used in practice, the curated method examples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.client.Table, the class this method belongs to.


The following presents a total of 15 code examples of the Table.close method, ordered by popularity by default.
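Before the individual examples, a minimal sketch of the pattern they all share may help: in the HBase client API (1.x and later), Table extends java.io.Closeable, so close() is either called explicitly in a finally block or handled implicitly by try-with-resources once the per-table handle is no longer needed, while the shared Connection is closed separately. The table name and row key in the sketch below are placeholders, not values taken from the examples.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableCloseSketch {
  public static void main(String[] args) throws IOException {
    // Table handles are lightweight and not thread-safe; the Connection is
    // heavyweight and shared. Both implement Closeable, so try-with-resources
    // calls table.close() first and then connection.close(), in reverse
    // declaration order.
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("my_table"))) { // placeholder table name
      Result result = table.get(new Get(Bytes.toBytes("row1")));             // placeholder row key
      System.out.println("cells returned: " + result.size());
    } // table.close() runs here even if the get() above throws
  }
}

Most of the examples below predate this style and instead close the table explicitly in a finally block, which is equivalent as long as close() runs on every code path.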

Example 1: createRegion

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: OfflineMetaRebuildTestCore.java

Example 2: testIncrementHook

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@Test (timeout=300000)
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false}
        );

    table.increment(inc);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true}
        );
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: TestRegionObserverInterface.java

Example 3: testTimeRangeMapRed

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@Test
public void testTimeRangeMapRed()
throws IOException, InterruptedException, ClassNotFoundException {
  final HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
  final HColumnDescriptor col = new HColumnDescriptor(FAMILY_NAME);
  col.setMaxVersions(Integer.MAX_VALUE);
  desc.addFamily(col);
  admin.createTable(desc);
  List<Put> puts = new ArrayList<Put>();
  for (Map.Entry<Long, Boolean> entry : TIMESTAMP.entrySet()) {
    Put put = new Put(KEY);
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY_NAME, COLUMN_NAME, entry.getKey(), Bytes.toBytes(false));
    puts.add(put);
  }
  Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
  table.put(puts);
  runTestOnTable();
  verify(table);
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: TestTimeRangeMapRed.java

Example 4: testConstraintPasses

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Test that we run a passing constraint
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test
public void testConstraintPasses() throws Exception {
  // create the table
  // it would be nice if this was also a method on the util
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }
  // add a constraint
  Constraints.add(desc, CheckWasRunConstraint.class);

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);
  try {
    // test that we don't fail on a valid put
    Put put = new Put(row1);
    byte[] value = Integer.toString(10).getBytes();
    put.add(dummy, new byte[0], value);
    table.put(put);
  } finally {
    table.close();
  }
  assertTrue(CheckWasRunConstraint.wasRun);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: TestConstraint.java

Example 5: testTableWithCFNameStartWithUnderScore

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@Test(timeout = 120000)
public void testTableWithCFNameStartWithUnderScore() throws Exception {
  Path dir = util.getDataTestDirOnTestFS("cfNameStartWithUnderScore");
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  String family = "_cf";
  Path familyDir = new Path(dir, family);

  byte[] from = Bytes.toBytes("begin");
  byte[] to = Bytes.toBytes("end");
  Configuration conf = util.getConfiguration();
  String tableName = "mytable_cfNameStartWithUnderScore";
  Table table = util.createTable(TableName.valueOf(tableName), family);
  HFileTestUtil.createHFile(conf, fs, new Path(familyDir, "hfile"), Bytes.toBytes(family),
    QUALIFIER, from, to, 1000);

  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
  String[] args = { dir.toString(), tableName };
  try {
    loader.run(args);
    assertEquals(1000, util.countRows(table));
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 28, Source: TestLoadIncrementalHFiles.java

Example 6: countAllTables

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
private void countAllTables(final HBaseConnection tableConfiguration) throws IOException {
    HBaseTable hBaseTable;
    for (final EventStoreTimeIntervalEnum interval : EventStoreTimeIntervalEnum.values()) {
        hBaseTable = (HBaseTable) eventStoreTableFactory.getEventStoreTable(interval);
        final Table tableInterface = hBaseTable.getTable();
        LOGGER.info("Row count for " + hBaseTable.getName() + " (" + countRows(tableInterface) + ")");
        tableInterface.close();
    }
}
 
Developer ID: gchq, Project: stroom-stats, Lines: 10, Source: StatisticsTestService.java

Example 7: prepareData

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
private static void prepareData() {
  try {
    Table table = new HTable(TestFilterWrapper.conf, name);
    assertTrue("Fail to create the table", admin.tableExists(name));
    List<Put> puts = new ArrayList<Put>();

    // row1 => <f1:c1, 1_c1, ts=1>, <f1:c2, 1_c2, ts=2>, <f1:c3, 1_c3,ts=3>,
    // <f1:c4,1_c4, ts=4>, <f1:c5, 1_c5, ts=5>
    // row2 => <f1:c1, 2_c1, ts=2>, <f1,c2, 2_c2, ts=2>, <f1:c3, 2_c3,ts=2>,
    // <f1:c4,2_c4, ts=2>, <f1:c5, 2_c5, ts=2>
    // row3 => <f1:c1, 3_c1, ts=3>, <f1:c2, 3_c2, ts=3>, <f1:c3, 3_c3,ts=2>,
    // <f1:c4,3_c4, ts=3>, <f1:c5, 3_c5, ts=3>
    for (int i = 1; i < 4; i++) {
      Put put = new Put(Bytes.toBytes("row" + i));
      for (int j = 1; j < 6; j++) {
        long timestamp = j;
        if (i != 1)
          timestamp = i;
        put.add(Bytes.toBytes("f1"), Bytes.toBytes("c" + j), timestamp,
            Bytes.toBytes(i + "_c" + j));
      }
      puts.add(put);
    }

    table.put(puts);
    table.close();
  } catch (IOException e) {
    assertNull("Exception found while putting data into table", e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestFilterWrapper.java

Example 8: invokeBulkDeleteProtocol

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
private long invokeBulkDeleteProtocol(TableName tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
    new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
    ServerRpcController controller = new ServerRpcController();
    BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
      new BlockingRpcCallback<BulkDeleteResponse>();

    public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
      Builder builder = BulkDeleteRequest.newBuilder();
      builder.setScan(ProtobufUtil.toScan(scan));
      builder.setDeleteType(deleteType);
      builder.setRowBatchSize(rowBatchSize);
      if (timeStamp != null) {
        builder.setTimestamp(timeStamp);
      }
      service.delete(controller, builder.build(), rpcCallback);
      return rpcCallback.get();
    }
  };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class, scan
      .getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  ht.close();
  return noOfDeletedRows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestBulkDeleteProtocol.java

Example 9: doBulkLoad

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
  throws TableNotFoundException, IOException
{
  Admin admin = null;
  Table t = table;
  Connection conn = table.getConnection();
  boolean closeConnWhenFinished = false;
  try {
    if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
      LOG.warn("managed connection cannot be used for bulkload. Creating unmanaged connection.");
      // can only use unmanaged connections from here on out.
      conn = ConnectionFactory.createConnection(table.getConfiguration());
      t = conn.getTable(table.getName());
      closeConnWhenFinished = true;
      if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
        throw new RuntimeException("Failed to create unmanaged connection.");
      }
      admin = conn.getAdmin();
    } else {
      admin = conn.getAdmin();
    }
    try (RegionLocator rl = conn.getRegionLocator(t.getName())) {
      doBulkLoad(hfofDir, admin, t, rl);
    }
  } finally {
    if (admin != null) admin.close();
    if (closeConnWhenFinished) {
      t.close();
      conn.close();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 43, Source: LoadIncrementalHFiles.java

Example 10: addToEachStartKey

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  Table t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  Table meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if (!hri.getTable().equals(TABLENAME)) {
      continue;
    }

    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: TestMasterTransitions.java

Example 11: countRows

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Return the number of rows in the given table.
 */
public int countRows(final TableName tableName) throws IOException {
  Table table = getConnection().getTable(tableName);
  try {
    return countRows(table);
  } finally {
    table.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 12, Source: HBaseTestingUtility.java

Example 12: RowResultGenerator

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter, final boolean cacheBlocks)
    throws IllegalArgumentException, IOException {
  Table table = RESTServlet.getInstance().getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col: rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 1) {
          get.addFamily(split[0]);
        } else if (split.length == 2) {
          get.addColumn(split[0], split[1]);
        } else {
          throw new IllegalArgumentException("Invalid column specifier.");
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    get.setCacheBlocks(cacheBlocks);
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.listCells().iterator();
    }
  } catch (DoNotRetryIOException | NeedUnmanagedConnectionException e) {
    // Warn here because Stargate will return 404 in the case if multiple
    // column families were specified but one did not exist -- currently
    // HBase will fail the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage but
    // help to avoid confusion by leaving a record of what happened here in
    // the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 41, Source: RowResultGenerator.java

Example 13: setUpBeforeClass

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  client = new Client(new Cluster().add("localhost",
    REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(
      TableModel.class,
      TableInfoModel.class,
      TableListModel.class,
      TableRegionModel.class);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(htd);
  byte[] k = new byte[3];
  byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
  List<Put> puts = new ArrayList<>();
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.add(famAndQf[0], famAndQf[1], k);
        puts.add(put);
      }
    }
  }
  Connection connection = TEST_UTIL.getConnection();
  
  Table table =  connection.getTable(TABLE);
  table.put(puts);
  table.close();
  // get the initial layout (should just be one region)
  
  RegionLocator regionLocator = connection.getRegionLocator(TABLE);
  List<HRegionLocation> m = regionLocator.getAllRegionLocations();
  assertEquals(m.size(), 1);
  // tell the master to split the table
  admin.split(TABLE);
  // give some time for the split to happen

  long timeout = System.currentTimeMillis() + (15 * 1000);
  while (System.currentTimeMillis() < timeout && m.size()!=2){
    try {
      Thread.sleep(250);
    } catch (InterruptedException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
    // check again
    m = regionLocator.getAllRegionLocations();
  }

  // should have two regions now
  assertEquals(m.size(), 2);
  regionMap = m;
  LOG.info("regions: " + regionMap);
  regionLocator.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 66, Source: TestTableResource.java

Example 14: testJoinedScanners

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@Test
public void testJoinedScanners() throws Exception {
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  HBaseTestingUtility htu = new HBaseTestingUtility();

  final int DEFAULT_BLOCK_SIZE = 1024*1024;
  htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 1);
  htu.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L);
  MiniHBaseCluster cluster = null;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte [][] families = {cf_essential, cf_joined};

    TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setDataBlockEncoding(blockEncoding);
      desc.addFamily(hcd);
    }
    htu.getHBaseAdmin().createTable(desc);
    Table ht = new HTable(htu.getConfiguration(), tableName);

    long rows_to_insert = 1000;
    int insert_batch = 20;
    long time = System.nanoTime();
    Random rand = new Random(time);

    LOG.info("Make " + Long.toString(rows_to_insert) + " rows, total size = "
      + Float.toString(rows_to_insert * valueWidth / 1024 / 1024) + " MB");

    byte [] val_large = new byte[valueWidth];

    List<Put> puts = new ArrayList<Put>();

    for (long i = 0; i < rows_to_insert; i++) {
      Put put = new Put(Bytes.toBytes(Long.toString (i)));
      if (rand.nextInt(100) <= selectionRatio) {
        put.add(cf_essential, col_name, flag_yes);
      } else {
        put.add(cf_essential, col_name, flag_no);
      }
      put.add(cf_joined, col_name, val_large);
      puts.add(put);
      if (puts.size() >= insert_batch) {
        ht.put(puts);
        puts.clear();
      }
    }
    if (puts.size() >= 0) {
      ht.put(puts);
      puts.clear();
    }

    LOG.info("Data generated in "
      + Double.toString((System.nanoTime() - time) / 1000000000.0) + " seconds");

    boolean slow = true;
    for (int i = 0; i < 10; ++i) {
      runScanner(ht, slow);
      slow = !slow;
    }

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 75, Source: TestJoinedScanners.java

Example 15: main

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
public static void main(String[] args) throws Exception {
  int numRegions = Integer.parseInt(args[0]);
  long numRows = Long.parseLong(args[1]);

  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
  htd.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  htd.addFamily(new HColumnDescriptor(FAMILY3));

  Configuration conf = HBaseConfiguration.create();
  Connection conn = ConnectionFactory.createConnection(conf);
  Admin admin = conn.getAdmin();
  if (admin.tableExists(TABLENAME)) {
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
  }
  if (numRegions >= 3) {
    byte[] startKey = new byte[16];
    byte[] endKey = new byte[16];
    Arrays.fill(endKey, (byte) 0xFF);
    admin.createTable(htd, startKey, endKey, numRegions);
  } else {
    admin.createTable(htd);
  }
  admin.close();

  Table table = conn.getTable(TABLENAME);
  byte[] qf = Bytes.toBytes("qf");
  Random rand = new Random();
  byte[] value1 = new byte[16];
  byte[] value2 = new byte[256];
  byte[] value3 = new byte[4096];
  for (long i = 0; i < numRows; i++) {
    Put put = new Put(Hashing.md5().hashLong(i).asBytes());
    rand.setSeed(i);
    rand.nextBytes(value1);
    rand.nextBytes(value2);
    rand.nextBytes(value3);
    put.addColumn(FAMILY1, qf, value1);
    put.addColumn(FAMILY2, qf, value2);
    put.addColumn(FAMILY3, qf, value3);
    table.put(put);
    if (i % 10000 == 0) {
      LOG.info(i + " rows put");
    }
  }
  table.close();
  conn.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 52, Source: TestPerColumnFamilyFlush.java


Note: The org.apache.hadoop.hbase.client.Table.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.