

Java HTablePool.getTable Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HTablePool.getTable. If you have been wondering how HTablePool.getTable is used in practice, what exactly it does, or where to find worked examples of it, the curated code samples below should help. You can also explore further usage examples of org.apache.hadoop.hbase.client.HTablePool, the class this method belongs to.


The following shows 15 code examples of HTablePool.getTable, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code samples.
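
Several of the standalone examples below (examples 1, 2, 5, and 11) reference a static configuration field that is never shown. Here is a minimal setup sketch, not taken from any of the quoted projects: it assumes a 0.9x-era HBase client, since HTablePool was deprecated in later releases and removed in HBase 1.0 in favor of connection-based getTable, and the ZooKeeper quorum hosts are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MyClass {
    // Shared by the query/insert examples below; the quorum hosts are placeholders.
    private static Configuration configuration;
    static {
        configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com");
    }
}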

Example 1: QueryByCondition1

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public static void QueryByCondition1(String tableName) {

    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    try {
        Get get = new Get("abcdef".getBytes()); // look the row up by its rowkey
        Result r = table.get(get);
        System.out.println("row key: " + new String(r.getRow()));
        for (KeyValue keyValue : r.raw()) {
            System.out.println("column family: " + new String(keyValue.getFamily())
                    + "====value: " + new String(keyValue.getValue()));
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 17, Source: MyClass.java
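
Note that example 1 never returns its table to the pool, and the (HTable) cast only succeeds on older clients; from roughly 0.92 onward, getTable hands back a pooled wrapper that merely implements HTableInterface, so the cast can throw ClassCastException. A safer variant of the same lookup, sketched against the interface with the table released in a finally block (the method name is illustrative; same assumed static configuration as above):

public static void queryByRowKey(String tableName, String rowKey) throws IOException {
    HTablePool pool = new HTablePool(configuration, 1000);
    HTableInterface table = pool.getTable(tableName); // no cast needed
    try {
        Result r = table.get(new Get(Bytes.toBytes(rowKey)));
        System.out.println("row key: " + Bytes.toString(r.getRow()));
    } finally {
        table.close(); // on 0.92+ clients this returns the table to the pool
    }
}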

Example 2: QueryByCondition2

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public static void QueryByCondition2(String tableName) {

    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa")); // match rows where column1 = "aaa"
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("row key: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column family: " + new String(keyValue.getFamily())
                        + "====value: " + new String(keyValue.getValue()));
            }
        }
        rs.close(); // release the scanner
    } catch (Exception e) {
        e.printStackTrace();
    }

}
 
Developer: yjp123456, Project: SparkDemo, Lines: 24, Source: MyClass.java
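
A SingleColumnValueFilter caveat that examples 2 and 11 both inherit: by default, rows that lack column1 entirely still pass the filter. To return only rows that actually contain a matching cell, set setFilterIfMissing(true); a small sketch against the same assumed schema:

SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
filter.setFilterIfMissing(true); // drop rows that have no column1 cell at all
Scan s = new Scan();
s.setFilter(filter);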

Example 3: main

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public static void main(String[] args) throws IOException {
	Configuration conf = HBaseConfiguration.create();
	conf.set("hbase.zookeeper.quorum", "master.hadoop,slave1.hadoop,slave2.hadoop");
	HTablePool pool = new HTablePool(conf, 1, new TableFactory2());
	HTableInterface table = pool.getTable(Bytes.toBytes("test3"));
	
	Get get1 = new Get(Bytes.toBytes("1"));
	table.get(get1);
	System.out.println(table);
	
	table.close(); // returns the table to the pool rather than closing the connection
	
	HTableInterface table2 = pool.getTable(Bytes.toBytes("test3"));
	table2.get(get1);
	System.out.println(table2);
	table2.close();
}
 
Developer: Justice-love, Project: oceandata, Lines: 18, Source: PoolTest0921.java

Example 4: BaseEntityBatch

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
/**
 * Checks an HTable out of the HTablePool and modifies it to take advantage of
 * batch puts. This is very useful when performing many consecutive puts.
 *
 * @param clientTemplate
 *          The client template to use
 * @param entityMapper
 *          The EntityMapper to use for mapping
 * @param pool
 *          The HBase table pool
 * @param tableName
 *          The name of the HBase table
 * @param writeBufferSize
 *          The batch buffer size in bytes.
 */
public BaseEntityBatch(HBaseClientTemplate clientTemplate,
    EntityMapper<E> entityMapper, HTablePool pool, String tableName,
    long writeBufferSize) {
  this.table = pool.getTable(tableName);
  this.table.setAutoFlush(false);
  this.clientTemplate = clientTemplate;
  this.entityMapper = entityMapper;
  this.state = ReaderWriterState.NEW;

  /**
   * If the writeBufferSize is less than the currentBufferSize, then the
   * buffer will get flushed automatically by HBase. This should never happen,
   * since we're getting a fresh table out of the pool, and the writeBuffer
   * should be empty.
   */
  try {
    table.setWriteBufferSize(writeBufferSize);
  } catch (IOException e) {
    throw new DatasetIOException("Error flushing commits for table ["
        + table + "]", e);
  }
}
 
Developer: cloudera, Project: cdk, Lines: 38, Source: BaseEntityBatch.java
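
BaseEntityBatch leans on the client-side write buffer: with setAutoFlush(false), puts accumulate locally and are shipped in bulk once the buffer fills or flushCommits() is called. A sketch of a caller driving such a batch directly; the table name, buffer size, and loop are illustrative, and the setters are assumed to be available on the pooled table just as in the cdk snippet above:

HTableInterface table = pool.getTable("events"); // "events" is a placeholder name
table.setAutoFlush(false);                 // buffer puts client-side
table.setWriteBufferSize(2 * 1024 * 1024); // flush roughly every 2 MB of puts
try {
    for (int i = 0; i < 10000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
        table.put(put);   // queued in the write buffer, not yet sent
    }
    table.flushCommits(); // push whatever is still buffered
} finally {
    table.close();
}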

Example 5: insertData

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public static void insertData(String tableName) {
    System.out.println("start insert data ......");
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    // One Put represents one row; new up another Put for each additional row. Every
    // row has a unique rowkey -- here it is the value passed to the Put constructor.
    Put put = new Put("112233bbbcccc".getBytes());
    put.add("column1".getBytes(), null, "aaa".getBytes()); // first column of this row
    put.add("column2".getBytes(), null, "bbb".getBytes()); // second column of this row
    put.add("column3".getBytes(), null, "ccc".getBytes()); // third column of this row
    try {
        table.put(put);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("end insert data ......");
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 16, Source: MyClass.java
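
As the comment in example 5 notes, one Put is one row. When writing several rows it is usually better to hand the client a whole list so the puts travel together; a sketch with made-up row keys, reusing example 5's column layout:

List<Put> puts = new ArrayList<Put>();
for (String rowKey : new String[] { "row1", "row2", "row3" }) {
    Put put = new Put(rowKey.getBytes());
    put.add("column1".getBytes(), null, "aaa".getBytes());
    puts.add(put);
}
table.put(puts); // one client call covering all three rows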

Example 6: RowResultGenerator

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter) throws IllegalArgumentException, IOException {
  HTablePool pool = RESTServlet.getInstance().getTablePool(); 
  HTableInterface table = pool.getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col: rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 2 && split[1].length != 0) {
          get.addColumn(split[0], split[1]);
        } else {
          get.addFamily(split[0]);
        }
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.list().iterator();
    }
  } catch (DoNotRetryIOException e) {
    // Warn here because Stargate will return 404 when multiple column
    // families were specified but one did not exist -- currently HBase
    // fails the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // this helps avoid confusion by leaving a record of what happened
    // here in the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    table.close();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 38, Source: RowResultGenerator.java

Example 7: getTableSchema

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
private HTableDescriptor getTableSchema() throws IOException,
    TableNotFoundException {
  HTablePool pool = servlet.getTablePool();
  HTableInterface table = pool.getTable(tableResource.getName());
  try {
    return table.getTableDescriptor();
  } finally {
    table.close();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 11, Source: SchemaResource.java

Example 8: ScannerResultGenerator

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter) throws IllegalArgumentException, IOException {
  HTablePool pool = RESTServlet.getInstance().getTablePool(); 
  HTableInterface table = pool.getTable(tableName);
  try {
    Scan scan;
    if (rowspec.hasEndRow()) {
      scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
    } else {
      scan = new Scan(rowspec.getStartRow());
    }
    if (rowspec.hasColumns()) {
      byte[][] columns = rowspec.getColumns();
      for (byte[] column: columns) {
        byte[][] split = KeyValue.parseColumn(column);
        if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
          scan.addColumn(split[0], split[1]);
        } else {
          scan.addFamily(split[0]);
        }
      }
    }
    scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());          
    scan.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      scan.setFilter(filter);
    }
    // always disable block caching on the cluster when scanning
    scan.setCacheBlocks(false);
    scanner = table.getScanner(scan);
    cached = null;
    id = Long.toString(System.currentTimeMillis()) +
           Integer.toHexString(scanner.hashCode());
  } finally {
    table.close();
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 38, Source: ScannerResultGenerator.java
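
ScannerResultGenerator disables block caching but leaves scanner caching at its default, so each step of the scan can cost a round trip. The knob for that lives on the Scan itself; a one-line sketch (100 is an arbitrary batch size):

scan.setCaching(100); // fetch 100 rows per RPC instead of the default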

Example 9: getTableRegions

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
private Map<HRegionInfo,HServerAddress> getTableRegions()
    throws IOException {
  HTablePool pool = servlet.getTablePool();
  HTableInterface table = pool.getTable(tableResource.getName());
  try {
    return ((HTable)table).getRegionsInfo();
  } finally {
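    // putTable returns the table to the pool; later clients deprecate it
    // in favor of simply calling table.close() (compare example 7 above)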
    pool.putTable(table);
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 11, Source: RegionsResource.java

Example 10: getTableSchema

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
private HTableDescriptor getTableSchema() throws IOException,
    TableNotFoundException {
  HTablePool pool = servlet.getTablePool();
  HTableInterface table = pool.getTable(tableResource.getName());
  try {
    return table.getTableDescriptor();
  } finally {
    pool.putTable(table);
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 11, Source: SchemaResource.java

Example 11: QueryByCondition3

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public static void QueryByCondition3(String tableName) {

    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);

        List<Filter> filters = new ArrayList<Filter>();

        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
        filters.add(filter1);

        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes("column2"),
                null, CompareOp.EQUAL, Bytes.toBytes("bbb"));
        filters.add(filter2);

        Filter filter3 = new SingleColumnValueFilter(Bytes.toBytes("column3"),
                null, CompareOp.EQUAL, Bytes.toBytes("ccc"));
        filters.add(filter3);

        FilterList filterList1 = new FilterList(filters); // defaults to MUST_PASS_ALL (AND)

        Scan scan = new Scan();
        scan.setFilter(filterList1);
        ResultScanner rs = table.getScanner(scan);
        for (Result r : rs) {
            System.out.println("row key: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column family: " + new String(keyValue.getFamily())
                        + "====value: " + new String(keyValue.getValue()));
            }
        }
        rs.close();

    } catch (Exception e) {
        e.printStackTrace();
    }

}
 
Developer: yjp123456, Project: SparkDemo, Lines: 43, Source: MyClass.java
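
The single-argument FilterList constructor used above combines its filters with Operator.MUST_PASS_ALL, i.e. all three column checks are ANDed together. For an OR across the same filters, pass the operator explicitly; a one-line sketch:

FilterList anyOf = new FilterList(FilterList.Operator.MUST_PASS_ONE, filters); // logical OR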

Example 12: updateBinary

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
Response updateBinary(final byte[] message, final HttpHeaders headers,
    final boolean replace) {
  servlet.getMetrics().incrementRequests(1);
  if (servlet.isReadOnly()) {
    return Response.status(Response.Status.FORBIDDEN)
      .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
      .build();
  }
  HTablePool pool = servlet.getTablePool();
  HTableInterface table = null;
  try {
    byte[] row = rowspec.getRow();
    byte[][] columns = rowspec.getColumns();
    byte[] column = null;
    if (columns != null) {
      column = columns[0];
    }
    long timestamp = HConstants.LATEST_TIMESTAMP;
    List<String> vals = headers.getRequestHeader("X-Row");
    if (vals != null && !vals.isEmpty()) {
      row = Bytes.toBytes(vals.get(0));
    }
    vals = headers.getRequestHeader("X-Column");
    if (vals != null && !vals.isEmpty()) {
      column = Bytes.toBytes(vals.get(0));
    }
    vals = headers.getRequestHeader("X-Timestamp");
    if (vals != null && !vals.isEmpty()) {
      timestamp = Long.valueOf(vals.get(0));
    }
    if (column == null) {
      return Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
        .build();
    }
    Put put = new Put(row);
    byte parts[][] = KeyValue.parseColumn(column);
    if (parts.length == 2 && parts[1].length > 0) {
      put.add(parts[0], parts[1], timestamp, message);
    } else {
      put.add(parts[0], null, timestamp, message);
    }
    table = pool.getTable(tableResource.getName());
    table.put(put);
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + put.toString());
    }
    servlet.getMetrics().incrementSucessfulPutRequests(1);
    return Response.ok().build();
  } catch (IOException e) {
    servlet.getMetrics().incrementFailedPutRequests(1);
    return Response.status(Response.Status.SERVICE_UNAVAILABLE)
      .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
      .build();
  } finally {
    if (table != null) try {
      table.close();
    } catch (IOException ioe) { }
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 61, Source: RowResource.java

Example 13: checkAndPut

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
/**
 * Validates the input request parameters, parses columns from CellSetModel,
 * and invokes checkAndPut on HTable.
 *
 * @param model instance of CellSetModel
 * @return Response 200 OK, 304 Not modified, 400 Bad request
 */
Response checkAndPut(final CellSetModel model) {
  HTablePool pool = servlet.getTablePool();
  HTableInterface table = null;
  try {
    if (model.getRows().size() != 1) {
      return Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
        .build();
    }

    RowModel rowModel = model.getRows().get(0);
    byte[] key = rowModel.getKey();
    if (key == null) {
      key = rowspec.getRow();
    }

    List<CellModel> cellModels = rowModel.getCells();
    int cellModelCount = cellModels.size();
    if (key == null || cellModelCount <= 1) {
      return Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
        .build();
    }

    Put put = new Put(key);
    CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
    byte[] valueToCheckColumn = valueToCheckCell.getColumn();
    byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
    if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
      CellModel valueToPutCell = null;
      for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
        if(Bytes.equals(cellModels.get(i).getColumn(),
            valueToCheckCell.getColumn())) {
          valueToPutCell = cellModels.get(i);
          break;
        }
      }
      if (valueToPutCell != null) {
        put.add(valueToPutParts[0], valueToPutParts[1], valueToPutCell
          .getTimestamp(), valueToPutCell.getValue());
      } else {
        return Response.status(Response.Status.BAD_REQUEST)
          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
          .build();
      }
    } else {
      return Response.status(Response.Status.BAD_REQUEST)
        .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
        .build();
    }

    table = pool.getTable(this.tableResource.getName());
    boolean retValue = table.checkAndPut(key, valueToPutParts[0],
      valueToPutParts[1], valueToCheckCell.getValue(), put);
    if (LOG.isDebugEnabled()) {
      LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
    }
    table.flushCommits();
    ResponseBuilder response = Response.ok();
    if (!retValue) {
      response = Response.status(304);
    }
    return response.build();
  } catch (IOException e) {
    return Response.status(Response.Status.SERVICE_UNAVAILABLE)
      .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
      .build();
  } finally {
    if (table != null) try {
      table.close();
    } catch (IOException ioe) { }
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 81, Source: RowResource.java
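
The heart of example 13 is HTable's atomic compare-and-set: checkAndPut applies the Put only if the current value of the check column equals the supplied value, and returns false otherwise (hence the 304 above). A direct-API sketch outside the REST plumbing, with placeholder row, family, and qualifier names:

boolean applied = table.checkAndPut(Bytes.toBytes("row1"),
        Bytes.toBytes("f"), Bytes.toBytes("q"),
        Bytes.toBytes("expected"), put); // the put is applied only on a match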

Example 14: RowResultGenerator

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
public RowResultGenerator(final String tableName, final RowSpec rowspec,
    final Filter filter) throws IllegalArgumentException, IOException {
  HTablePool pool = RESTServlet.getInstance().getTablePool(); 
  HTableInterface table = pool.getTable(tableName);
  try {
    Get get = new Get(rowspec.getRow());
    if (rowspec.hasColumns()) {
      for (byte[] col: rowspec.getColumns()) {
        byte[][] split = KeyValue.parseColumn(col);
        if (split.length == 2 && split[1].length != 0) {
          get.addColumn(split[0], split[1]);
        } else {
          get.addFamily(split[0]);
        }
      }
    } else {
      // rowspec does not explicitly specify columns, return them all
      for (HColumnDescriptor family: 
          table.getTableDescriptor().getFamilies()) {
        get.addFamily(family.getName());
      }
    }
    get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
    get.setMaxVersions(rowspec.getMaxVersions());
    if (filter != null) {
      get.setFilter(filter);
    }
    Result result = table.get(get);
    if (result != null && !result.isEmpty()) {
      valuesI = result.list().iterator();
    }
  } catch (NoSuchColumnFamilyException e) {
    // Warn here because Stargate will return 404 when multiple column
    // families were specified but one did not exist -- currently HBase
    // fails the whole Get.
    // Specifying multiple columns in a URI should be uncommon usage, but
    // this helps avoid confusion by leaving a record of what happened
    // here in the log.
    LOG.warn(StringUtils.stringifyException(e));
  } finally {
    pool.putTable(table);
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 44, Source: RowResultGenerator.java

Example 15: updateBinary

import org.apache.hadoop.hbase.client.HTablePool; // import the package/class the method depends on
Response updateBinary(final byte[] message, final HttpHeaders headers,
    final boolean replace) {
  servlet.getMetrics().incrementRequests(1);
  if (servlet.isReadOnly()) {
    throw new WebApplicationException(Response.Status.FORBIDDEN);
  }
  HTablePool pool = servlet.getTablePool();
  HTableInterface table = null;
  try {
    byte[] row = rowspec.getRow();
    byte[][] columns = rowspec.getColumns();
    byte[] column = null;
    if (columns != null) {
      column = columns[0];
    }
    long timestamp = HConstants.LATEST_TIMESTAMP;
    List<String> vals = headers.getRequestHeader("X-Row");
    if (vals != null && !vals.isEmpty()) {
      row = Bytes.toBytes(vals.get(0));
    }
    vals = headers.getRequestHeader("X-Column");
    if (vals != null && !vals.isEmpty()) {
      column = Bytes.toBytes(vals.get(0));
    }
    vals = headers.getRequestHeader("X-Timestamp");
    if (vals != null && !vals.isEmpty()) {
      timestamp = Long.valueOf(vals.get(0));
    }
    if (column == null) {
      throw new WebApplicationException(Response.Status.BAD_REQUEST);
    }
    Put put = new Put(row);
    byte parts[][] = KeyValue.parseColumn(column);
    if (parts.length == 2 && parts[1].length > 0) {
      put.add(parts[0], parts[1], timestamp, message);
    } else {
      put.add(parts[0], null, timestamp, message);
    }
    table = pool.getTable(tableResource.getName());
    table.put(put);
    if (LOG.isDebugEnabled()) {
      LOG.debug("PUT " + put.toString());
    }
    return Response.ok().build();
  } catch (IOException e) {
    throw new WebApplicationException(e,
                Response.Status.SERVICE_UNAVAILABLE);
  } finally {
    if (table != null) {
      try {
        pool.putTable(table);
      } catch (IOException ioe) {
        throw new WebApplicationException(ioe,
            Response.Status.SERVICE_UNAVAILABLE);
      }
    }
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 59, Source: RowResource.java


Note: The org.apache.hadoop.hbase.client.HTablePool.getTable method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not repost without permission.