Java HTableInterface Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.HTableInterface. If you are wondering what HTableInterface is for, how to use it, or where to find examples of it, the curated class examples below should help.


The HTableInterface class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
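Before the examples, here is a minimal, self-contained sketch of how an HTableInterface is typically obtained and released with the HBase 0.98/1.0-era client API these snippets target (the table name "demo_table" is a placeholder):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;

public class HTableInterfaceDemo {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // An HConnection is heavyweight: create it once and share it across threads
        HConnection conn = HConnectionManager.createConnection(conf);
        try {
            // getTable returns a lightweight, non-thread-safe handle
            HTableInterface table = conn.getTable("demo_table"); // placeholder table name
            try {
                // ... issue Gets/Puts/Scans against the table here ...
            } finally {
                table.close(); // cheap to close; do it after each unit of work
            }
        } finally {
            conn.close();
        }
    }
}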

Example 1: QueryAll

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public static void QueryAll(String tableName) {
    try {
        // conn is assumed to be an HConnection field initialized elsewhere in the class
        HTableInterface table = conn.getTable(tableName);

        // An empty Scan returns every row in the table
        ResultScanner rs = table.getScanner(new Scan());
        try {
            for (Result r : rs) {
                System.out.println("rowkey:" + new String(r.getRow()));
                for (KeyValue keyValue : r.raw()) {
                    System.out.println("column:" + new String(keyValue.getFamily())
                            + "====value:" + new String(keyValue.getValue()));
                }
            }
        } finally {
            rs.close(); // always release the scanner so the region server frees its resources
        }
        table.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Author: yjp123456, Project: SparkDemo, Lines: 18, Source: MyClass.java
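Note that KeyValue.getFamily() and getValue() were deprecated in later HBase clients in favor of the Cell API. A hedged sketch of the same print loop rewritten against Cell/CellUtil, using the same table as above:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

ResultScanner rs = table.getScanner(new Scan());
try {
    for (Result r : rs) {
        System.out.println("rowkey:" + Bytes.toString(r.getRow()));
        for (Cell cell : r.rawCells()) {
            System.out.println("column:" + Bytes.toString(CellUtil.cloneFamily(cell))
                    + "====value:" + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
} finally {
    rs.close();
}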

Example 2: copyTable

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Copy one column of the source table into a newly created table.
 * 
 * @throws IOException
 */
public static void copyTable(String oldTableName, String newTableName, String columnFamily, String columnName) throws IOException {
	if (CreateNewTable(newTableName)) {
		logger.info("Created table " + newTableName + " successfully");
	} else {
		logger.info("Failed to create table " + newTableName);
	}
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false); // a one-off copy scan should not pollute the block cache
	// hbase_table is a class-level handle to the source table; the oldTableName
	// parameter is unused in the original snippet
	ResultScanner rs = hbase_table.getScanner(s);

	HTableInterface hbase_table_new = conn.getTable(newTableName);
	for (Result r : rs) {
		byte[] key = r.getRow();
		byte[] value = r.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
		Put put = new Put(key);
		put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName), value);
		hbase_table_new.put(put);
	}
	rs.close();
	hbase_table_new.close();
}
 
Author: ItGql, Project: SparkIsax, Lines: 29, Source: HBaseUtils.java
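Writing one Put per row, as above, pays an RPC round trip per row once the client buffer flushes. HTableInterface also accepts a List<Put>, so a bulk copy is usually faster when the loop batches its writes; a sketch under the same fields as the method above (the batch size of 1000 is an arbitrary placeholder):

import java.util.ArrayList;
import java.util.List;

List<Put> buffer = new ArrayList<Put>();
for (Result r : rs) {
	Put put = new Put(r.getRow());
	put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName),
			r.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName)));
	buffer.add(put);
	if (buffer.size() >= 1000) { // flush every 1000 rows
		hbase_table_new.put(buffer);
		buffer.clear();
	}
}
if (!buffer.isEmpty()) {
	hbase_table_new.put(buffer); // flush the remainder
}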

Example 3: beforeTest

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
@Before
public void beforeTest() throws Exception {
  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  HTable table = createTable(pruneStateTable.getName(), new byte[][]{DataJanitorState.FAMILY}, false,
                             // Prune state table is a non-transactional table, hence no transaction co-processor
                             Collections.<String>emptyList());
  table.close();
  connection = HConnectionManager.createConnection(conf);

  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public HTableInterface get() throws IOException {
        return connection.getTable(pruneStateTable);
      }
    });

}
 
Author: apache, Project: incubator-tephra, Lines: 20, Source: DataJanitorStateTest.java

Example 4: updateProfileCountsForSaleInHBase

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event) throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();

  // Credit the sale against the buyer's purchase totals. (The original snippet
  // incremented TOTAL_VALUE_OF_PAST_SELLS_COL for the buyer, which looks like a
  // copy-paste slip given the seller block below.)
  Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, event.getItemValue());
  actions.add(buyerValueIncrement);

  // Credit the same sale against the seller's sell totals
  Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(sellerValueIncrement);

  profileTable.batch(actions);
}
 
Author: amitchmca, Project: hadooparchitecturebook, Lines: 18, Source: BasicFraudHBaseService.java
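The single-argument batch(List) used above surfaces a partial failure only as an exception. The two-argument overload batch(actions, results) fills a caller-supplied array with one entry per action, so individual failures can be inspected; a hedged sketch with the same profileTable and actions:

import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;

Object[] results = new Object[actions.size()];
try {
  profileTable.batch(actions, results);
} catch (RetriesExhaustedWithDetailsException e) {
  // On success results[i] is a Result object; for a failed action it holds the Throwable
  for (int i = 0; i < results.length; i++) {
    if (results[i] instanceof Throwable) {
      System.err.println("action " + i + " failed: " + results[i]);
    }
  }
}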

Example 5: logInProfileInHBase

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);

  ArrayList<Row> actions = new ArrayList<Row>();

  byte[] profileRowKey = generateProfileRowKey(userId);

  // Clear the per-session counters left over from the previous log-in
  Delete delete = new Delete(profileRowKey);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
  actions.add(delete);

  Increment increment = new Increment(profileRowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  actions.add(increment);

  Put put = new Put(profileRowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  actions.add(put);

  // Note: batch() does not guarantee ordering among the Delete, Increment, and Put
  profileTable.batch(actions);
}
 
Author: amitchmca, Project: hadooparchitecturebook, Lines: 24, Source: BasicFraudHBaseService.java

Example 6: createProfile

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
@Override
public void createProfile(long userId, ProfilePojo pojo, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  
  ArrayList<Row> actions = new ArrayList<Row>();
  
  byte[] rowKey = generateProfileRowKey(userId);
  Put put = new Put(rowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL, Bytes.toBytes(pojo.getUsername() + "|" + pojo.getAge() + "|" + System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  actions.add(put);
  
  // Increments of 0 materialize the counter cells with an initial value of zero
  Increment increment = new Increment(rowKey);
  
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
  actions.add(increment);
  
  profileTable.batch(actions);
}
 
Author: amitchmca, Project: hadooparchitecturebook, Lines: 27, Source: BasicFraudHBaseService.java

Example 7: SecondaryIndexTable

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public SecondaryIndexTable(TransactionServiceClient transactionServiceClient, HTableInterface hTable,
                           byte[] secondaryIndex) {
  secondaryIndexTableName = TableName.valueOf(hTable.getName().getNameAsString() + ".idx");
  HTable secondaryIndexHTable = null;
  try (HBaseAdmin hBaseAdmin = new HBaseAdmin(hTable.getConfiguration())) {
    if (!hBaseAdmin.tableExists(secondaryIndexTableName)) {
      hBaseAdmin.createTable(new HTableDescriptor(secondaryIndexTableName));
    }
    secondaryIndexHTable = new HTable(hTable.getConfiguration(), secondaryIndexTableName);
  } catch (Exception e) {
    Throwables.propagate(e);
  }

  this.secondaryIndex = secondaryIndex;
  this.transactionAwareHTable = new TransactionAwareHTable(hTable);
  this.secondaryIndexTable = new TransactionAwareHTable(secondaryIndexHTable);
  this.transactionContext = new TransactionContext(transactionServiceClient, transactionAwareHTable,
                                                   secondaryIndexTable);
}
 
Author: apache, Project: incubator-tephra, Lines: 20, Source: SecondaryIndexTable.java
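The constructor above registers both tables with a shared TransactionContext; callers then drive the standard Tephra start/finish/abort protocol. A minimal sketch of that lifecycle (a hedged illustration, not a method of the class above):

public void putWithTransaction(Put put) throws TransactionFailureException {
  transactionContext.start();          // begin a new transaction across both tx-aware tables
  try {
    transactionAwareHTable.put(put);   // the write is tagged with the transaction's write pointer
    transactionContext.finish();       // commit: conflict detection, then make changes visible
  } catch (Exception e) {
    transactionContext.abort();        // roll back this transaction's uncommitted writes
  }
}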

Example 8: verifyRows

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
private void verifyRows(HTableInterface table, Get get, List<byte[]> expectedValues) throws Exception {
  Result result = table.get(get);
  if (expectedValues == null) {
    assertTrue(result.isEmpty());
  } else {
    assertFalse(result.isEmpty());
    byte[] family = TestBytes.family;
    byte[] col = TestBytes.qualifier;
    if (get.hasFamilies()) {
      family = get.getFamilyMap().keySet().iterator().next();
      col = get.getFamilyMap().get(family).first();
    }
    Iterator<Cell> it = result.getColumnCells(family, col).iterator();
    for (byte[] expectedValue : expectedValues) {
      Assert.assertTrue(it.hasNext());
      assertArrayEquals(expectedValue, CellUtil.cloneValue(it.next()));
    }
  }
}
 
Author: apache, Project: incubator-tephra, Lines: 20, Source: TransactionAwareHTableTest.java

Example 9: getRegionsOnOrBeforeTime

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Return the set of regions saved for the time at or before the given time. This method finds the greatest time
 * that is less than or equal to the given time, and then returns all regions with that exact time, but none that are
 * older than that.
 *
 * @param time timestamp in milliseconds
 * @return set of regions and time at which they were recorded, or null if no regions found
 * @throws IOException when not able to read the data from HBase
 */
@Nullable
public TimeRegions getRegionsOnOrBeforeTime(long time) throws IOException {
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    TimeRegions timeRegions;
    while ((timeRegions = getNextSetOfTimeRegions(stateTable, time)) != null) {
      int count = getRegionCountForTime(stateTable, timeRegions.getTime());
      if (count != -1 && count == timeRegions.getRegions().size()) {
        return timeRegions;
      } else {
        LOG.warn(String.format("Got incorrect count for regions saved at time %s, expected = %s but actual = %s",
                               timeRegions.getTime(), count, timeRegions.getRegions().size()));
        time = timeRegions.getTime() - 1;
      }
    }
    return null;
  }
}
 
Author: apache, Project: incubator-tephra, Lines: 27, Source: DataJanitorState.java
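A hedged usage sketch: asking for the regions recorded at or before the current time, given a dataJanitorState built as in Example 3. The loop inside the method already steps back past inconsistent snapshots, so the caller only checks for null:

TimeRegions timeRegions = dataJanitorState.getRegionsOnOrBeforeTime(System.currentTimeMillis());
if (timeRegions != null) {
  System.out.println("snapshot time = " + timeRegions.getTime()
      + ", regions = " + timeRegions.getRegions().size());
}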

Example 10: initialize

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
@Override
public void initialize(Configuration conf) throws IOException {
  this.conf = conf;
  this.hBaseAdmin = new HBaseAdmin(conf);
  this.connection = HConnectionManager.createConnection(conf);

  final TableName stateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                                          TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  LOG.info("Initializing plugin with state table {}:{}", stateTable.getNamespaceAsString(),
           stateTable.getNameAsString());
  createPruneTable(stateTable);
  this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return connection.getTable(stateTable);
    }
  });
}
 
Author: apache, Project: incubator-tephra, Lines: 19, Source: HBaseTransactionPruningPlugin.java

Example 11: getPruneInfoForRegions

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Gets a list of {@link RegionPruneInfo} for given regions. Returns all regions if the given regions set is null.
 *
 * @param regions a set of regions
 * @return list of {@link RegionPruneInfo}s.
 * @throws IOException when not able to read the data from HBase
 */
public List<RegionPruneInfo> getPruneInfoForRegions(@Nullable SortedSet<byte[]> regions) throws IOException {
  List<RegionPruneInfo> regionPruneInfos = new ArrayList<>();
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (regions == null || regions.contains(region)) {
          Cell cell = next.getColumnLatestCell(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (cell != null) {
            byte[] pruneUpperBoundBytes = CellUtil.cloneValue(cell);
            long timestamp = cell.getTimestamp();
            regionPruneInfos.add(new RegionPruneInfo(region, Bytes.toStringBinary(region),
                                                     Bytes.toLong(pruneUpperBoundBytes), timestamp));
          }
        }
      }
    }
  }
  return Collections.unmodifiableList(regionPruneInfos);
}
 
Author: apache, Project: incubator-tephra, Lines: 33, Source: DataJanitorState.java
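Per the contract above, passing null returns prune info for every region. A small usage sketch; the accessor names are assumptions mirroring the constructor arguments shown in the method body:

for (RegionPruneInfo info : dataJanitorState.getPruneInfoForRegions(null)) {
  // getRegionNameAsString()/getPruneUpperBound() assumed from the constructor args above
  System.out.println(info.getRegionNameAsString() + " -> " + info.getPruneUpperBound());
}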

Example 12: deletePruneUpperBounds

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Delete prune upper bounds for regions that are not in the given exclude set and whose
 * prune upper bound is less than the given value.
 * After the invalid list is pruned up to deletionPruneUpperBound, we do not need entries for regions that have
 * prune upper bound less than deletionPruneUpperBound. We however limit the deletion to only regions that are
 * no longer in existence (due to deletion, etc.), to avoid update/delete race conditions.
 *
 * @param deletionPruneUpperBound prune upper bound below which regions will be deleted
 * @param excludeRegions set of regions that should not be deleted
 * @throws IOException when not able to delete data in HBase
 */
public void deletePruneUpperBounds(long deletionPruneUpperBound, SortedSet<byte[]> excludeRegions)
  throws IOException {
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (!excludeRegions.contains(region)) {
          byte[] timeBytes = next.getValue(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (timeBytes != null) {
            long pruneUpperBoundRegion = Bytes.toLong(timeBytes);
            if (pruneUpperBoundRegion < deletionPruneUpperBound) {
              stateTable.delete(new Delete(next.getRow()));
            }
          }
        }
      }
    }
  }
}
 
Author: apache, Project: incubator-tephra, Lines: 36, Source: DataJanitorState.java

Example 13: getEmptyRegionsAfterTime

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Return regions that were recorded as empty after the given time.
 *
 * @param time time in milliseconds
 * @param includeRegions If not null, the returned set will be an intersection of the includeRegions set
 *                       and the empty regions after the given time
 * @return sorted set of regions recorded as empty after the given time
 */
public SortedSet<byte[]> getEmptyRegionsAfterTime(long time, @Nullable SortedSet<byte[]> includeRegions)
  throws IOException {
  SortedSet<byte[]> emptyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    Scan scan = new Scan(makeEmptyRegionTimeKey(Bytes.toBytes(time + 1), EMPTY_BYTE_ARRAY),
                         EMPTY_REGION_TIME_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, EMPTY_REGION_TIME_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] emptyRegion = getEmptyRegionFromKey(next.getRow());
        if (includeRegions == null || includeRegions.contains(emptyRegion)) {
          emptyRegions.add(emptyRegion);
        }
      }
    }
  }
  return Collections.unmodifiableSortedSet(emptyRegions);
}
 
Author: apache, Project: incubator-tephra, Lines: 28, Source: DataJanitorState.java

Example 14: HBasePStore

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public HBasePStore(PStoreConfig<V> config, HTableInterface table) throws IOException {
  this.tableName = config.getName() + '\0';
  this.tableNameStartKey = Bytes.toBytes(tableName); // "tableName\x00"
  this.tableNameStopKey = this.tableNameStartKey.clone();
  this.tableNameStopKey[tableNameStartKey.length - 1] = 1; // "tableName\x01", exclusive stop key
  this.config = config;
  this.table = table;
}
 
Author: skhalifa, Project: QDrill, Lines: 9, Source: HBasePStore.java
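The two keys built above bracket exactly this store's entries: every stored key is "tableName\x00" + entryKey, the start key is that prefix itself, and bumping the final byte from 0 to 1 yields an exclusive stop key. A short sketch of how those bounds would drive a range scan over the store (assuming the fields above):

// [ "tableName\x00", "tableName\x01" ) covers every key belonging to this store
Scan scan = new Scan(tableNameStartKey, tableNameStopKey);
ResultScanner scanner = table.getScanner(scan);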

Example 15: shutdown

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/** Clean up the environment */
protected void shutdown() {
  if (state == Coprocessor.State.ACTIVE) {
    state = Coprocessor.State.STOPPING;
    Thread currentThread = Thread.currentThread();
    ClassLoader hostClassLoader = currentThread.getContextClassLoader();
    try {
      currentThread.setContextClassLoader(this.getClassLoader());
      impl.stop(this);
      state = Coprocessor.State.STOPPED;
    } catch (IOException ioe) {
      LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe);
    } finally {
      currentThread.setContextClassLoader(hostClassLoader);
    }
  } else {
    LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+
        " because not active (state="+state.toString()+")");
  }
  synchronized (openTables) {
    // clean up any table references
    for (HTableInterface table: openTables) {
      try {
        ((HTableWrapper)table).internalClose();
      } catch (IOException e) {
        // nothing can be done here
        LOG.warn("Failed to close " +
            Bytes.toStringBinary(table.getTableName()), e);
      }
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 33, Source: CoprocessorHost.java


Note: The org.apache.hadoop.hbase.client.HTableInterface class examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers, and copyright in the source code remains with the original authors; refer to each project's License before distributing or using the code. Do not republish without permission.