

Java HTableInterface Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.HTableInterface. If you are wondering what HTableInterface is for, how to use it, or what working code looks like, the curated class examples below should help.


The HTableInterface class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
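Before the individual examples, here is a minimal end-to-end sketch of the typical HTableInterface lifecycle: obtain a table handle from an HConnection, read or write through it, and close it. This is an illustrative sketch against the pre-1.0 HBase client API used throughout the examples below; the table name "demo_table" and row key "row1" are placeholder assumptions, not taken from any example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableInterfaceSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HConnection connection = HConnectionManager.createConnection(conf);
        try {
            // HTableInterface is the per-table handle for reads and writes.
            HTableInterface table = connection.getTable("demo_table"); // placeholder name
            try {
                Result result = table.get(new Get(Bytes.toBytes("row1")));
                System.out.println("row1 empty? " + result.isEmpty());
            } finally {
                table.close(); // returns the handle to the connection's pool
            }
        } finally {
            connection.close();
        }
    }
}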

Example 1: QueryAll

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public static void QueryAll(String tableName) {
    try {
        HTableInterface table = conn.getTable(tableName);

        // An empty Scan walks every row of the table.
        ResultScanner rs = table.getScanner(new Scan());
        try {
            for (Result r : rs) {
                System.out.println("rowkey:" + new String(r.getRow()));
                for (KeyValue keyValue : r.raw()) {
                    System.out.println("column:" + new String(keyValue.getFamily())
                            + "====value:" + new String(keyValue.getValue()));
                }
            }
        } finally {
            rs.close(); // release the scanner's server-side resources
        }
        table.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: yjp123456, Project: SparkDemo, Lines: 18, Source: MyClass.java
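Note that Result.raw() and KeyValue are deprecated in 0.96+ clients in favor of the Cell interface. As a hedged alternative (not part of the SparkDemo source), the inner loop above could be written with org.apache.hadoop.hbase.Cell and org.apache.hadoop.hbase.CellUtil:

for (Cell cell : r.rawCells()) {
    System.out.println("column:" + Bytes.toString(CellUtil.cloneFamily(cell))
            + "====value:" + Bytes.toString(CellUtil.cloneValue(cell)));
}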

Example 2: copyTable

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Copy one column of a table into a new table.
 *
 * @throws IOException
 */
public static void copyTable(String oldTableName, String newTableName, String columnFamily, String columnName) throws IOException {
	if (CreateNewTable(newTableName)) {
		logger.info("Created table " + newTableName + " successfully");
	} else {
		logger.info("Failed to create table " + newTableName);
	}
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false); // keep a bulk read from polluting the block cache
	// hbase_table is the already-opened handle for oldTableName
	ResultScanner rs = hbase_table.getScanner(s);

	HTableInterface hbase_table_new = conn.getTable(newTableName);
	for (Result r : rs) {
		byte[] key = r.getRow();
		byte[] value = r.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName));
		Put put = new Put(key);
		put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName), value);
		hbase_table_new.put(put);
	}
	rs.close();
	hbase_table_new.close();
}
 
Developer ID: ItGql, Project: SparkIsax, Lines: 29, Source: HBaseUtils.java
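A small performance note on the copy loop above: calling put() once per row costs one RPC round-trip per row. A hedged variant, assuming the same fields as the example, buffers mutations and flushes them in batches via HTableInterface.put(List<Put>):

List<Put> buffer = new ArrayList<Put>();
for (Result r : rs) {
    Put put = new Put(r.getRow());
    put.add(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName),
            r.getValue(Bytes.toBytes(columnFamily), Bytes.toBytes(columnName)));
    buffer.add(put);
    if (buffer.size() >= 1000) { // batch size is an arbitrary assumption
        hbase_table_new.put(buffer);
        buffer.clear();
    }
}
if (!buffer.isEmpty()) {
    hbase_table_new.put(buffer); // flush the remainder
}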

Example 3: beforeTest

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
@Before
public void beforeTest() throws Exception {
  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  HTable table = createTable(pruneStateTable.getName(), new byte[][]{DataJanitorState.FAMILY}, false,
                             // Prune state table is a non-transactional table, hence no transaction co-processor
                             Collections.<String>emptyList());
  table.close();
  connection = HConnectionManager.createConnection(conf);

  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public HTableInterface get() throws IOException {
        return connection.getTable(pruneStateTable);
      }
    });

}
 
Developer ID: apache, Project: incubator-tephra, Lines: 20, Source: DataJanitorStateTest.java

Example 4: updateProfileCountsForSaleInHBase

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event) throws IOException, InterruptedException {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  ArrayList<Row> actions = new ArrayList<Row>();
  
  Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
  // Note: the original source increments TOTAL_VALUE_OF_PAST_SELLS_COL here too; for the buyer
  // side this looks like a copy/paste slip and TOTAL_VALUE_OF_PAST_PURCHASES_COL was likely intended.
  buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(buyerValueIncrement);
  
  Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
  sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
  actions.add(sellerValueIncrement);
  
  profileTable.batch(actions);
  
}
 
Developer ID: amitchmca, Project: hadooparchitecturebook, Lines: 18, Source: BasicFraudHBaseService.java

Example 5: logInProfileInHBase

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  
  ArrayList<Row> actions = new ArrayList<Row>();
  
  byte[] profileRowKey = generateProfileRowKey(userId);

  Delete delete = new Delete(profileRowKey);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
  delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
  actions.add(delete);
  
  Increment increment = new Increment(profileRowKey);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  actions.add(increment);
  
  Put put = new Put(profileRowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  actions.add(put);
  
  profileTable.batch(actions);
}
 
Developer ID: amitchmca, Project: hadooparchitecturebook, Lines: 24, Source: BasicFraudHBaseService.java

Example 6: createProfile

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
@Override
public void createProfile(long userId, ProfilePojo pojo, String ipAddress) throws Exception {
  HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);
  
  ArrayList<Row> actions = new ArrayList<Row>();
  
  byte[] rowKey = generateProfileRowKey(userId);
  Put put = new Put(rowKey);
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL, Bytes.toBytes(pojo.getUsername() + "|" + pojo.getAge() + "|" + System.currentTimeMillis()));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES, Bytes.toBytes(ipAddress));
  put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL, Bytes.toBytes(System.currentTimeMillis()));
  actions.add(put);
  
  Increment increment = new Increment(rowKey);
  
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
  increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
  actions.add(increment);
  
  profileTable.batch(actions);
}
 
Developer ID: amitchmca, Project: hadooparchitecturebook, Lines: 27, Source: BasicFraudHBaseService.java

Example 7: SecondaryIndexTable

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public SecondaryIndexTable(TransactionServiceClient transactionServiceClient, HTableInterface hTable,
                           byte[] secondaryIndex) {
  secondaryIndexTableName = TableName.valueOf(hTable.getName().getNameAsString() + ".idx");
  HTable secondaryIndexHTable = null;
  try (HBaseAdmin hBaseAdmin = new HBaseAdmin(hTable.getConfiguration())) {
    if (!hBaseAdmin.tableExists(secondaryIndexTableName)) {
      hBaseAdmin.createTable(new HTableDescriptor(secondaryIndexTableName));
    }
    secondaryIndexHTable = new HTable(hTable.getConfiguration(), secondaryIndexTableName);
  } catch (Exception e) {
    Throwables.propagate(e);
  }

  this.secondaryIndex = secondaryIndex;
  this.transactionAwareHTable = new TransactionAwareHTable(hTable);
  this.secondaryIndexTable = new TransactionAwareHTable(secondaryIndexHTable);
  this.transactionContext = new TransactionContext(transactionServiceClient, transactionAwareHTable,
                                                   secondaryIndexTable);
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 20, Source: SecondaryIndexTable.java
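Once constructed, a SecondaryIndexTable like this is normally driven through its TransactionContext so that the data table and the index table commit or roll back together. A minimal usage sketch, assuming the fields from the constructor above and placeholder column names (illustrative, not taken from the incubator-tephra source):

void transactionalPut(byte[] row, byte[] value) throws Exception {
  transactionContext.start();                               // begin a new transaction
  try {
    Put put = new Put(row);
    put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), value); // placeholder family/qualifier
    transactionAwareHTable.put(put);                        // buffered under the transaction
    transactionContext.finish();                            // commit across all registered tables
  } catch (Exception e) {
    transactionContext.abort();                             // roll back uncommitted changes
    throw e;
  }
}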

Example 8: verifyRows

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
private void verifyRows(HTableInterface table, Get get, List<byte[]> expectedValues) throws Exception {
  Result result = table.get(get);
  if (expectedValues == null) {
    assertTrue(result.isEmpty());
  } else {
    assertFalse(result.isEmpty());
    byte[] family = TestBytes.family;
    byte[] col = TestBytes.qualifier;
    if (get.hasFamilies()) {
      family = get.getFamilyMap().keySet().iterator().next();
      col = get.getFamilyMap().get(family).first();
    }
    Iterator<Cell> it = result.getColumnCells(family, col).iterator();
    for (byte[] expectedValue : expectedValues) {
      Assert.assertTrue(it.hasNext());
      assertArrayEquals(expectedValue, CellUtil.cloneValue(it.next()));
    }
  }
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 20, Source: TransactionAwareHTableTest.java

Example 9: getRegionsOnOrBeforeTime

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Return the set of regions saved for the time at or before the given time. This method finds the greatest time
 * that is less than or equal to the given time, and then returns all regions with that exact time, but none that are
 * older than that.
 *
 * @param time timestamp in milliseconds
 * @return set of regions and time at which they were recorded, or null if no regions found
 * @throws IOException when not able to read the data from HBase
 */
@Nullable
public TimeRegions getRegionsOnOrBeforeTime(long time) throws IOException {
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    TimeRegions timeRegions;
    while ((timeRegions = getNextSetOfTimeRegions(stateTable, time)) != null) {
      int count = getRegionCountForTime(stateTable, timeRegions.getTime());
      if (count != -1 && count == timeRegions.getRegions().size()) {
        return timeRegions;
      } else {
        LOG.warn(String.format("Got incorrect count for regions saved at time %s, expected = %s but actual = %s",
                               timeRegions.getTime(), count, timeRegions.getRegions().size()));
        time = timeRegions.getTime() - 1;
      }
    }
    return null;
  }
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 27, Source: DataJanitorState.java

Example 10: initialize

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
@Override
public void initialize(Configuration conf) throws IOException {
  this.conf = conf;
  this.hBaseAdmin = new HBaseAdmin(conf);
  this.connection = HConnectionManager.createConnection(conf);

  final TableName stateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                                          TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  LOG.info("Initializing plugin with state table {}:{}", stateTable.getNamespaceAsString(),
           stateTable.getNameAsString());
  createPruneTable(stateTable);
  this.dataJanitorState = new DataJanitorState(new DataJanitorState.TableSupplier() {
    @Override
    public HTableInterface get() throws IOException {
      return connection.getTable(stateTable);
    }
  });
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 19, Source: HBaseTransactionPruningPlugin.java

Example 11: getPruneInfoForRegions

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Gets a list of {@link RegionPruneInfo} for given regions. Returns all regions if the given regions set is null.
 *
 * @param regions a set of regions
 * @return list of {@link RegionPruneInfo}s.
 * @throws IOException when not able to read the data from HBase
 */
public List<RegionPruneInfo> getPruneInfoForRegions(@Nullable SortedSet<byte[]> regions) throws IOException {
  List<RegionPruneInfo> regionPruneInfos = new ArrayList<>();
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (regions == null || regions.contains(region)) {
          Cell cell = next.getColumnLatestCell(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (cell != null) {
            byte[] pruneUpperBoundBytes = CellUtil.cloneValue(cell);
            long timestamp = cell.getTimestamp();
            regionPruneInfos.add(new RegionPruneInfo(region, Bytes.toStringBinary(region),
                                                     Bytes.toLong(pruneUpperBoundBytes), timestamp));
          }
        }
      }
    }
  }
  return Collections.unmodifiableList(regionPruneInfos);
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 33, Source: DataJanitorState.java

Example 12: deletePruneUpperBounds

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Delete prune upper bounds for regions that are not in the given exclude set and whose
 * prune upper bound is less than the given value.
 * After the invalid list is pruned up to deletionPruneUpperBound, we do not need entries for regions that have
 * prune upper bound less than deletionPruneUpperBound. We however limit the deletion to only regions that are
 * no longer in existence (due to deletion, etc.), to avoid update/delete race conditions.
 *
 * @param deletionPruneUpperBound prune upper bound below which regions will be deleted
 * @param excludeRegions set of regions that should not be deleted
 * @throws IOException when not able to delete data in HBase
 */
public void deletePruneUpperBounds(long deletionPruneUpperBound, SortedSet<byte[]> excludeRegions)
  throws IOException {
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    byte[] startRow = makeRegionKey(EMPTY_BYTE_ARRAY);
    Scan scan = new Scan(startRow, REGION_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, PRUNE_UPPER_BOUND_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] region = getRegionFromKey(next.getRow());
        if (!excludeRegions.contains(region)) {
          byte[] timeBytes = next.getValue(FAMILY, PRUNE_UPPER_BOUND_COL);
          if (timeBytes != null) {
            long pruneUpperBoundRegion = Bytes.toLong(timeBytes);
            if (pruneUpperBoundRegion < deletionPruneUpperBound) {
              stateTable.delete(new Delete(next.getRow()));
            }
          }
        }
      }
    }
  }
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 36, Source: DataJanitorState.java

Example 13: getEmptyRegionsAfterTime

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/**
 * Return regions that were recorded as empty after the given time.
 *
 * @param time time in milliseconds
 * @param includeRegions If not null, the returned set will be an intersection of the includeRegions set
 *                       and the empty regions after the given time
 */
public SortedSet<byte[]> getEmptyRegionsAfterTime(long time, @Nullable SortedSet<byte[]> includeRegions)
  throws IOException {
  SortedSet<byte[]> emptyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  try (HTableInterface stateTable = stateTableSupplier.get()) {
    Scan scan = new Scan(makeEmptyRegionTimeKey(Bytes.toBytes(time + 1), EMPTY_BYTE_ARRAY),
                         EMPTY_REGION_TIME_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, EMPTY_REGION_TIME_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] emptyRegion = getEmptyRegionFromKey(next.getRow());
        if (includeRegions == null || includeRegions.contains(emptyRegion)) {
          emptyRegions.add(emptyRegion);
        }
      }
    }
  }
  return Collections.unmodifiableSortedSet(emptyRegions);
}
 
Developer ID: apache, Project: incubator-tephra, Lines: 28, Source: DataJanitorState.java

Example 14: HBasePStore

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
public HBasePStore(PStoreConfig<V> config, HTableInterface table) throws IOException {
  this.tableName = config.getName() + '\0';
  this.tableNameStartKey = Bytes.toBytes(tableName); // "tableName\x00"
  this.tableNameStopKey = this.tableNameStartKey.clone();
  this.tableNameStopKey[tableNameStartKey.length-1] = 1;
  this.config = config;
  this.table = table;
}
 
Developer ID: skhalifa, Project: QDrill, Lines: 9, Source: HBasePStore.java
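The trailing '\0' gives every key written by this store a fixed prefix, and bumping the final byte of a copy of the start key from 0 to 1 yields the tightest exclusive upper bound for that prefix. A hedged sketch of how these bounds would drive a prefix scan (illustrative, not from the QDrill source):

// Scan exactly the rows whose keys begin with "tableName\x00".
Scan scan = new Scan(tableNameStartKey, tableNameStopKey); // half-open range [start, stop)
ResultScanner scanner = table.getScanner(scan);
try {
    for (Result r : scanner) {
        // every result here belongs to this store's key namespace
    }
} finally {
    scanner.close();
}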

Example 15: shutdown

import org.apache.hadoop.hbase.client.HTableInterface; // import the required package/class
/** Clean up the environment */
protected void shutdown() {
  if (state == Coprocessor.State.ACTIVE) {
    state = Coprocessor.State.STOPPING;
    Thread currentThread = Thread.currentThread();
    ClassLoader hostClassLoader = currentThread.getContextClassLoader();
    try {
      currentThread.setContextClassLoader(this.getClassLoader());
      impl.stop(this);
      state = Coprocessor.State.STOPPED;
    } catch (IOException ioe) {
      LOG.error("Error stopping coprocessor "+impl.getClass().getName(), ioe);
    } finally {
      currentThread.setContextClassLoader(hostClassLoader);
    }
  } else {
    LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+
        " because not active (state="+state.toString()+")");
  }
  synchronized (openTables) {
    // clean up any table references
    for (HTableInterface table: openTables) {
      try {
        ((HTableWrapper)table).internalClose();
      } catch (IOException e) {
        // nothing can be done here
        LOG.warn("Failed to close " +
            Bytes.toStringBinary(table.getTableName()), e);
      }
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: CoprocessorHost.java


Note: The org.apache.hadoop.hbase.client.HTableInterface class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.